##// END OF EJS Templates
util: ensure forwarded attrs are set in globals() as sysstr...
Augie Fackler -
r30087:9b230a8e default
parent child Browse files
Show More
@@ -1,2898 +1,2899
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import re as remod
28 28 import shutil
29 29 import signal
30 30 import socket
31 31 import string
32 32 import subprocess
33 33 import sys
34 34 import tempfile
35 35 import textwrap
36 36 import time
37 37 import traceback
38 38 import zlib
39 39
40 40 from . import (
41 41 encoding,
42 42 error,
43 43 i18n,
44 44 osutil,
45 45 parsers,
46 46 pycompat,
47 47 )
48 48
# Forward a set of Python 2/3 compatibility attributes from pycompat into
# this module's namespace, so other modules can keep using e.g.
# "util.httplib" regardless of the Python version.
for attr in (
    'empty',
    'httplib',
    'httpserver',
    'pickle',
    'queue',
    'urlerr',
    'urlparse',
    # we do import urlreq, but we do it outside the loop
    #'urlreq',
    'stringio',
    'socketserver',
    'xmlrpclib',
):
    # sysstr() gives the native str type, so the globals() key is a real
    # identifier string on both Python 2 and Python 3
    a = pycompat.sysstr(attr)
    globals()[a] = getattr(pycompat, a)

# This line is to make pyflakes happy:
urlreq = pycompat.urlreq
67 68
# Pick the platform-specific implementation module (Windows vs. POSIX).
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

_ = i18n._

# Re-export the platform implementation's entry points at module level so
# callers can write e.g. util.rename without caring about the platform.
bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation from osutil when it provides statfiles
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username

# Python compatibility

# Unique sentinel used to detect "attribute absent" (see safehasattr).
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
139 140
def safehasattr(thing, attr):
    """Return True if ``thing`` has an attribute named ``attr``.

    Implemented as a getattr() probe against a unique sentinel object, so a
    genuinely stored None (or any other falsy value) still counts as
    present.
    """
    sentinel = object()
    return getattr(thing, attr, sentinel) is not sentinel
142 143
# Supported digest algorithms, keyed by their wire/protocol name.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check: the strength ordering must only mention known digests.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
153 154
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create one hash object per name in ``digests``.

        ``s`` is optional initial data fed to every digest. Raises Abort
        for an unknown digest name.
        """
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` to every tracked digest."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for ``key``; Abort if the type is unknown."""
        if key not in DIGESTS:
            # fix: the message previously interpolated ``k`` — a loop
            # variable leaked from __init__'s class-body-time execution —
            # instead of the requested ``key``, which could raise a
            # NameError or report the wrong digest name.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
200 201
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        # fh: underlying file-like object; size: expected total byte count;
        # digests: mapping of digest name -> expected hex value
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, updating digests and byte count."""
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        """Raise Abort unless the observed size and all digests match."""
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
232 233
# Python 2 has a builtin buffer(); Python 3 does not, so emulate it. The
# py2 fallback below is only reached in environments where the builtin is
# missing (the NameError branch).
try:
    buffer = buffer
except NameError:
    if not pycompat.ispy3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # memoryview gives a zero-copy view on py3
            return memoryview(sliceable)[offset:]

# subprocess close_fds default: close inherited fds on POSIX only
closefds = os.name == 'posix'

# read granularity for bufferedinputpipe._fillbuffer
_chunksize = 4096
246 247
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        # list of chunks read from the fd but not yet consumed
        self._buffer = []
        self._eof = False
        # total number of buffered bytes (sum of len of chunks)
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # fill until we can satisfy the request or hit EOF
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        # keep reading until a newline shows up or EOF
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks before slicing
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # raw os.read so buffering stays entirely under our control
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
340 341
def popen2(cmd, env=None, newlines=False):
    """Run ``cmd`` in a shell and return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
351 352
def popen3(cmd, env=None, newlines=False):
    """Like popen4, but without returning the Popen object itself."""
    pipes = popen4(cmd, env, newlines)
    return pipes[0], pipes[1], pipes[2]
355 356
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run ``cmd`` in a shell; return (stdin, stdout, stderr, Popen)."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
364 365
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # no generated __version__ module (e.g. running from a working copy)
        return 'unknown'
    return __version__.version
372 373
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # split off the "extra" suffix at the first '+' or '-'
    # fix: raw string — '[\+-]' relied on the invalid escape '\+' being
    # passed through literally, which Python 3 deprecates (and later makes
    # a SyntaxWarning/error)
    parts = remod.split(r'[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            # stop at the first non-numeric component
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
441 442
442 443 # used by parsedate
443 444 defaultdateformats = (
444 445 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
445 446 '%Y-%m-%dT%H:%M', # without seconds
446 447 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
447 448 '%Y-%m-%dT%H%M', # without seconds
448 449 '%Y-%m-%d %H:%M:%S', # our common legal variant
449 450 '%Y-%m-%d %H:%M', # without seconds
450 451 '%Y-%m-%d %H%M%S', # without :
451 452 '%Y-%m-%d %H%M', # without seconds
452 453 '%Y-%m-%d %I:%M:%S%p',
453 454 '%Y-%m-%d %H:%M',
454 455 '%Y-%m-%d %I:%M%p',
455 456 '%Y-%m-%d',
456 457 '%m-%d',
457 458 '%m/%d',
458 459 '%m/%d/%y',
459 460 '%m/%d/%Y',
460 461 '%a %b %d %H:%M:%S %Y',
461 462 '%a %b %d %I:%M:%S%p %Y',
462 463 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
463 464 '%b %d %H:%M:%S %Y',
464 465 '%b %d %I:%M:%S%p %Y',
465 466 '%b %d %H:%M:%S',
466 467 '%b %d %I:%M:%S%p',
467 468 '%b %d %H:%M',
468 469 '%b %d %I:%M%p',
469 470 '%b %d %Y',
470 471 '%b %d',
471 472 '%H:%M:%S',
472 473 '%I:%M:%S%p',
473 474 '%H:%M',
474 475 '%I:%M%p',
475 476 )
476 477
477 478 extendeddateformats = defaultdateformats + (
478 479 "%Y",
479 480 "%Y-%m",
480 481 "%b",
481 482 "%b %Y",
482 483 )
483 484
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # nothing to key on: remember the single result in a one-slot list
        results = []
        def f():
            if not results:
                results.append(func())
            return results[0]
        return f

    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def f(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return f
509 510
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-setting an existing key moves it
    to the end as if newly inserted.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # re-inserting an existing key moves it to the end of the order
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: return the popped value (it was previously discarded and the
        # method implicitly returned None), honoring the dict.pop contract
        ret = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned by dict.pop
            pass
        return ret
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place key at an explicit position in the order
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        return '%s(%r)' % (self.__class__.__name__, self.items())
558 559
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ keeps per-node memory small; caches can hold many nodes
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # key is _notset while the node holds no entry (see markempty)
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
577 578
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # backing dict: key -> _lrucachenode
        self._cache = {}

        # circular list starts as a single empty node pointing at itself
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        # _size counts allocated nodes (grown lazily up to _capacity)
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # an access refreshes the entry (moves it to the head)
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        # NOTE: unlike __getitem__, a hit here does not refresh the entry
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        # empty every populated node; the node ring itself is kept
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
736 737
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    results = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        # single-argument variant avoids tuple packing/unpacking
        def f(arg):
            if arg in results:
                order.remove(arg)
            else:
                if len(results) > 20:
                    # evict the least recently used entry
                    del results[order.popleft()]
                results[arg] = func(arg)
            order.append(arg)
            return results[arg]
    else:
        def f(*args):
            if args in results:
                order.remove(args)
            else:
                if len(results) > 20:
                    del results[order.popleft()]
                results[args] = func(*args)
            order.append(args)
            return results[args]

    return f
763 764
class propertycache(object):
    """Descriptor that turns a method into a lazily computed attribute.

    The first access computes the value and stores it in the instance's
    __dict__ under the same name, so subsequent accesses bypass the
    descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
776 777
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
783 784
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write the input to a temp file the command can read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # reserve an output path; the command itself will write it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temp files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
817 818
# Map of command-prefix -> filter implementation, consulted by filter().
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
822 823
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, fn in filtertable.iteritems():
        if not cmd.startswith(prefix):
            continue
        # strip the prefix and any leading whitespace from the command
        return fn(s, cmd[len(prefix):].lstrip())
    # no recognized prefix: default to a plain pipe filter
    return pipefilter(s, cmd)
829 830
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    return '\0' in s
833 834
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)); 0 for x == 0
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen < min:
            continue
        if min < max:
            # double the threshold, or jump straight past the size we are
            # about to emit if that is larger, capping at max
            min = min << 1
            nmin = 1 << log2(pendinglen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        yield ''.join(pending)
864 865
# Convenience re-export; the canonical definition lives in the error module.
Abort = error.Abort
866 867
def always(fn):
    """Predicate returning True for any input (the argument is ignored)."""
    return True
869 870
def never(fn):
    """Predicate returning False for any input (the argument is ignored)."""
    return False
872 873
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    if sys.version_info >= (2, 7):
        # nothing to work around on 2.7+: return the function untouched
        return func

    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()
    return wrapper
896 897
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # different drive letters (Windows): no relative path exists,
        # fall back to an absolute path under root
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    # strip the common leading components of n1 and n2, then climb out of
    # what remains of n1 ('..' per component) and descend into n2
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'
922 923
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):      # new py2exe
        return True
    if safehasattr(sys, "importers"):   # old py2exe
        return True
    return imp.is_frozen(u"__main__")   # tools/freeze
932 933
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

# cached path of the 'hg' executable; filled in lazily by hgexecutable()
_hgexecutable = None
943 944
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # explicit override via the HG environment variable
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # we were started by running the 'hg' script directly
            _sethgexecutable(mainmod.__file__)
        else:
            # last resort: search PATH, falling back to argv[0]'s basename
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
966 967
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # stored in the module-level cache consumed by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
971 972
972 973 def _isstdout(f):
973 974 fileno = getattr(f, 'fileno', None)
974 975 return fileno and fileno() == sys.__stdout__.fileno()
975 976
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    # flush our stdout so the child's output is not interleaved before ours
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # child can inherit our stdout directly
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # capture the child's combined stdout/stderr and copy it to out
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
        if sys.platform == 'OpenVMS' and rc & 1:
            # on OpenVMS an odd status means success
            rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
1031 1032
def checksignature(func):
    """Wrap *func* so that a TypeError caused by calling it with a bad
    signature is reported as error.SignatureError instead."""
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback only one frame deep means the TypeError came
            # from the call itself, not from inside func's body
            tb = sys.exc_info()[2]
            if len(traceback.extract_tb(tb)) == 1:
                raise error.SignatureError
            raise

    return check
1043 1044
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember the destination's stat before unlinking so we can
            # detect timestamp ambiguity against the new copy below
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1088 1089
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) pair: whether hardlinking was still in
    effect at the end, and how many files were copied/linked.
    """
    num = 0

    if hardlink is None:
        # only attempt hardlinks when src and dst live on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # shift nested progress by the files handled so far
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # once linking fails, copy this and all remaining files
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1125 1126
# names Windows reserves for devices regardless of extension
# (matched case-insensitively by checkwinfilename below)
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# characters that may never appear in a Windows filename
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component individually
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters (ASCII 0-31) are invalid in filenames
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # device names are reserved with any extension, so only the part
        # before the first dot matters
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # reject a trailing dot or space, but allow the components '.'
        # and '..' themselves (the substring test n not in '..' covers
        # both in one expression)
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1176 1177
# on Windows the stricter reserved-name checks above apply; elsewhere
# defer to the platform module's implementation
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1181 1182
def makelock(info, pathname):
    """Create a lock at *pathname* whose content is *info*.

    A symlink pointing at *info* is preferred; when symlinks are
    unavailable (or creation fails for a reason other than the lock
    already existing), fall back to an exclusively-created regular
    file containing *info*.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as err:
        # an already-existing lock must be reported to the caller
        if err.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
    lockfd = os.open(pathname, flags)
    os.write(lockfd, info)
    os.close(lockfd)
1194 1195
def readlock(pathname):
    """Return the info stored in the lock at *pathname*.

    Reads the symlink target when the lock is a symlink, otherwise the
    content of the fallback lock file.
    """
    try:
        return os.readlink(pathname)
    except OSError as err:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported here
        if err.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1207 1208
def fstat(fp):
    """Stat a file object, falling back to stat'ing its .name when the
    object provides no fileno() method."""
    try:
        fd = fp.fileno()
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fd)
1214 1215
1215 1216 # File system features
1216 1217
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    origstat = os.lstat(path)
    dirname, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # the name folds to itself: no evidence against case sensitivity
        return True
    try:
        foldedstat = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the folded spelling does not exist, so case matters
        return True
    # identical stat under both spellings implies case-insensitivity
    return foldedstat != origstat
1239 1240
# optional re2 C extension for faster regex matching.
# _re2 is a tri-state flag: None means "imported but not yet verified",
# False means unavailable/broken; _re._checkre2() later sets it to a bool.
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1245 1246
class _re(object):
    """Facade over the stdlib re module that transparently prefers the
    faster re2 engine when it is importable and functional."""

    def _checkre2(self):
        # resolve the _re2 tri-state (see the import probe above) by
        # exercising re2 once
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes no flags argument; encode the supported flags
            # inline in the pattern instead
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks; fall back to re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton used as util.re throughout the codebase
re = _re()
1290 1291
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry -> real on-disk spelling for one directory
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string, so the result must be assigned
    # back; previously the return value was silently discarded, leaving
    # the backslash unescaped inside the character classes built below.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through untouched
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1333 1334
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # could not even create the probe file: report links as broken
        try:
            os.unlink(f1)
        except OSError:
            pass
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always remove both probe files, closing f2 first
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1369 1370
def endswithsep(path):
    """Check whether *path* ends with os.sep or os.altsep."""
    if path.endswith(os.sep):
        return True
    # on platforms without an altsep this yields None (falsy), matching
    # the short-circuit behavior of the boolean expression form
    return os.altsep and path.endswith(os.altsep)
1373 1374
def splitpath(path):
    """Split *path* on os.sep only (equivalent to path.split(os.sep)).

    os.altsep is deliberately ignored; run os.path.normpath() on the
    input first if alternate separators may be present.
    """
    return path.split(os.sep)
1381 1382
def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        # elsewhere, Windows always counts as GUI-capable; on other
        # platforms the presence of $DISPLAY decides
        return os.name == "nt" or os.environ.get("DISPLAY")
1396 1397
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # source vanished: the (empty) temp file will do
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # never leave a stray temp file behind on failure
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1435 1436
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # nonexistent file: record the absence instead of failing
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # Once ambiguity between the two stats has been avoided (see
            # isambig), comparing size, ctime and mtime is enough to
            # detect any change of the file on every platform.
            mine = self.stat
            theirs = old.stat
            return (mine.st_size == theirs.st_size and
                    mine.st_ctime == theirs.st_ctime and
                    mine.st_mtime == theirs.st_mtime)
        except AttributeError:
            # one side has no stat (missing file): never equal
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        Two stats taken within the same second (equal st_ctime) cannot
        be reliably told apart by timestamp comparison, regardless of
        mtime: a naturally advanced st_mtime may collide with an mtime
        that was manually advanced earlier to break a previous
        ambiguity.  Callers therefore advance mtime by one second
        whenever this returns True, which guarantees
        "S[n-1].mtime != S[n].mtime" even when the file size is
        unchanged.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)
1501 1502
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Commit the writes: rename the temp file over the real name."""
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Throw away all writes and remove the temporary file."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on success, discard on error
        if exctype is not None:
            self.discard()
        else:
            self.close()
1564 1565
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        parentdir = os.path.dirname(os.path.abspath(name))
        if parentdir == name:
            # reached the filesystem root without success
            raise
        makedirs(parentdir, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as exc:
            # a concurrent creator winning the race is not an error
            if exc.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1592 1593
def readfile(path):
    """Return the entire binary content of the file at *path*."""
    with open(path, 'rb') as fp:
        data = fp.read()
    return data
1596 1597
def writefile(path, text):
    """Create or overwrite *path* with the bytes in *text*."""
    with open(path, 'wb') as fp:
        fp.write(text)
1600 1601
def appendfile(path, text):
    """Append the bytes in *text* to *path*, creating the file if it
    does not exist yet."""
    with open(path, 'ab') as fp:
        fp.write(text)
1604 1605
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # break chunks bigger than 1MB into 256KB pieces so a huge
            # chunk never has to be sliced repeatedly by read()
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # offset of the first unconsumed byte within self._queue[0]
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1685 1686
def filechunkiter(f, size=65536, limit=None):
    """Yield the data of file object *f* in chunks of *size* bytes
    (default 65536), stopping after *limit* bytes when a limit is
    given (default is to read all data).  A chunk may be shorter than
    *size* at the end of the file, or when *f* is a socket or similar
    object that returns short reads."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # when the remaining limit is 0, skip the read entirely
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1706 1707
def makedate(timestamp=None):
    """Return (unixtime, tzoffset) for *timestamp*, defaulting to the
    current time.  The offset is the local timezone's distance from UTC
    in seconds (positive west of Greenwich)."""
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # derive the local offset by comparing the naive UTC and local
    # renderings of the same instant
    localized = datetime.datetime.fromtimestamp(timestamp)
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    offset = utc - localized
    tz = offset.days * 86400 + offset.seconds
    return timestamp, tz
1719 1720
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        sign = "-" if tz > 0 else "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        # expand %z first so it also picks up the computed offset parts
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    # clamp to the signed 32-bit range accepted everywhere
    d = min(0x7fffffff, max(-0x80000000, t - tz))
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    when = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return when.strftime(format)
1755 1756
def shortdate(date=None):
    """Render a (timestamp, tzoff) tuple as an ISO 8601 date (YYYY-MM-DD)."""
    isofmt = '%Y-%m-%d'
    return datestr(date, format=isofmt)
1759 1760
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair"""

    # named UTC aliases
    if s.endswith(("GMT", "UTC")):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        sign = 1 if s[-5] == "+" else -1
        hours = int(s[-4:-2])
        minutes = int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = 1 if s[-6] == "+" else -1
        hours = int(s[-5:-3])
        minutes = int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-6]

    # no recognizable timezone suffix
    return None, s
1787 1788
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults maps a specificity key ('S', 'M', 'HI', 'd', 'mb', 'yY')
    to a (biasedvalue, todayvalue) pair; the [] default is never usable
    on its own -- callers (see parsedate) always supply the dict.
    """
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # element absent from the format: append the default value
            # (and a matching directive) so strptime still sees it
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1815 1816
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed: pass through unchanged
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # accept both the English keywords and their translations
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: "unixtime offset" pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1892 1893
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lowerbound(date):
        # round every unspecified field down (Jan 1st, midnight)
        bias = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, bias)[0]

    def upperbound(date):
        # round every unspecified field up (Dec 31st, 23:59:59); probe
        # month lengths longest-first and fall back to 28 days
        bias = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                bias["d"] = days
                return parsedate(date, extendeddateformats, bias)[0]
            except Abort:
                pass
        bias["d"] = "28"
        return parsedate(date, extendeddateformats, bias)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    if date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upperbound(date[1:])
        return lambda x: x <= when
    if date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lowerbound(date[1:])
        return lambda x: x >= when
    if date[0] == "-":
        # '-N': everything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    if " to " in date:
        a, b = date.split(" to ")
        start, stop = lowerbound(a), upperbound(b)
        return lambda x: start <= x <= stop
    # bare date: match anywhere inside its implied interval
    start, stop = lowerbound(date), upperbound(date)
    return lambda x: start <= x <= stop
1968 1969
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        regexsource = pattern[3:]
        try:
            compiled = remod.compile(regexsource)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexsource, compiled.search
    if pattern.startswith('literal:'):
        # explicit literal request: strip the prefix, then fall through
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
2007 2008
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the mail domain, then anything before '<', then keep only the
    # first word and its leading dotted component
    user = user.split('@', 1)[0]
    user = user.split('<', 1)[-1]
    user = user.split(' ', 1)[0]
    user = user.split('.', 1)[0]
    return user
2023 2024
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the mail domain first, then any 'Real Name <' prefix
    user = user.split('@', 1)[0]
    return user.split('<', 1)[-1]
2033 2034
def email(author):
    '''get email of author.'''
    # take the text between '<' and '>'; when either bracket is missing,
    # find() returns -1, which conveniently selects index 0 / end-of-string
    close = author.find('>')
    if close == -1:
        close = None
    return author[author.find('<') + 1:close]
2040 2041
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display.

    Column accounting and truncation are delegated to encoding.trim,
    passing '...' as the truncation marker.
    """
    return encoding.trim(text, maxlength, ellipsis='...')
2044 2045
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    unittable entries are (multiplier, divisor, format) tuples, tried in
    order; the first entry whose threshold (divisor * multiplier) the
    count reaches wins, and the last entry's format is the fallback.
    '''

    fallbackfmt = unittable[-1][2]

    def go(count):
        matches = (fmt % (count / float(divisor))
                   for multiplier, divisor, fmt in unittable
                   if count >= divisor * multiplier)
        return next(matches, fallbackfmt % count)

    return go
2055 2056
# render a byte count with a readable unit; tiers are ordered so the
# first (coarsest) matching (multiplier, divisor) pair selects both the
# unit and the number of decimal places
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2068 2069
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed to single ones.

    repr() escapes every backslash, which makes Windows paths hard to
    read in user-facing output; undo that doubling.
    """
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')
2072 2073
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Return a width-aware TextWrapper instance.

    On first call this defines the wrapper class, rebinds the module-level
    name MBTextWrapper to that class (so later calls construct instances
    directly), and returns an instance built with **kwargs.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr at the largest prefix whose display width
            # (per encoding.ucolwidth) still fits in space_left
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class under the factory's own name so the class body is
    # only built once per process
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2176 2177
def wrap(line, width, initindent='', hangindent=''):
    """Fill line to width display columns, decoding from the local
    encoding, applying initindent/hangindent, and re-encoding the result."""
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line, initindent, hangindent = [
        s.decode(encoding.encoding, encoding.encodingmode)
        for s in (line, initindent, hangindent)]
    filler = MBTextWrapper(width=width,
                           initial_indent=initindent,
                           subsequent_indent=hangindent)
    return filler.fill(line).encode(encoding.encoding)
2189 2190
def iterlines(iterator):
    """Re-yield the text lines of each (possibly multi-line) chunk."""
    for piece in iterator:
        for textline in piece.splitlines():
            yield textline
2194 2195
def expandpath(path):
    """Expand environment variables, then ~ and ~user, in path."""
    path = os.path.expandvars(path)
    return os.path.expanduser(path)
2197 2198
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2212 2213
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and record what os.wait() returned, so the
        # polling loop below can notice an early exit
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)  # absent on Windows
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        # NOTE(review): os.wait() returns a (pid, status) tuple, so
        # 'pid in terminated' compares an int against tuples and never
        # matches; early death appears to be caught by testpid() instead.
        # Confirm whether this is intended.
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            # restore whatever SIGCHLD handler was installed before us
            signal.signal(signal.SIGCHLD, prevhandler)
2247 2248
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        # drop the regex-escaping backslash (if any) to obtain the raw
        # prefix character, and let a doubled prefix stand for itself
        prefix_char = prefix[1:] or prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))

    def substitute(match):
        # match.group() includes the one-character prefix; strip it
        return fn(mapping[match.group()[1:]])

    return matcher.sub(substitute, s)
2272 2273
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall through to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2289 2290
# recognized spellings of booleans; keys must be lowercase because
# parsebool() lowercases its input before the lookup
_booleans = {
    '1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
    '0': False, 'no': False, 'false': False, 'off': False, 'never': False,
}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2300 2301
# table of every two-digit hex string (all upper/lowercase mixes) to the
# character with that ordinal
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in string.hexdigits for b in string.hexdigits)
2303 2304
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url('')
    <url path: ''>
    >>> url('#a')
    <url path: '', fragment: 'a'>
    >>> url('http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url('http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url('http:')
    <url scheme: 'http'>
    """

    # characters left unescaped when re-quoting credentials / path
    # components in __str__
    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, pycompat.urlparse.unquote(v))

    def __repr__(self):
        # only components that were actually parsed show up in the repr
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        # render the URL without credentials for the password manager
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2604 2605
def hasscheme(path):
    """Report whether path parses as a URL with a scheme component."""
    return bool(url(path).scheme)
2607 2608
def hasdriveletter(path):
    """Check whether path begins with a Windows drive letter ('c:...')."""
    if not path:
        # preserve the original expression's falsy passthrough ('', None)
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2610 2611
def urllocalpath(path):
    """Return the local filesystem path for path.

    '?' and '#' are treated as literal path characters (parsequery and
    parsefragment disabled) rather than query/fragment delimiters.
    """
    return url(path, parsequery=False, parsefragment=False).localpath()
2613 2614
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # keep the user name, mask only the password
        parsed.passwd = '***'
    return str(parsed)
2620 2621
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2626 2627
def isatty(fp):
    """Return fp.isatty(), or False when fp has no isatty method."""
    try:
        result = fp.isatty()
    except AttributeError:
        return False
    return result
2632 2633
# render a duration (in seconds) with a readable unit; entries are
# (multiplier, divisor, format) tuples consumed by unitcountfn()
# NOTE(review): the first entry formats durations >= 1000s as
# count/1e3 labelled 's' (e.g. 2000s renders as '2 s') -- looks
# suspicious, confirm intent before relying on long-duration output
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2648 2649
# current stderr indentation (in columns) for nested @timed calls
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        indent = 2
        start = time.time()
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            # report even when func raises, indented by nesting depth
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2675 2676
# unit suffixes tried in order by str.endswith(); single letters come
# first, then two-letter spellings, with bare 'b' as the last resort
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, unitscale in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * unitscale)
        # no recognized suffix: a plain byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2697 2698
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort by source name so the call sequence is deterministic
        self._hooks.sort(key=lambda entry: entry[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
2715 2716
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    # drop the 'skip' innermost frames plus this function's own frame
    stack = traceback.extract_stack()[:-skip - 1]
    entries = [(fileline % (fname, lineno), funcname)
               for fname, lineno, funcname, _text in stack]
    if not entries:
        return
    fnmax = max(len(location) for location, _func in entries)
    for location, funcname in entries:
        if line is None:
            yield (fnmax, location, funcname)
        else:
            yield line % (fnmax, location, funcname)

def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1 also hides this very function from the reported stack
    for frameline in getstackframes(skip + 1):
        f.write(frameline)
    f.flush()
2750 2751
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if skip is not None and safehasattr(map, 'iteritems'):
            # dict-like input (dirstate): honor the skip state code,
            # which is the first element of each value tuple
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # ancestors are already accounted for; stop early
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                # ancestors still referenced by other paths; stop early
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2786 2787
# Prefer the C implementation of 'dirs' from the parsers extension
# module when it provides one (same interface as the class above).
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2789 2790
def finddirs(path):
    '''Yield each ancestor directory of a '/'-separated path, deepest first.

    'a/b/c' yields 'a/b' then 'a'; a path with no slash yields nothing.
    '''
    end = path.rfind('/')
    while end >= 0:
        yield path[:end]
        end = path.rfind('/', 0, end)
2795 2796
2796 2797 # compression utility
2797 2798
class nocompress(object):
    '''Pass-through "compressor" used for uncompressed streams.'''

    def compress(self, x):
        # identity: hand the data back untouched
        return x

    def flush(self):
        # nothing is ever buffered, so there is nothing left to emit
        return ""
2803 2804
# Map a compression type name to a zero-argument factory returning a
# compressor object; the lambdas defer constructing the compressor
# until one is actually needed.
compressors = {}
compressors[None] = nocompress
compressors['BZ'] = lambda: bz2.BZ2Compressor()
compressors['GZ'] = lambda: zlib.compressobj()
# 'UN' is the legacy spelling of "no compression"
compressors['UN'] = compressors[None]
2812 2813
def _makedecompressor(decompcls):
    '''Build a function wrapping a file handle in a decompressing chunkbuffer.

    decompcls is a zero-argument factory returning an object with a
    decompress(chunk) method (e.g. zlib.decompressobj).
    '''
    def _stream(fh):
        engine = decompcls()
        for block in filechunkiter(fh):
            yield engine.decompress(block)

    def func(fh):
        return chunkbuffer(_stream(fh))
    return func
2821 2822
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at enter() time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor. Returns the list of values produced by
        the managers' __enter__ methods.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created. Returns True iff an incoming exception was
        suppressed by one of the managers.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this manager suppressed the exception: stop
                    # propagating it to the remaining exit handlers
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the most recent failure but keep running the
                # remaining exit handlers; re-raised after the loop.
                # (previously sys.exc_info() was called twice here and the
                # first 'pending' assignment was immediately overwritten)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2881 2882
2882 2883 def _bz2():
2883 2884 d = bz2.BZ2Decompressor()
2884 2885 # Bzip2 stream start with BZ, but we stripped it.
2885 2886 # we put it back for good measure.
2886 2887 d.decompress('BZ')
2887 2888 return d
2888 2889
# Map a compression type name to a function turning a file handle into
# a chunkbuffer of decompressed data; None means "no compression".
decompressors = {None: lambda fh: fh}
decompressors['_truncatedBZ'] = _makedecompressor(_bz2)
decompressors['BZ'] = _makedecompressor(lambda: bz2.BZ2Decompressor())
decompressors['GZ'] = _makedecompressor(lambda: zlib.decompressobj())
# 'UN' is the legacy spelling of "no compression"
decompressors['UN'] = decompressors[None]
2896 2897
# convenient shortcut for debugstacktrace, handy in interactive sessions
dst = debugstacktrace
@@ -1,141 +1,63
1 1 #require test-repo
2 2
3 3 $ . "$TESTDIR/helpers-testrepo.sh"
4 4 $ cd "$TESTDIR"/..
5 5
6 6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
7 7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
8 8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
9 9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
10 10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
11 11 i18n/check-translation.py not using absolute_import
12 12 setup.py not using absolute_import
13 13 tests/test-demandimport.py not using absolute_import
14 14
15 15 #if py3exe
16 16 $ hg files 'set:(**.py) - grep(pygments)' | sed 's|\\|/|g' \
17 17 > | xargs $PYTHON3 contrib/check-py3-compat.py \
18 18 > | sed 's/[0-9][0-9]*)$/*)/'
19 hgext/automv.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
20 hgext/blackbox.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
21 hgext/bugzilla.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
22 hgext/censor.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
23 hgext/chgserver.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
24 hgext/children.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
25 hgext/churn.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
26 hgext/clonebundles.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
27 hgext/color.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
28 19 hgext/convert/bzr.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
29 hgext/convert/common.py: error importing module: <AttributeError> module 'mercurial.util' has no attribute 'pickle' (line *)
30 hgext/convert/convcmd.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
20 hgext/convert/convcmd.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
31 21 hgext/convert/cvs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
32 hgext/convert/cvsps.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
33 22 hgext/convert/darcs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
34 23 hgext/convert/filemap.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
35 24 hgext/convert/git.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
36 25 hgext/convert/gnuarch.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
37 hgext/convert/hg.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
26 hgext/convert/hg.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
38 27 hgext/convert/monotone.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
39 28 hgext/convert/p4.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
40 29 hgext/convert/subversion.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
41 30 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *)
42 hgext/eol.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
43 hgext/extdiff.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
44 hgext/factotum.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
45 hgext/fetch.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
46 31 hgext/fsmonitor/watchmanclient.py: error importing module: <SystemError> Parent module 'hgext.fsmonitor' not loaded, cannot perform relative import (line *)
47 hgext/gpg.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
48 hgext/graphlog.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
49 hgext/hgk.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
50 hgext/histedit.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
51 hgext/journal.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
52 hgext/keyword.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'httpserver' (error at common.py:*)
32 hgext/journal.py: error importing module: <SystemError> Parent module 'hgext' not loaded, cannot perform relative import (line *)
53 33 hgext/largefiles/basestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
54 hgext/largefiles/lfcommands.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
55 hgext/largefiles/lfutil.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
34 hgext/largefiles/lfcommands.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
56 35 hgext/largefiles/localstore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
57 hgext/largefiles/overrides.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
58 hgext/largefiles/proto.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
59 hgext/largefiles/remotestore.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
60 hgext/largefiles/reposetup.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
61 hgext/largefiles/storefactory.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
62 hgext/largefiles/uisetup.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'httpserver' (error at common.py:*)
36 hgext/largefiles/overrides.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
37 hgext/largefiles/proto.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
38 hgext/largefiles/remotestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
39 hgext/largefiles/reposetup.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
40 hgext/largefiles/storefactory.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
41 hgext/largefiles/uisetup.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
63 42 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
64 hgext/mq.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
65 hgext/notify.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
66 hgext/pager.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
67 hgext/patchbomb.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
68 hgext/purge.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
69 hgext/rebase.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
70 hgext/record.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
71 hgext/relink.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
72 hgext/schemes.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
73 hgext/share.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
74 hgext/shelve.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
75 hgext/strip.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
76 hgext/transplant.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
77 mercurial/archival.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
78 mercurial/bundle2.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
79 mercurial/bundlerepo.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
80 mercurial/byterange.py: error importing module: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (line *)
81 mercurial/changelog.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
82 mercurial/cmdutil.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
83 mercurial/commands.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
84 mercurial/context.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
85 mercurial/crecord.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
86 mercurial/dispatch.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
43 hgext/mq.py: error importing: <TypeError> startswith first arg must be str or a tuple of str, not bytes (error at extensions.py:*)
44 hgext/rebase.py: error importing: <TypeError> Can't convert 'bytes' object to str implicitly (error at registrar.py:*)
45 hgext/record.py: error importing module: <KeyError> '^commit|ci' (line *)
46 hgext/shelve.py: error importing module: <SystemError> Parent module 'hgext' not loaded, cannot perform relative import (line *)
47 hgext/transplant.py: error importing: <TypeError> Can't convert 'bytes' object to str implicitly (error at registrar.py:*)
87 48 mercurial/encoding.py: error importing module: <TypeError> bytes expected, not str (line *)
88 mercurial/exchange.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
89 mercurial/extensions.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
90 mercurial/filelog.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
91 mercurial/filemerge.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
92 mercurial/fileset.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
93 mercurial/formatter.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
94 mercurial/help.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
95 mercurial/hg.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
96 mercurial/hgweb/common.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
97 mercurial/hgweb/hgweb_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
98 mercurial/hgweb/hgwebdir_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
99 mercurial/hgweb/protocol.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
100 mercurial/hgweb/request.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
101 mercurial/hgweb/server.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
102 mercurial/hgweb/webcommands.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
103 mercurial/hgweb/webutil.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
104 mercurial/hgweb/wsgicgi.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
105 mercurial/hook.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
106 mercurial/httpconnection.py: error importing module: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (line *)
107 mercurial/httppeer.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
49 mercurial/fileset.py: error importing: <TypeError> Can't convert 'bytes' object to str implicitly (error at registrar.py:*)
108 50 mercurial/i18n.py: error importing module: <TypeError> bytes expected, not str (line *)
109 mercurial/keepalive.py: error importing module: <AttributeError> module 'mercurial.util' has no attribute 'httplib' (line *)
110 mercurial/localrepo.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
111 mercurial/manifest.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
112 mercurial/merge.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
113 mercurial/namespaces.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
114 mercurial/patch.py: error importing module: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (line *)
115 mercurial/pvec.py: error importing module: <NameError> name 'xrange' is not defined (line *)
116 mercurial/repair.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
117 mercurial/revlog.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
118 51 mercurial/revset.py: error importing module: <AttributeError> 'dict' object has no attribute 'iteritems' (line *)
119 52 mercurial/scmwindows.py: error importing module: <ImportError> No module named 'winreg' (line *)
120 mercurial/sshpeer.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
121 mercurial/sshserver.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
122 mercurial/statichttprepo.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at byterange.py:*)
123 mercurial/store.py: error importing module: <NameError> name 'xrange' is not defined (line *)
124 mercurial/subrepo.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
125 mercurial/templatefilters.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
126 mercurial/templatekw.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
127 mercurial/templater.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
128 mercurial/ui.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
129 mercurial/unionrepo.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
130 mercurial/url.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
53 mercurial/store.py: error importing module: <TypeError> Can't convert 'bytes' object to str implicitly (line *)
131 54 mercurial/win32.py: error importing module: <ImportError> No module named 'msvcrt' (line *)
132 55 mercurial/windows.py: error importing module: <ImportError> No module named 'msvcrt' (line *)
133 mercurial/wireproto.py: error importing: <TypeError> %b requires bytes, or an object that implements __bytes__, not 'str' (error at bundle2.py:*)
134 56
135 57 #endif
136 58
137 59 #if py3exe py3pygments
138 60 $ hg files 'set:(**.py) and grep(pygments)' | sed 's|\\|/|g' \
139 61 > | xargs $PYTHON3 contrib/check-py3-compat.py \
140 62 > | sed 's/[0-9][0-9]*)$/*)/'
141 63 #endif
General Comments 0
You need to be logged in to leave comments. Login now