##// END OF EJS Templates
util: use string.hexdigits instead of defining it ourselves...
Augie Fackler -
r30054:8b89521a default
parent child Browse files
Show More
@@ -1,2918 +1,2918 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import string
31 import subprocess
32 import subprocess
32 import sys
33 import sys
33 import tempfile
34 import tempfile
34 import textwrap
35 import textwrap
35 import time
36 import time
36 import traceback
37 import traceback
37 import zlib
38 import zlib
38
39
39 from . import (
40 from . import (
40 encoding,
41 encoding,
41 error,
42 error,
42 i18n,
43 i18n,
43 osutil,
44 osutil,
44 parsers,
45 parsers,
45 pycompat,
46 pycompat,
46 )
47 )
47
48
48 for attr in (
49 for attr in (
49 'empty',
50 'empty',
50 'httplib',
51 'httplib',
51 'httpserver',
52 'httpserver',
52 'pickle',
53 'pickle',
53 'queue',
54 'queue',
54 'urlerr',
55 'urlerr',
55 'urlparse',
56 'urlparse',
56 # we do import urlreq, but we do it outside the loop
57 # we do import urlreq, but we do it outside the loop
57 #'urlreq',
58 #'urlreq',
58 'stringio',
59 'stringio',
59 'socketserver',
60 'socketserver',
60 'xmlrpclib',
61 'xmlrpclib',
61 ):
62 ):
62 globals()[attr] = getattr(pycompat, attr)
63 globals()[attr] = getattr(pycompat, attr)
63
64
64 # This line is to make pyflakes happy:
65 # This line is to make pyflakes happy:
65 urlreq = pycompat.urlreq
66 urlreq = pycompat.urlreq
66
67
67 if os.name == 'nt':
68 if os.name == 'nt':
68 from . import windows as platform
69 from . import windows as platform
69 else:
70 else:
70 from . import posix as platform
71 from . import posix as platform
71
72
72 _ = i18n._
73 _ = i18n._
73
74
74 bindunixsocket = platform.bindunixsocket
75 bindunixsocket = platform.bindunixsocket
75 cachestat = platform.cachestat
76 cachestat = platform.cachestat
76 checkexec = platform.checkexec
77 checkexec = platform.checkexec
77 checklink = platform.checklink
78 checklink = platform.checklink
78 copymode = platform.copymode
79 copymode = platform.copymode
79 executablepath = platform.executablepath
80 executablepath = platform.executablepath
80 expandglobs = platform.expandglobs
81 expandglobs = platform.expandglobs
81 explainexit = platform.explainexit
82 explainexit = platform.explainexit
82 findexe = platform.findexe
83 findexe = platform.findexe
83 gethgcmd = platform.gethgcmd
84 gethgcmd = platform.gethgcmd
84 getuser = platform.getuser
85 getuser = platform.getuser
85 getpid = os.getpid
86 getpid = os.getpid
86 groupmembers = platform.groupmembers
87 groupmembers = platform.groupmembers
87 groupname = platform.groupname
88 groupname = platform.groupname
88 hidewindow = platform.hidewindow
89 hidewindow = platform.hidewindow
89 isexec = platform.isexec
90 isexec = platform.isexec
90 isowner = platform.isowner
91 isowner = platform.isowner
91 localpath = platform.localpath
92 localpath = platform.localpath
92 lookupreg = platform.lookupreg
93 lookupreg = platform.lookupreg
93 makedir = platform.makedir
94 makedir = platform.makedir
94 nlinks = platform.nlinks
95 nlinks = platform.nlinks
95 normpath = platform.normpath
96 normpath = platform.normpath
96 normcase = platform.normcase
97 normcase = platform.normcase
97 normcasespec = platform.normcasespec
98 normcasespec = platform.normcasespec
98 normcasefallback = platform.normcasefallback
99 normcasefallback = platform.normcasefallback
99 openhardlinks = platform.openhardlinks
100 openhardlinks = platform.openhardlinks
100 oslink = platform.oslink
101 oslink = platform.oslink
101 parsepatchoutput = platform.parsepatchoutput
102 parsepatchoutput = platform.parsepatchoutput
102 pconvert = platform.pconvert
103 pconvert = platform.pconvert
103 poll = platform.poll
104 poll = platform.poll
104 popen = platform.popen
105 popen = platform.popen
105 posixfile = platform.posixfile
106 posixfile = platform.posixfile
106 quotecommand = platform.quotecommand
107 quotecommand = platform.quotecommand
107 readpipe = platform.readpipe
108 readpipe = platform.readpipe
108 rename = platform.rename
109 rename = platform.rename
109 removedirs = platform.removedirs
110 removedirs = platform.removedirs
110 samedevice = platform.samedevice
111 samedevice = platform.samedevice
111 samefile = platform.samefile
112 samefile = platform.samefile
112 samestat = platform.samestat
113 samestat = platform.samestat
113 setbinary = platform.setbinary
114 setbinary = platform.setbinary
114 setflags = platform.setflags
115 setflags = platform.setflags
115 setsignalhandler = platform.setsignalhandler
116 setsignalhandler = platform.setsignalhandler
116 shellquote = platform.shellquote
117 shellquote = platform.shellquote
117 spawndetached = platform.spawndetached
118 spawndetached = platform.spawndetached
118 split = platform.split
119 split = platform.split
119 sshargs = platform.sshargs
120 sshargs = platform.sshargs
120 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
121 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
121 statisexec = platform.statisexec
122 statisexec = platform.statisexec
122 statislink = platform.statislink
123 statislink = platform.statislink
123 termwidth = platform.termwidth
124 termwidth = platform.termwidth
124 testpid = platform.testpid
125 testpid = platform.testpid
125 umask = platform.umask
126 umask = platform.umask
126 unlink = platform.unlink
127 unlink = platform.unlink
127 unlinkpath = platform.unlinkpath
128 unlinkpath = platform.unlinkpath
128 username = platform.username
129 username = platform.username
129
130
130 # Python compatibility
131 # Python compatibility
131
132
132 _notset = object()
133 _notset = object()
133
134
134 # disable Python's problematic floating point timestamps (issue4836)
135 # disable Python's problematic floating point timestamps (issue4836)
135 # (Python hypocritically says you shouldn't change this behavior in
136 # (Python hypocritically says you shouldn't change this behavior in
136 # libraries, and sure enough Mercurial is not a library.)
137 # libraries, and sure enough Mercurial is not a library.)
137 os.stat_float_times(False)
138 os.stat_float_times(False)
138
139
139 def safehasattr(thing, attr):
140 def safehasattr(thing, attr):
140 return getattr(thing, attr, _notset) is not _notset
141 return getattr(thing, attr, _notset) is not _notset
141
142
142 DIGESTS = {
143 DIGESTS = {
143 'md5': hashlib.md5,
144 'md5': hashlib.md5,
144 'sha1': hashlib.sha1,
145 'sha1': hashlib.sha1,
145 'sha512': hashlib.sha512,
146 'sha512': hashlib.sha512,
146 }
147 }
147 # List of digest types from strongest to weakest
148 # List of digest types from strongest to weakest
148 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
149 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
149
150
150 for k in DIGESTS_BY_STRENGTH:
151 for k in DIGESTS_BY_STRENGTH:
151 assert k in DIGESTS
152 assert k in DIGESTS
152
153
153 class digester(object):
154 class digester(object):
154 """helper to compute digests.
155 """helper to compute digests.
155
156
156 This helper can be used to compute one or more digests given their name.
157 This helper can be used to compute one or more digests given their name.
157
158
158 >>> d = digester(['md5', 'sha1'])
159 >>> d = digester(['md5', 'sha1'])
159 >>> d.update('foo')
160 >>> d.update('foo')
160 >>> [k for k in sorted(d)]
161 >>> [k for k in sorted(d)]
161 ['md5', 'sha1']
162 ['md5', 'sha1']
162 >>> d['md5']
163 >>> d['md5']
163 'acbd18db4cc2f85cedef654fccc4a4d8'
164 'acbd18db4cc2f85cedef654fccc4a4d8'
164 >>> d['sha1']
165 >>> d['sha1']
165 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
166 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
166 >>> digester.preferred(['md5', 'sha1'])
167 >>> digester.preferred(['md5', 'sha1'])
167 'sha1'
168 'sha1'
168 """
169 """
169
170
170 def __init__(self, digests, s=''):
171 def __init__(self, digests, s=''):
171 self._hashes = {}
172 self._hashes = {}
172 for k in digests:
173 for k in digests:
173 if k not in DIGESTS:
174 if k not in DIGESTS:
174 raise Abort(_('unknown digest type: %s') % k)
175 raise Abort(_('unknown digest type: %s') % k)
175 self._hashes[k] = DIGESTS[k]()
176 self._hashes[k] = DIGESTS[k]()
176 if s:
177 if s:
177 self.update(s)
178 self.update(s)
178
179
179 def update(self, data):
180 def update(self, data):
180 for h in self._hashes.values():
181 for h in self._hashes.values():
181 h.update(data)
182 h.update(data)
182
183
183 def __getitem__(self, key):
184 def __getitem__(self, key):
184 if key not in DIGESTS:
185 if key not in DIGESTS:
185 raise Abort(_('unknown digest type: %s') % k)
186 raise Abort(_('unknown digest type: %s') % k)
186 return self._hashes[key].hexdigest()
187 return self._hashes[key].hexdigest()
187
188
188 def __iter__(self):
189 def __iter__(self):
189 return iter(self._hashes)
190 return iter(self._hashes)
190
191
191 @staticmethod
192 @staticmethod
192 def preferred(supported):
193 def preferred(supported):
193 """returns the strongest digest type in both supported and DIGESTS."""
194 """returns the strongest digest type in both supported and DIGESTS."""
194
195
195 for k in DIGESTS_BY_STRENGTH:
196 for k in DIGESTS_BY_STRENGTH:
196 if k in supported:
197 if k in supported:
197 return k
198 return k
198 return None
199 return None
199
200
200 class digestchecker(object):
201 class digestchecker(object):
201 """file handle wrapper that additionally checks content against a given
202 """file handle wrapper that additionally checks content against a given
202 size and digests.
203 size and digests.
203
204
204 d = digestchecker(fh, size, {'md5': '...'})
205 d = digestchecker(fh, size, {'md5': '...'})
205
206
206 When multiple digests are given, all of them are validated.
207 When multiple digests are given, all of them are validated.
207 """
208 """
208
209
209 def __init__(self, fh, size, digests):
210 def __init__(self, fh, size, digests):
210 self._fh = fh
211 self._fh = fh
211 self._size = size
212 self._size = size
212 self._got = 0
213 self._got = 0
213 self._digests = dict(digests)
214 self._digests = dict(digests)
214 self._digester = digester(self._digests.keys())
215 self._digester = digester(self._digests.keys())
215
216
216 def read(self, length=-1):
217 def read(self, length=-1):
217 content = self._fh.read(length)
218 content = self._fh.read(length)
218 self._digester.update(content)
219 self._digester.update(content)
219 self._got += len(content)
220 self._got += len(content)
220 return content
221 return content
221
222
222 def validate(self):
223 def validate(self):
223 if self._size != self._got:
224 if self._size != self._got:
224 raise Abort(_('size mismatch: expected %d, got %d') %
225 raise Abort(_('size mismatch: expected %d, got %d') %
225 (self._size, self._got))
226 (self._size, self._got))
226 for k, v in self._digests.items():
227 for k, v in self._digests.items():
227 if v != self._digester[k]:
228 if v != self._digester[k]:
228 # i18n: first parameter is a digest name
229 # i18n: first parameter is a digest name
229 raise Abort(_('%s mismatch: expected %s, got %s') %
230 raise Abort(_('%s mismatch: expected %s, got %s') %
230 (k, v, self._digester[k]))
231 (k, v, self._digester[k]))
231
232
232 try:
233 try:
233 buffer = buffer
234 buffer = buffer
234 except NameError:
235 except NameError:
235 if not pycompat.ispy3:
236 if not pycompat.ispy3:
236 def buffer(sliceable, offset=0):
237 def buffer(sliceable, offset=0):
237 return sliceable[offset:]
238 return sliceable[offset:]
238 else:
239 else:
239 def buffer(sliceable, offset=0):
240 def buffer(sliceable, offset=0):
240 return memoryview(sliceable)[offset:]
241 return memoryview(sliceable)[offset:]
241
242
242 closefds = os.name == 'posix'
243 closefds = os.name == 'posix'
243
244
244 _chunksize = 4096
245 _chunksize = 4096
245
246
246 class bufferedinputpipe(object):
247 class bufferedinputpipe(object):
247 """a manually buffered input pipe
248 """a manually buffered input pipe
248
249
249 Python will not let us use buffered IO and lazy reading with 'polling' at
250 Python will not let us use buffered IO and lazy reading with 'polling' at
250 the same time. We cannot probe the buffer state and select will not detect
251 the same time. We cannot probe the buffer state and select will not detect
251 that data are ready to read if they are already buffered.
252 that data are ready to read if they are already buffered.
252
253
253 This class let us work around that by implementing its own buffering
254 This class let us work around that by implementing its own buffering
254 (allowing efficient readline) while offering a way to know if the buffer is
255 (allowing efficient readline) while offering a way to know if the buffer is
255 empty from the output (allowing collaboration of the buffer with polling).
256 empty from the output (allowing collaboration of the buffer with polling).
256
257
257 This class lives in the 'util' module because it makes use of the 'os'
258 This class lives in the 'util' module because it makes use of the 'os'
258 module from the python stdlib.
259 module from the python stdlib.
259 """
260 """
260
261
261 def __init__(self, input):
262 def __init__(self, input):
262 self._input = input
263 self._input = input
263 self._buffer = []
264 self._buffer = []
264 self._eof = False
265 self._eof = False
265 self._lenbuf = 0
266 self._lenbuf = 0
266
267
267 @property
268 @property
268 def hasbuffer(self):
269 def hasbuffer(self):
269 """True is any data is currently buffered
270 """True is any data is currently buffered
270
271
271 This will be used externally a pre-step for polling IO. If there is
272 This will be used externally a pre-step for polling IO. If there is
272 already data then no polling should be set in place."""
273 already data then no polling should be set in place."""
273 return bool(self._buffer)
274 return bool(self._buffer)
274
275
275 @property
276 @property
276 def closed(self):
277 def closed(self):
277 return self._input.closed
278 return self._input.closed
278
279
279 def fileno(self):
280 def fileno(self):
280 return self._input.fileno()
281 return self._input.fileno()
281
282
282 def close(self):
283 def close(self):
283 return self._input.close()
284 return self._input.close()
284
285
285 def read(self, size):
286 def read(self, size):
286 while (not self._eof) and (self._lenbuf < size):
287 while (not self._eof) and (self._lenbuf < size):
287 self._fillbuffer()
288 self._fillbuffer()
288 return self._frombuffer(size)
289 return self._frombuffer(size)
289
290
290 def readline(self, *args, **kwargs):
291 def readline(self, *args, **kwargs):
291 if 1 < len(self._buffer):
292 if 1 < len(self._buffer):
292 # this should not happen because both read and readline end with a
293 # this should not happen because both read and readline end with a
293 # _frombuffer call that collapse it.
294 # _frombuffer call that collapse it.
294 self._buffer = [''.join(self._buffer)]
295 self._buffer = [''.join(self._buffer)]
295 self._lenbuf = len(self._buffer[0])
296 self._lenbuf = len(self._buffer[0])
296 lfi = -1
297 lfi = -1
297 if self._buffer:
298 if self._buffer:
298 lfi = self._buffer[-1].find('\n')
299 lfi = self._buffer[-1].find('\n')
299 while (not self._eof) and lfi < 0:
300 while (not self._eof) and lfi < 0:
300 self._fillbuffer()
301 self._fillbuffer()
301 if self._buffer:
302 if self._buffer:
302 lfi = self._buffer[-1].find('\n')
303 lfi = self._buffer[-1].find('\n')
303 size = lfi + 1
304 size = lfi + 1
304 if lfi < 0: # end of file
305 if lfi < 0: # end of file
305 size = self._lenbuf
306 size = self._lenbuf
306 elif 1 < len(self._buffer):
307 elif 1 < len(self._buffer):
307 # we need to take previous chunks into account
308 # we need to take previous chunks into account
308 size += self._lenbuf - len(self._buffer[-1])
309 size += self._lenbuf - len(self._buffer[-1])
309 return self._frombuffer(size)
310 return self._frombuffer(size)
310
311
311 def _frombuffer(self, size):
312 def _frombuffer(self, size):
312 """return at most 'size' data from the buffer
313 """return at most 'size' data from the buffer
313
314
314 The data are removed from the buffer."""
315 The data are removed from the buffer."""
315 if size == 0 or not self._buffer:
316 if size == 0 or not self._buffer:
316 return ''
317 return ''
317 buf = self._buffer[0]
318 buf = self._buffer[0]
318 if 1 < len(self._buffer):
319 if 1 < len(self._buffer):
319 buf = ''.join(self._buffer)
320 buf = ''.join(self._buffer)
320
321
321 data = buf[:size]
322 data = buf[:size]
322 buf = buf[len(data):]
323 buf = buf[len(data):]
323 if buf:
324 if buf:
324 self._buffer = [buf]
325 self._buffer = [buf]
325 self._lenbuf = len(buf)
326 self._lenbuf = len(buf)
326 else:
327 else:
327 self._buffer = []
328 self._buffer = []
328 self._lenbuf = 0
329 self._lenbuf = 0
329 return data
330 return data
330
331
331 def _fillbuffer(self):
332 def _fillbuffer(self):
332 """read data to the buffer"""
333 """read data to the buffer"""
333 data = os.read(self._input.fileno(), _chunksize)
334 data = os.read(self._input.fileno(), _chunksize)
334 if not data:
335 if not data:
335 self._eof = True
336 self._eof = True
336 else:
337 else:
337 self._lenbuf += len(data)
338 self._lenbuf += len(data)
338 self._buffer.append(data)
339 self._buffer.append(data)
339
340
340 def popen2(cmd, env=None, newlines=False):
341 def popen2(cmd, env=None, newlines=False):
341 # Setting bufsize to -1 lets the system decide the buffer size.
342 # Setting bufsize to -1 lets the system decide the buffer size.
342 # The default for bufsize is 0, meaning unbuffered. This leads to
343 # The default for bufsize is 0, meaning unbuffered. This leads to
343 # poor performance on Mac OS X: http://bugs.python.org/issue4194
344 # poor performance on Mac OS X: http://bugs.python.org/issue4194
344 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
345 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
345 close_fds=closefds,
346 close_fds=closefds,
346 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
347 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
347 universal_newlines=newlines,
348 universal_newlines=newlines,
348 env=env)
349 env=env)
349 return p.stdin, p.stdout
350 return p.stdin, p.stdout
350
351
351 def popen3(cmd, env=None, newlines=False):
352 def popen3(cmd, env=None, newlines=False):
352 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
353 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
353 return stdin, stdout, stderr
354 return stdin, stdout, stderr
354
355
355 def popen4(cmd, env=None, newlines=False, bufsize=-1):
356 def popen4(cmd, env=None, newlines=False, bufsize=-1):
356 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
357 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
357 close_fds=closefds,
358 close_fds=closefds,
358 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
359 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
359 stderr=subprocess.PIPE,
360 stderr=subprocess.PIPE,
360 universal_newlines=newlines,
361 universal_newlines=newlines,
361 env=env)
362 env=env)
362 return p.stdin, p.stdout, p.stderr, p
363 return p.stdin, p.stdout, p.stderr, p
363
364
364 def version():
365 def version():
365 """Return version information if available."""
366 """Return version information if available."""
366 try:
367 try:
367 from . import __version__
368 from . import __version__
368 return __version__.version
369 return __version__.version
369 except ImportError:
370 except ImportError:
370 return 'unknown'
371 return 'unknown'
371
372
372 def versiontuple(v=None, n=4):
373 def versiontuple(v=None, n=4):
373 """Parses a Mercurial version string into an N-tuple.
374 """Parses a Mercurial version string into an N-tuple.
374
375
375 The version string to be parsed is specified with the ``v`` argument.
376 The version string to be parsed is specified with the ``v`` argument.
376 If it isn't defined, the current Mercurial version string will be parsed.
377 If it isn't defined, the current Mercurial version string will be parsed.
377
378
378 ``n`` can be 2, 3, or 4. Here is how some version strings map to
379 ``n`` can be 2, 3, or 4. Here is how some version strings map to
379 returned values:
380 returned values:
380
381
381 >>> v = '3.6.1+190-df9b73d2d444'
382 >>> v = '3.6.1+190-df9b73d2d444'
382 >>> versiontuple(v, 2)
383 >>> versiontuple(v, 2)
383 (3, 6)
384 (3, 6)
384 >>> versiontuple(v, 3)
385 >>> versiontuple(v, 3)
385 (3, 6, 1)
386 (3, 6, 1)
386 >>> versiontuple(v, 4)
387 >>> versiontuple(v, 4)
387 (3, 6, 1, '190-df9b73d2d444')
388 (3, 6, 1, '190-df9b73d2d444')
388
389
389 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
390 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
390 (3, 6, 1, '190-df9b73d2d444+20151118')
391 (3, 6, 1, '190-df9b73d2d444+20151118')
391
392
392 >>> v = '3.6'
393 >>> v = '3.6'
393 >>> versiontuple(v, 2)
394 >>> versiontuple(v, 2)
394 (3, 6)
395 (3, 6)
395 >>> versiontuple(v, 3)
396 >>> versiontuple(v, 3)
396 (3, 6, None)
397 (3, 6, None)
397 >>> versiontuple(v, 4)
398 >>> versiontuple(v, 4)
398 (3, 6, None, None)
399 (3, 6, None, None)
399
400
400 >>> v = '3.9-rc'
401 >>> v = '3.9-rc'
401 >>> versiontuple(v, 2)
402 >>> versiontuple(v, 2)
402 (3, 9)
403 (3, 9)
403 >>> versiontuple(v, 3)
404 >>> versiontuple(v, 3)
404 (3, 9, None)
405 (3, 9, None)
405 >>> versiontuple(v, 4)
406 >>> versiontuple(v, 4)
406 (3, 9, None, 'rc')
407 (3, 9, None, 'rc')
407
408
408 >>> v = '3.9-rc+2-02a8fea4289b'
409 >>> v = '3.9-rc+2-02a8fea4289b'
409 >>> versiontuple(v, 2)
410 >>> versiontuple(v, 2)
410 (3, 9)
411 (3, 9)
411 >>> versiontuple(v, 3)
412 >>> versiontuple(v, 3)
412 (3, 9, None)
413 (3, 9, None)
413 >>> versiontuple(v, 4)
414 >>> versiontuple(v, 4)
414 (3, 9, None, 'rc+2-02a8fea4289b')
415 (3, 9, None, 'rc+2-02a8fea4289b')
415 """
416 """
416 if not v:
417 if not v:
417 v = version()
418 v = version()
418 parts = remod.split('[\+-]', v, 1)
419 parts = remod.split('[\+-]', v, 1)
419 if len(parts) == 1:
420 if len(parts) == 1:
420 vparts, extra = parts[0], None
421 vparts, extra = parts[0], None
421 else:
422 else:
422 vparts, extra = parts
423 vparts, extra = parts
423
424
424 vints = []
425 vints = []
425 for i in vparts.split('.'):
426 for i in vparts.split('.'):
426 try:
427 try:
427 vints.append(int(i))
428 vints.append(int(i))
428 except ValueError:
429 except ValueError:
429 break
430 break
430 # (3, 6) -> (3, 6, None)
431 # (3, 6) -> (3, 6, None)
431 while len(vints) < 3:
432 while len(vints) < 3:
432 vints.append(None)
433 vints.append(None)
433
434
434 if n == 2:
435 if n == 2:
435 return (vints[0], vints[1])
436 return (vints[0], vints[1])
436 if n == 3:
437 if n == 3:
437 return (vints[0], vints[1], vints[2])
438 return (vints[0], vints[1], vints[2])
438 if n == 4:
439 if n == 4:
439 return (vints[0], vints[1], vints[2], extra)
440 return (vints[0], vints[1], vints[2], extra)
440
441
441 # used by parsedate
442 # used by parsedate
442 defaultdateformats = (
443 defaultdateformats = (
443 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
444 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
444 '%Y-%m-%dT%H:%M', # without seconds
445 '%Y-%m-%dT%H:%M', # without seconds
445 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
446 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
446 '%Y-%m-%dT%H%M', # without seconds
447 '%Y-%m-%dT%H%M', # without seconds
447 '%Y-%m-%d %H:%M:%S', # our common legal variant
448 '%Y-%m-%d %H:%M:%S', # our common legal variant
448 '%Y-%m-%d %H:%M', # without seconds
449 '%Y-%m-%d %H:%M', # without seconds
449 '%Y-%m-%d %H%M%S', # without :
450 '%Y-%m-%d %H%M%S', # without :
450 '%Y-%m-%d %H%M', # without seconds
451 '%Y-%m-%d %H%M', # without seconds
451 '%Y-%m-%d %I:%M:%S%p',
452 '%Y-%m-%d %I:%M:%S%p',
452 '%Y-%m-%d %H:%M',
453 '%Y-%m-%d %H:%M',
453 '%Y-%m-%d %I:%M%p',
454 '%Y-%m-%d %I:%M%p',
454 '%Y-%m-%d',
455 '%Y-%m-%d',
455 '%m-%d',
456 '%m-%d',
456 '%m/%d',
457 '%m/%d',
457 '%m/%d/%y',
458 '%m/%d/%y',
458 '%m/%d/%Y',
459 '%m/%d/%Y',
459 '%a %b %d %H:%M:%S %Y',
460 '%a %b %d %H:%M:%S %Y',
460 '%a %b %d %I:%M:%S%p %Y',
461 '%a %b %d %I:%M:%S%p %Y',
461 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
462 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
462 '%b %d %H:%M:%S %Y',
463 '%b %d %H:%M:%S %Y',
463 '%b %d %I:%M:%S%p %Y',
464 '%b %d %I:%M:%S%p %Y',
464 '%b %d %H:%M:%S',
465 '%b %d %H:%M:%S',
465 '%b %d %I:%M:%S%p',
466 '%b %d %I:%M:%S%p',
466 '%b %d %H:%M',
467 '%b %d %H:%M',
467 '%b %d %I:%M%p',
468 '%b %d %I:%M%p',
468 '%b %d %Y',
469 '%b %d %Y',
469 '%b %d',
470 '%b %d',
470 '%H:%M:%S',
471 '%H:%M:%S',
471 '%I:%M:%S%p',
472 '%I:%M:%S%p',
472 '%H:%M',
473 '%H:%M',
473 '%I:%M%p',
474 '%I:%M%p',
474 )
475 )
475
476
476 extendeddateformats = defaultdateformats + (
477 extendeddateformats = defaultdateformats + (
477 "%Y",
478 "%Y",
478 "%Y-%m",
479 "%Y-%m",
479 "%b",
480 "%b",
480 "%b %Y",
481 "%b %Y",
481 )
482 )
482
483
483 def cachefunc(func):
484 def cachefunc(func):
484 '''cache the result of function calls'''
485 '''cache the result of function calls'''
485 # XXX doesn't handle keywords args
486 # XXX doesn't handle keywords args
486 if func.__code__.co_argcount == 0:
487 if func.__code__.co_argcount == 0:
487 cache = []
488 cache = []
488 def f():
489 def f():
489 if len(cache) == 0:
490 if len(cache) == 0:
490 cache.append(func())
491 cache.append(func())
491 return cache[0]
492 return cache[0]
492 return f
493 return f
493 cache = {}
494 cache = {}
494 if func.__code__.co_argcount == 1:
495 if func.__code__.co_argcount == 1:
495 # we gain a small amount of time because
496 # we gain a small amount of time because
496 # we don't need to pack/unpack the list
497 # we don't need to pack/unpack the list
497 def f(arg):
498 def f(arg):
498 if arg not in cache:
499 if arg not in cache:
499 cache[arg] = func(arg)
500 cache[arg] = func(arg)
500 return cache[arg]
501 return cache[arg]
501 else:
502 else:
502 def f(*args):
503 def f(*args):
503 if args not in cache:
504 if args not in cache:
504 cache[args] = func(*args)
505 cache[args] = func(*args)
505 return cache[args]
506 return cache[args]
506
507
507 return f
508 return f
508
509
509 class sortdict(dict):
510 class sortdict(dict):
510 '''a simple sorted dictionary'''
511 '''a simple sorted dictionary'''
511 def __init__(self, data=None):
512 def __init__(self, data=None):
512 self._list = []
513 self._list = []
513 if data:
514 if data:
514 self.update(data)
515 self.update(data)
515 def copy(self):
516 def copy(self):
516 return sortdict(self)
517 return sortdict(self)
517 def __setitem__(self, key, val):
518 def __setitem__(self, key, val):
518 if key in self:
519 if key in self:
519 self._list.remove(key)
520 self._list.remove(key)
520 self._list.append(key)
521 self._list.append(key)
521 dict.__setitem__(self, key, val)
522 dict.__setitem__(self, key, val)
522 def __iter__(self):
523 def __iter__(self):
523 return self._list.__iter__()
524 return self._list.__iter__()
524 def update(self, src):
525 def update(self, src):
525 if isinstance(src, dict):
526 if isinstance(src, dict):
526 src = src.iteritems()
527 src = src.iteritems()
527 for k, v in src:
528 for k, v in src:
528 self[k] = v
529 self[k] = v
529 def clear(self):
530 def clear(self):
530 dict.clear(self)
531 dict.clear(self)
531 self._list = []
532 self._list = []
532 def items(self):
533 def items(self):
533 return [(k, self[k]) for k in self._list]
534 return [(k, self[k]) for k in self._list]
534 def __delitem__(self, key):
535 def __delitem__(self, key):
535 dict.__delitem__(self, key)
536 dict.__delitem__(self, key)
536 self._list.remove(key)
537 self._list.remove(key)
537 def pop(self, key, *args, **kwargs):
538 def pop(self, key, *args, **kwargs):
538 dict.pop(self, key, *args, **kwargs)
539 dict.pop(self, key, *args, **kwargs)
539 try:
540 try:
540 self._list.remove(key)
541 self._list.remove(key)
541 except ValueError:
542 except ValueError:
542 pass
543 pass
543 def keys(self):
544 def keys(self):
544 return self._list
545 return self._list
545 def iterkeys(self):
546 def iterkeys(self):
546 return self._list.__iter__()
547 return self._list.__iter__()
547 def iteritems(self):
548 def iteritems(self):
548 for k in self._list:
549 for k in self._list:
549 yield k, self[k]
550 yield k, self[k]
550 def insert(self, index, key, val):
551 def insert(self, index, key, val):
551 self._list.insert(index, key)
552 self._list.insert(index, key)
552 dict.__setitem__(self, key, val)
553 dict.__setitem__(self, key, val)
553 def __repr__(self):
554 def __repr__(self):
554 if not self:
555 if not self:
555 return '%s()' % self.__class__.__name__
556 return '%s()' % self.__class__.__name__
556 return '%s(%r)' % (self.__class__.__name__, self.items())
557 return '%s(%r)' % (self.__class__.__name__, self.items())
557
558
558 class _lrucachenode(object):
559 class _lrucachenode(object):
559 """A node in a doubly linked list.
560 """A node in a doubly linked list.
560
561
561 Holds a reference to nodes on either side as well as a key-value
562 Holds a reference to nodes on either side as well as a key-value
562 pair for the dictionary entry.
563 pair for the dictionary entry.
563 """
564 """
564 __slots__ = (u'next', u'prev', u'key', u'value')
565 __slots__ = (u'next', u'prev', u'key', u'value')
565
566
566 def __init__(self):
567 def __init__(self):
567 self.next = None
568 self.next = None
568 self.prev = None
569 self.prev = None
569
570
570 self.key = _notset
571 self.key = _notset
571 self.value = None
572 self.value = None
572
573
573 def markempty(self):
574 def markempty(self):
574 """Mark the node as emptied."""
575 """Mark the node as emptied."""
575 self.key = _notset
576 self.key = _notset
576
577
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # Backing mapping of key -> _lrucachenode.
        self._cache = {}

        # Start with a single node linked to itself: a circular list of one.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        # _size counts allocated nodes (>= number of live entries), not
        # the number of cached items; see _addcapacity().
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # A read counts as an access: refresh the node's recency.
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        # NOTE: unlike __getitem__, get() does NOT refresh recency.
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        # Empty every node in place; the ring of allocated nodes is kept.
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    # Single-argument functions get a fast path keyed on the bare argument;
    # everything else is keyed on the full positional-argument tuple.
    onearg = func.__code__.co_argcount == 1

    if onearg:
        def f(arg):
            if arg in cache:
                # Hit: refresh recency only.
                order.remove(arg)
            else:
                # Miss: evict the least recently used entry once more
                # than 20 results are already cached, then compute.
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
class propertycache(object):
    """Non-data descriptor caching the wrapped function's result.

    The first attribute access invokes ``func`` and stores the result in
    the instance ``__dict__`` under the same name, so subsequent accesses
    never reach the descriptor again.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # Feed s on the child's stdin and collect its stdout; stderr is
    # left attached to ours. closefds is the platform default defined
    # elsewhere in this module.
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate(s)
    return stdout
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # Write the input to a temp file for the command to read.
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # Reserve an output path; the command itself writes to it.
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # On OpenVMS an odd status means success.
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # Best-effort cleanup of both temp files.
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
# Maps a filter-command prefix to the function implementing that style of
# filtering; dispatched by filter() below.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # Dispatch on a recognized prefix ('tempfile:' or 'pipe:'); anything
    # else is treated as a plain pipe command.
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            rest = cmd[len(prefix):].lstrip()
            return fn(s, rest)
    return pipefilter(s, cmd)
def binary(s):
    """return true if a string is binary data"""
    # Empty/missing input is never binary; otherwise the presence of a
    # NUL character is the heuristic.
    if not s:
        return False
    return '\0' in s
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # Index of the highest set bit; 0 for x == 0.
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    nbytes = 0
    for piece in source:
        pending.append(piece)
        nbytes += len(piece)
        if nbytes >= min:
            if min < max:
                # Grow the threshold: at least double it, or jump to the
                # magnitude of what we just emitted, capped at max.
                min = min << 1
                nmin = 1 << log2(nbytes)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            nbytes = 0
            pending = []
    # Flush whatever is left, even if below the threshold.
    if pending:
        yield ''.join(pending)
# Module-level alias so util callers can raise Abort without importing error.
Abort = error.Abort
def always(fn):
    """Matcher predicate accepting every input."""
    return True
def never(fn):
    """Matcher predicate rejecting every input."""
    return False
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    # On 2.7+ the workaround is unnecessary; hand the function back as-is.
    if sys.version_info >= (2, 7):
        return func

    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only re-enable if the collector was on when we started.
            if wasenabled:
                gc.enable()
    return wrapper
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # Different drives (Windows): no relative path exists; anchor
            # n2 under root instead.
            return os.path.join(root, localpath(n2))
        # Make n2 absolute too so both sides are comparable below.
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # Drop the shared leading components of both paths.
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # Climb out of what remains of n1, then descend into n2.
    return os.sep.join((['..'] * len(a)) + b) or '.'
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).

    Note: returns the first truthy value of the or-chain, so the result is
    truthy/falsy rather than strictly True/False.
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen(u"__main__")) # tools/freeze
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

# Cached absolute location of the 'hg' executable; populated lazily by
# hgexecutable() below.
_hgexecutable = None
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is cached in the module-level _hgexecutable; only the
    first call performs the lookup.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # Explicit override via the HG environment variable wins.
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # Running from an 'hg' script: use that script's path.
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # Stores into the module-level cache read by hgexecutable().
    global _hgexecutable
    _hgexecutable = path
def _isstdout(f):
    """Return truthy if f refers to the process's real stdout."""
    getfd = getattr(f, 'fileno', None)
    if not getfd:
        # No fileno() method (e.g. a StringIO): return the falsy value
        # itself, matching boolean use by callers.
        return getfd
    return getfd() == sys.__stdout__.fileno()
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    # Flush our own stdout so child output interleaves correctly.
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # Build the child environment from ours plus the shell-converted
        # caller overrides, and always expose the hg executable as $HG.
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # Capture stdout+stderr and stream them line by line into out.
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # On OpenVMS an odd status means success.
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback depth of 1 means the TypeError came from the
            # call itself (bad arguments), not from inside func; report
            # it as a signature error. Deeper errors are re-raised as-is.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # Remember the old stat so we can detect mtime ambiguity below.
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # Recreate the symlink rather than copying its target's content.
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) pair: whether hardlinking is still being
    attempted, and how many files were processed.
    """
    num = 0

    if hardlink is None:
        # Hardlinks only work within a filesystem; try them when src and
        # dst's parent share a device.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset the child's progress by files already counted.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Linking failed: fall back to copying for this and all
                # subsequent files.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
# Base filenames (before any extension) that Windows reserves for devices.
_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# Characters that may never appear in a Windows filename.
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters (< 0x20) are never valid on Windows
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # reserved device names apply to the part before the first dot
        # (e.g. "con.xml" is still reserved)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # The Windows API silently strips trailing dots and spaces, so
        # reject them -- except for the legitimate components '.' and
        # '..'.  (The old code spelled this as "n not in '..'", a
        # substring test that happens to be equivalent for non-empty n
        # but is a readability trap; the tuple test is explicit.)
        if t in '. ' and n not in ('.', '..'):
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1175
1176
# Use the Windows-specific checker above on NT; other platforms defer to
# their (typically permissive) platform implementation.
checkosfilename = (checkwinfilename if os.name == 'nt'
                   else platform.checkosfilename)
1180
1181
def makelock(info, pathname):
    """Create a lock at pathname whose payload is the string info.

    A symlink is preferred: creation is atomic and the payload can be
    read back without opening a file.  If the platform has no symlinks,
    fall back to exclusively creating a regular file.  An already-held
    lock (EEXIST) is propagated to the caller.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # fallback: regular file, created exclusively so racing lockers fail
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
1193
1194
def readlock(pathname):
    """Return the payload of the lock at pathname.

    Mirrors makelock(): first try to read it as a symlink, then fall
    back to reading a regular file.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported here
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1206
1207
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no file descriptor available; fall back to the path
        return os.stat(fp.name)
    return os.fstat(fd)
1213
1214
1214 # File system features
1215 # File system features
1215
1216
1216 def fscasesensitive(path):
1217 def fscasesensitive(path):
1217 """
1218 """
1218 Return true if the given path is on a case-sensitive filesystem
1219 Return true if the given path is on a case-sensitive filesystem
1219
1220
1220 Requires a path (like /foo/.hg) ending with a foldable final
1221 Requires a path (like /foo/.hg) ending with a foldable final
1221 directory component.
1222 directory component.
1222 """
1223 """
1223 s1 = os.lstat(path)
1224 s1 = os.lstat(path)
1224 d, b = os.path.split(path)
1225 d, b = os.path.split(path)
1225 b2 = b.upper()
1226 b2 = b.upper()
1226 if b == b2:
1227 if b == b2:
1227 b2 = b.lower()
1228 b2 = b.lower()
1228 if b == b2:
1229 if b == b2:
1229 return True # no evidence against case sensitivity
1230 return True # no evidence against case sensitivity
1230 p2 = os.path.join(d, b2)
1231 p2 = os.path.join(d, b2)
1231 try:
1232 try:
1232 s2 = os.lstat(p2)
1233 s2 = os.lstat(p2)
1233 if s2 == s1:
1234 if s2 == s1:
1234 return False
1235 return False
1235 return True
1236 return True
1236 except OSError:
1237 except OSError:
1237 return True
1238 return True
1238
1239
# Probe for the optional re2 regex engine.  _re2 is a tri-state flag:
# None means "importable but not yet validated" (checked lazily by
# _re._checkre2), False means "unavailable".
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1244
1245
class _re(object):
    """Facade over the stdlib re module that transparently prefers re2."""

    def _checkre2(self):
        # One-time validation that the re2 binding actually works
        # (see issue3964: a broken install can import yet fail to match).
        global _re2
        try:
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        supported = remod.IGNORECASE | remod.MULTILINE
        if _re2 and not (flags & ~supported):
            # re2 takes inline flags rather than a flags argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks; fall through to re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        return remod.escape
1287
1288
# Singleton facade instance; the rest of the code uses this as "util.re".
re = _re()

# Per-directory listing cache (normcased name -> on-disk name), used by
# fspath() below.
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry name -> on-disk entry name
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Escape backslashes so they are literals inside the regex character
    # classes below.  The previous code called seps.replace() but threw
    # the result away (str.replace does not mutate), making the escaping
    # a no-op; it only worked by accident because '\\' inside [...] is
    # itself a valid escaped backslash.  Assign the result so the
    # escaping actually takes effect.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1332
1333
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so probe with sibling temp files instead, to
    # work around issue2543 (or testfile may get lost on Samba shares)
    probe = testfile + ".hgtmp1"
    if os.path.lexists(probe):
        return False
    try:
        posixfile(probe, 'w').close()
    except IOError:
        try:
            os.unlink(probe)
        except OSError:
            pass
        return False

    linked = testfile + ".hgtmp2"
    fh = None
    try:
        oslink(probe, linked)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fh = posixfile(linked)
        return nlinks(linked) > 1
    except OSError:
        return False
    finally:
        if fh is not None:
            fh.close()
        for name in (probe, linked):
            try:
                os.unlink(name)
            except OSError:
                pass
1368
1369
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # os.altsep may be None, in which case this short-circuits falsy,
    # matching the original "or ... and ..." expression exactly
    return os.altsep and path.endswith(os.altsep)
1372
1373
def splitpath(path):
    '''Split path by os.sep.

    Deliberately ignores os.altsep: this is merely a named form of
    "path.split(os.sep)".  Run os.path.normpath() on the argument first
    if separators may be mixed or redundant.'''
    return path.split(os.sep)
1380
1381
1381 def gui():
1382 def gui():
1382 '''Are we running in a GUI?'''
1383 '''Are we running in a GUI?'''
1383 if sys.platform == 'darwin':
1384 if sys.platform == 'darwin':
1384 if 'SSH_CONNECTION' in os.environ:
1385 if 'SSH_CONNECTION' in os.environ:
1385 # handle SSH access to a box where the user is logged in
1386 # handle SSH access to a box where the user is logged in
1386 return False
1387 return False
1387 elif getattr(osutil, 'isgui', None):
1388 elif getattr(osutil, 'isgui', None):
1388 # check if a CoreGraphics session is available
1389 # check if a CoreGraphics session is available
1389 return osutil.isgui()
1390 return osutil.isgui()
1390 else:
1391 else:
1391 # pure build; use a safe default
1392 # pure build; use a safe default
1392 return True
1393 return True
1393 else:
1394 else:
1394 return os.name == "nt" or os.environ.get("DISPLAY")
1395 return os.name == "nt" or os.environ.get("DISPLAY")
1395
1396
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirname, filename = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % filename, dir=dirname)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            # nothing to copy if the original does not exist yet
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave the half-written temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1434
1435
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # missing file: represented as stat = None
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            new, prev = self.stat, old.stat
            # Once timestamp ambiguity is avoided (see isambig), size +
            # ctime + mtime together detect any change on any platform.
            return (new.st_size == prev.st_size and
                    new.st_ctime == prev.st_ctime and
                    new.st_mtime == prev.st_mtime)
        except AttributeError:
            # one side is missing (stat is None) or not a filestat
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self == other
1500
1501
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes go to a temporary copy of the original file.  Call
    close() when done writing; the temporary copy is then renamed to
    the original name, making the changes visible.  If the object is
    destroyed without being closed, all writes are discarded.

    The checkambig constructor argument cooperates with filestat, and
    is useful only if the target file is guarded by some lock
    (e.g. repo.lock or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if self._fp.closed:
            return
        self._fp.close()
        target = localpath(self.__name)
        oldstat = self._checkambig and filestat(target)
        if not (oldstat and oldstat.stat):
            # no ambiguity checking requested, or no pre-existing target
            rename(self._tempname, target)
            return
        rename(self._tempname, target)
        newstat = filestat(target)
        if newstat.isambig(oldstat):
            # stat of changed file is ambiguous to original one; nudge
            # mtime forward so a later comparison can see the change
            advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
            os.utime(target, (advanced, advanced))

    def discard(self):
        if self._fp.closed:
            return
        try:
            os.unlink(self._tempname)
        except OSError:
            pass
        self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # keep the changes on clean exit, drop them on exception
        if exctype is not None:
            self.discard()
        else:
            self.close()
1563
1564
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        # missing parent: create it first, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as exc:
            # EEXIST here means we lost a creation race; that's fine
            if exc.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1591
1592
def readfile(path):
    """Return the entire content of the file at path, as bytes."""
    with open(path, 'rb') as f:
        return f.read()
1595
1596
def writefile(path, text):
    """Overwrite the file at path with text (bytes)."""
    with open(path, 'wb') as f:
        f.write(text)
1599
1600
def appendfile(path, text):
    """Append text (bytes) to the file at path, creating it if absent."""
    with open(path, 'ab') as f:
        f.write(text)
1603
1604
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # Split oversized (> 1M) chunks into 256k pieces so no single
            # chunk dominates memory usage downstream.
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2**18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        remaining = l
        pieces = []
        pending = self._queue
        while remaining > 0:
            # refill the queue from the underlying iterator
            if not pending:
                budget = 2**18
                for chunk in self.iter:
                    pending.append(chunk)
                    budget -= len(chunk)
                    if budget <= 0:
                        break
                if not pending:
                    break

            # Peek at the head chunk rather than popleft()/appendleft():
            # for partial reads that would cost two deque mutations plus
            # a new str for the unread remainder.  Tracking an offset
            # into the head chunk avoids both.
            chunk = pending[0]
            chunklen = len(chunk)
            offset = self._chunkoffset

            # consume the whole, untouched chunk
            if offset == 0 and remaining >= chunklen:
                remaining -= chunklen
                pending.popleft()
                pieces.append(chunk)
                # self._chunkoffset remains at 0
                continue

            unread = chunklen - offset

            # consume all of a partially-read chunk
            if remaining >= unread:
                remaining -= unread
                pending.popleft()
                # offset == 0 is handled by the block above, so this
                # slice is never a plain full copy
                pieces.append(chunk[offset:])
                self._chunkoffset = 0

            # take only part of the chunk
            else:
                pieces.append(chunk[offset:offset + remaining])
                self._chunkoffset += remaining
                remaining -= unread

        return ''.join(pieces)
1684
1685
def filechunkiter(f, size=65536, limit=None):
    """Generate the data in file ``f`` in chunks of at most ``size`` bytes.

    By default (size 65536) the whole file is consumed; when ``limit`` is
    given, at most that many bytes are read in total.  Chunks may be
    shorter than ``size`` when the final chunk is reached, or when ``f``
    is a socket or other file-like object that returns short reads.
    """
    assert size >= 0
    assert limit is None or limit >= 0
    remaining = limit
    while True:
        # Never request more than the remaining byte budget.
        if remaining is None:
            want = size
        else:
            want = min(remaining, size)
        # ``want and ...`` skips the read entirely once the budget is 0.
        data = want and f.read(want)
        if not data:
            break
        if remaining:
            remaining -= len(data)
        yield data
1705
1706
def makedate(timestamp=None):
    """Return ``timestamp`` (default: the current time) as a
    (unixtime, offset) tuple based off the local timezone."""
    when = time.time() if timestamp is None else timestamp
    if when < 0:
        raise Abort(_("negative timestamp: %d") % when,
                    hint=_("check your clock"))
    # The gap between the naive UTC and naive local renderings of the
    # same instant is exactly the local zone's offset from UTC.
    skew = (datetime.datetime.utcfromtimestamp(when) -
            datetime.datetime.fromtimestamp(when))
    return when, skew.days * 86400 + skew.seconds
1718
1719
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.  %1 and %2 in the format expand to
    the hour and minute parts of the offset (%z expands to both).

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # Positive offsets are west of UTC, hence rendered with '-'.
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = (format.replace("%z", "%1%2")
                        .replace("%1", "%c%02d" % (sign, q))
                        .replace("%2", "%02d" % r))
    # Clamp to the signed 32-bit range supported below.
    d = min(max(t - tz, -0x80000000), 0x7fffffff)
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    when = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return when.strftime(format)
1754
1755
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1758
1759
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair; offset is None when no timezone is
    recognized.  The offset is in seconds, negative for zones east of
    UTC (matching the convention used elsewhere in this module)."""
    # Named UTC aliases.
    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        polarity = 1 if s[-5] == "+" else -1
        hh = int(s[-4:-2])
        mm = int(s[-2:])
        return -polarity * (hh * 3600 + mm * 60), s[:-5].rstrip()

    # ISO8601 trailing Z (only when preceded by a digit)
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        polarity = 1 if s[-6] == "+" else -1
        hh = int(s[-5:-3])
        mm = int(s[-2:])
        return -polarity * (hh * 3600 + mm * 60), s[:-6]

    return None, s
1786
1787
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    ``defaults`` maps format-element groups (e.g. 'mb', 'HI') to a
    (rounded, today) pair of replacement strings used for elements the
    format does not mention.
    """
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # Append defaults for any element group the format leaves out,
    # scanning from most to least specific.
    usenow = False  # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"):  # decreasing specificity
        if any(("%" + p) in format for p in part):
            # A more specific element was given explicitly, so every
            # less specific element is measured relative to today.
            usenow = True
        else:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # no explicit zone: derive the offset from the local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1814
1815
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    # bias maps format-element groups (e.g. 'mb', 'HI') to replacement
    # default strings, letting callers round unspecified fields.
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates (possibly localized)
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: "unixtime offset", the internal representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                # time-of-day elements default to "00", date elements to "0"
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses successfully
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1891
1892
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified fields down (month/day 1) to get the
        # earliest timestamp the spec could mean
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unspecified fields up (Dec, 23:59:59) to get the latest
        # timestamp; shrink day-of-month until the date actually parses
        # (handles months shorter than 31 days and February)
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        # on or before: compare against the rounded-up bound
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        # on or after: compare against the rounded-down bound
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anywhere within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1967
1968
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    prefix, sep, rest = pattern.partition(':')
    if sep and prefix == 're':
        try:
            regex = remod.compile(rest)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', rest, regex.search
    if sep and prefix == 'literal':
        pattern = rest
    # anything else (no prefix, or an unknown one) is an exact match
    return 'literal', pattern, pattern.__eq__
2006
2007
def shortuser(user):
    """Return a short representation of a user name or email address.

    Drops any mail domain (after '@') and real-name prefix (before '<'),
    then truncates at the first space or dot of what remains.
    """
    # drop the mail domain
    user = user.split('@', 1)[0]
    # drop a "Real Name <" prefix
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # keep only up to the first space, then the first dot
    user = user.split(' ', 1)[0]
    user = user.split('.', 1)[0]
    return user
2022
2023
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the domain, then any "Real Name <" prefix
    user = user.split('@', 1)[0]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    return user
2032
2033
def email(author):
    '''get email of author.'''
    # the address is whatever sits between '<' and '>'; with no
    # brackets the whole string is taken as the address
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        return author[start:]
    return author[start:end]
2039
2040
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # encoding.trim is display-column aware; '...' is passed as the
    # suffix to use when the text is shortened (see encoding.trim).
    return encoding.trim(text, maxlength, ellipsis='...')
2043
2044
def unitcountfn(*unittable):
    """Return a function that renders a readable count of some quantity.

    ``unittable`` rows are (multiplier, divisor, format), ordered from
    the largest unit down; the first row whose threshold
    (divisor * multiplier) the count reaches supplies the format, and
    the final row's format is the fallback for counts below every
    threshold.
    """
    def render(count):
        for mult, unit, fmt in unittable:
            if count >= unit * mult:
                return fmt % (count / float(unit))
        # smaller than every threshold: use the last (smallest) unit
        return unittable[-1][2] % count
    return render
2054
2055
# Human-readable byte counts.  Rows run from the coarsest rendering
# (whole GB) down to raw bytes; unitcountfn uses the first row whose
# threshold (multiplier * divisor) the value reaches, so larger
# quantities are shown with fewer decimal places.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2067
2068
def uirepr(s):
    """repr() variant for user display.

    Collapses the doubled backslashes repr() produces, so Windows paths
    stay readable.
    """
    return '\\'.join(repr(s).split('\\\\'))
2071
2072
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # On first call this builds the wrapper class, then rebinds the
    # module-level name MBTextWrapper to the class itself so later calls
    # skip the class construction.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr in two so the first part occupies at most
            # space_left display columns (per encoding.ucolwidth).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            # Called when the next chunk is wider than a whole line; cut
            # it down if allowed, otherwise emit it alone on this line.
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2175
2176
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap ``line`` to ``width`` display columns.

    ``initindent`` prefixes the first output line, ``hangindent`` every
    subsequent one.  Arguments are byte strings in the local encoding;
    the wrapping itself is done on unicode so column widths come out
    right, and the result is re-encoded.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)

    def u(s):
        return s.decode(encoding.encoding, encoding.encodingmode)

    wrapper = MBTextWrapper(width=width,
                            initial_indent=u(initindent),
                            subsequent_indent=u(hangindent))
    return wrapper.fill(u(line)).encode(encoding.encoding)
2188
2189
def iterlines(iterator):
    """Yield each text line from an iterable of multi-line chunks."""
    for block in iterator:
        for text in block.splitlines():
            yield text
2193
2194
def expandpath(path):
    """Expand environment variables, then ~user constructs, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2196
2197
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    # frozen binary: report the executable itself, never a wrapper
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2211
2212
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # record the (pid, status) of whichever child just exited
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)  # absent on Windows
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() after noticing the child is gone, to
            # cover the race where it did its work and exited quickly
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the previous SIGCHLD handler so other child-process
        # machinery keeps receiving its notifications
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2246
2247
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if not fn:
        fn = lambda text: text
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        # strip the regex-escaping backslash, if any, to get the
        # literal prefix character used as its own escape
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))
    # group() includes the prefix character; slice it off for lookup
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
2271
2272
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric -- fall back to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2288
2289
# spellings accepted by parsebool(), all lowercase
_booleans = dict.fromkeys(('1', 'yes', 'true', 'on', 'always'), True)
_booleans.update(dict.fromkeys(('0', 'no', 'false', 'off', 'never'), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2299
2300
2300 _hexdig = '0123456789ABCDEFabcdef'
2301 _hextochr = dict((a + b, chr(int(a + b, 16)))
2301 _hextochr = dict((a + b, chr(int(a + b, 16)))
2302 for a in _hexdig for b in _hexdig)
2302 for a in string.hexdigits for b in string.hexdigits)
2303
2303
2304 def _urlunquote(s):
2304 def _urlunquote(s):
2305 """Decode HTTP/HTML % encoding.
2305 """Decode HTTP/HTML % encoding.
2306
2306
2307 >>> _urlunquote('abc%20def')
2307 >>> _urlunquote('abc%20def')
2308 'abc def'
2308 'abc def'
2309 """
2309 """
2310 res = s.split('%')
2310 res = s.split('%')
2311 # fastpath
2311 # fastpath
2312 if len(res) == 1:
2312 if len(res) == 1:
2313 return s
2313 return s
2314 s = res[0]
2314 s = res[0]
2315 for item in res[1:]:
2315 for item in res[1:]:
2316 try:
2316 try:
2317 s += _hextochr[item[:2]] + item[2:]
2317 s += _hextochr[item[:2]] + item[2:]
2318 except KeyError:
2318 except KeyError:
2319 s += '%' + item
2319 s += '%' + item
2320 except UnicodeDecodeError:
2320 except UnicodeDecodeError:
2321 s += unichr(int(item[:2], 16)) + item[2:]
2321 s += unichr(int(item[:2], 16)) + item[2:]
2322 return s
2322 return s
2323
2323
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url('')
    <url path: ''>
    >>> url('#a')
    <url path: '', fragment: 'a'>
    >>> url('http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url('http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url('http:')
    <url scheme: 'http'>
    """

    # characters left unescaped when quoting user/passwd components
    _safechars = "!~*'()+"
    # characters left unescaped when quoting path/fragment components
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        """Return a debugging representation listing the set components."""
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url-without-credentials, password-manager entry or None)."""
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """Report whether this URL cannot be joined onto a base path."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return a filesystem path for file:/bundle: URLs, else the
        original string unchanged."""
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2624
2624
def hasscheme(path):
    """Report whether the given path string carries an URL scheme."""
    parsed = url(path)
    return bool(parsed.scheme)
2627
2627
def hasdriveletter(path):
    """Report whether path starts with a Windows drive letter ('x:').

    For a falsy path the path itself is returned, mirroring the
    short-circuit behavior of the original boolean expression.
    """
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2630
2630
def urllocalpath(path):
    """Return the local filesystem path for an URL-ish path string."""
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
2633
2633
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2640
2640
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
2646
2646
def isatty(fp):
    """Report whether fp refers to an interactive terminal.

    Objects without an isatty() method are treated as non-terminals.
    """
    try:
        result = fp.isatty()
    except AttributeError:
        return False
    return result
2652
2652
# human-readable elapsed-time formatter built from unitcountfn();
# NOTE(review): unitcountfn is defined earlier in this file -- each
# tuple appears to be (threshold, unit divisor, format string), chosen
# so larger durations get fewer decimal places; confirm against its
# definition
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2668
2668
# current indentation depth for nested @timed reports (shared state)
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = time.time()
        step = 2
        # indent nested timed calls two spaces per level
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - begin
            _timenesting[0] -= step
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2695
2695
2696 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2696 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2697 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2697 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2698
2698
2699 def sizetoint(s):
2699 def sizetoint(s):
2700 '''Convert a space specifier to a byte count.
2700 '''Convert a space specifier to a byte count.
2701
2701
2702 >>> sizetoint('30')
2702 >>> sizetoint('30')
2703 30
2703 30
2704 >>> sizetoint('2.2kb')
2704 >>> sizetoint('2.2kb')
2705 2252
2705 2252
2706 >>> sizetoint('6M')
2706 >>> sizetoint('6M')
2707 6291456
2707 6291456
2708 '''
2708 '''
2709 t = s.strip().lower()
2709 t = s.strip().lower()
2710 try:
2710 try:
2711 for k, u in _sizeunits:
2711 for k, u in _sizeunits:
2712 if t.endswith(k):
2712 if t.endswith(k):
2713 return int(float(t[:-len(k)]) * u)
2713 return int(float(t[:-len(k)]) * u)
2714 return int(t)
2714 return int(t)
2715 except ValueError:
2715 except ValueError:
2716 raise error.ParseError(_("couldn't parse size: %s") % s)
2716 raise error.ParseError(_("couldn't parse size: %s") % s)
2717
2717
2718 class hooks(object):
2718 class hooks(object):
2719 '''A collection of hook functions that can be used to extend a
2719 '''A collection of hook functions that can be used to extend a
2720 function's behavior. Hooks are called in lexicographic order,
2720 function's behavior. Hooks are called in lexicographic order,
2721 based on the names of their sources.'''
2721 based on the names of their sources.'''
2722
2722
2723 def __init__(self):
2723 def __init__(self):
2724 self._hooks = []
2724 self._hooks = []
2725
2725
2726 def add(self, source, hook):
2726 def add(self, source, hook):
2727 self._hooks.append((source, hook))
2727 self._hooks.append((source, hook))
2728
2728
2729 def __call__(self, *args):
2729 def __call__(self, *args):
2730 self._hooks.sort(key=lambda x: x[0])
2730 self._hooks.sort(key=lambda x: x[0])
2731 results = []
2731 results = []
2732 for source, hook in self._hooks:
2732 for source, hook in self._hooks:
2733 results.append(hook(*args))
2733 results.append(hook(*args))
2734 return results
2734 return results
2735
2735
2736 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2736 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2737 '''Yields lines for a nicely formatted stacktrace.
2737 '''Yields lines for a nicely formatted stacktrace.
2738 Skips the 'skip' last entries.
2738 Skips the 'skip' last entries.
2739 Each file+linenumber is formatted according to fileline.
2739 Each file+linenumber is formatted according to fileline.
2740 Each line is formatted according to line.
2740 Each line is formatted according to line.
2741 If line is None, it yields:
2741 If line is None, it yields:
2742 length of longest filepath+line number,
2742 length of longest filepath+line number,
2743 filepath+linenumber,
2743 filepath+linenumber,
2744 function
2744 function
2745
2745
2746 Not be used in production code but very convenient while developing.
2746 Not be used in production code but very convenient while developing.
2747 '''
2747 '''
2748 entries = [(fileline % (fn, ln), func)
2748 entries = [(fileline % (fn, ln), func)
2749 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2749 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2750 if entries:
2750 if entries:
2751 fnmax = max(len(entry[0]) for entry in entries)
2751 fnmax = max(len(entry[0]) for entry in entries)
2752 for fnln, func in entries:
2752 for fnln, func in entries:
2753 if line is None:
2753 if line is None:
2754 yield (fnmax, fnln, func)
2754 yield (fnmax, fnln, func)
2755 else:
2755 else:
2756 yield line % (fnmax, fnln, func)
2756 yield line % (fnmax, fnln, func)
2757
2757
2758 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2758 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2759 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2759 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2760 Skips the 'skip' last entries. By default it will flush stdout first.
2760 Skips the 'skip' last entries. By default it will flush stdout first.
2761 It can be used everywhere and intentionally does not require an ui object.
2761 It can be used everywhere and intentionally does not require an ui object.
2762 Not be used in production code but very convenient while developing.
2762 Not be used in production code but very convenient while developing.
2763 '''
2763 '''
2764 if otherf:
2764 if otherf:
2765 otherf.flush()
2765 otherf.flush()
2766 f.write('%s at:\n' % msg)
2766 f.write('%s at:\n' % msg)
2767 for line in getstackframes(skip + 1):
2767 for line in getstackframes(skip + 1):
2768 f.write(line)
2768 f.write(line)
2769 f.flush()
2769 f.flush()
2770
2770
2771 class dirs(object):
2771 class dirs(object):
2772 '''a multiset of directory names from a dirstate or manifest'''
2772 '''a multiset of directory names from a dirstate or manifest'''
2773
2773
2774 def __init__(self, map, skip=None):
2774 def __init__(self, map, skip=None):
2775 self._dirs = {}
2775 self._dirs = {}
2776 addpath = self.addpath
2776 addpath = self.addpath
2777 if safehasattr(map, 'iteritems') and skip is not None:
2777 if safehasattr(map, 'iteritems') and skip is not None:
2778 for f, s in map.iteritems():
2778 for f, s in map.iteritems():
2779 if s[0] != skip:
2779 if s[0] != skip:
2780 addpath(f)
2780 addpath(f)
2781 else:
2781 else:
2782 for f in map:
2782 for f in map:
2783 addpath(f)
2783 addpath(f)
2784
2784
2785 def addpath(self, path):
2785 def addpath(self, path):
2786 dirs = self._dirs
2786 dirs = self._dirs
2787 for base in finddirs(path):
2787 for base in finddirs(path):
2788 if base in dirs:
2788 if base in dirs:
2789 dirs[base] += 1
2789 dirs[base] += 1
2790 return
2790 return
2791 dirs[base] = 1
2791 dirs[base] = 1
2792
2792
2793 def delpath(self, path):
2793 def delpath(self, path):
2794 dirs = self._dirs
2794 dirs = self._dirs
2795 for base in finddirs(path):
2795 for base in finddirs(path):
2796 if dirs[base] > 1:
2796 if dirs[base] > 1:
2797 dirs[base] -= 1
2797 dirs[base] -= 1
2798 return
2798 return
2799 del dirs[base]
2799 del dirs[base]
2800
2800
2801 def __iter__(self):
2801 def __iter__(self):
2802 return self._dirs.iterkeys()
2802 return self._dirs.iterkeys()
2803
2803
2804 def __contains__(self, d):
2804 def __contains__(self, d):
2805 return d in self._dirs
2805 return d in self._dirs
2806
2806
2807 if safehasattr(parsers, 'dirs'):
2807 if safehasattr(parsers, 'dirs'):
2808 dirs = parsers.dirs
2808 dirs = parsers.dirs
2809
2809
2810 def finddirs(path):
2810 def finddirs(path):
2811 pos = path.rfind('/')
2811 pos = path.rfind('/')
2812 while pos != -1:
2812 while pos != -1:
2813 yield path[:pos]
2813 yield path[:pos]
2814 pos = path.rfind('/', 0, pos)
2814 pos = path.rfind('/', 0, pos)
2815
2815
2816 # compression utility
2816 # compression utility
2817
2817
2818 class nocompress(object):
2818 class nocompress(object):
2819 def compress(self, x):
2819 def compress(self, x):
2820 return x
2820 return x
2821 def flush(self):
2821 def flush(self):
2822 return ""
2822 return ""
2823
2823
2824 compressors = {
2824 compressors = {
2825 None: nocompress,
2825 None: nocompress,
2826 # lambda to prevent early import
2826 # lambda to prevent early import
2827 'BZ': lambda: bz2.BZ2Compressor(),
2827 'BZ': lambda: bz2.BZ2Compressor(),
2828 'GZ': lambda: zlib.compressobj(),
2828 'GZ': lambda: zlib.compressobj(),
2829 }
2829 }
2830 # also support the old form by courtesies
2830 # also support the old form by courtesies
2831 compressors['UN'] = compressors[None]
2831 compressors['UN'] = compressors[None]
2832
2832
2833 def _makedecompressor(decompcls):
2833 def _makedecompressor(decompcls):
2834 def generator(f):
2834 def generator(f):
2835 d = decompcls()
2835 d = decompcls()
2836 for chunk in filechunkiter(f):
2836 for chunk in filechunkiter(f):
2837 yield d.decompress(chunk)
2837 yield d.decompress(chunk)
2838 def func(fh):
2838 def func(fh):
2839 return chunkbuffer(generator(fh))
2839 return chunkbuffer(generator(fh))
2840 return func
2840 return func
2841
2841
2842 class ctxmanager(object):
2842 class ctxmanager(object):
2843 '''A context manager for use in 'with' blocks to allow multiple
2843 '''A context manager for use in 'with' blocks to allow multiple
2844 contexts to be entered at once. This is both safer and more
2844 contexts to be entered at once. This is both safer and more
2845 flexible than contextlib.nested.
2845 flexible than contextlib.nested.
2846
2846
2847 Once Mercurial supports Python 2.7+, this will become mostly
2847 Once Mercurial supports Python 2.7+, this will become mostly
2848 unnecessary.
2848 unnecessary.
2849 '''
2849 '''
2850
2850
2851 def __init__(self, *args):
2851 def __init__(self, *args):
2852 '''Accepts a list of no-argument functions that return context
2852 '''Accepts a list of no-argument functions that return context
2853 managers. These will be invoked at __call__ time.'''
2853 managers. These will be invoked at __call__ time.'''
2854 self._pending = args
2854 self._pending = args
2855 self._atexit = []
2855 self._atexit = []
2856
2856
2857 def __enter__(self):
2857 def __enter__(self):
2858 return self
2858 return self
2859
2859
2860 def enter(self):
2860 def enter(self):
2861 '''Create and enter context managers in the order in which they were
2861 '''Create and enter context managers in the order in which they were
2862 passed to the constructor.'''
2862 passed to the constructor.'''
2863 values = []
2863 values = []
2864 for func in self._pending:
2864 for func in self._pending:
2865 obj = func()
2865 obj = func()
2866 values.append(obj.__enter__())
2866 values.append(obj.__enter__())
2867 self._atexit.append(obj.__exit__)
2867 self._atexit.append(obj.__exit__)
2868 del self._pending
2868 del self._pending
2869 return values
2869 return values
2870
2870
2871 def atexit(self, func, *args, **kwargs):
2871 def atexit(self, func, *args, **kwargs):
2872 '''Add a function to call when this context manager exits. The
2872 '''Add a function to call when this context manager exits. The
2873 ordering of multiple atexit calls is unspecified, save that
2873 ordering of multiple atexit calls is unspecified, save that
2874 they will happen before any __exit__ functions.'''
2874 they will happen before any __exit__ functions.'''
2875 def wrapper(exc_type, exc_val, exc_tb):
2875 def wrapper(exc_type, exc_val, exc_tb):
2876 func(*args, **kwargs)
2876 func(*args, **kwargs)
2877 self._atexit.append(wrapper)
2877 self._atexit.append(wrapper)
2878 return func
2878 return func
2879
2879
2880 def __exit__(self, exc_type, exc_val, exc_tb):
2880 def __exit__(self, exc_type, exc_val, exc_tb):
2881 '''Context managers are exited in the reverse order from which
2881 '''Context managers are exited in the reverse order from which
2882 they were created.'''
2882 they were created.'''
2883 received = exc_type is not None
2883 received = exc_type is not None
2884 suppressed = False
2884 suppressed = False
2885 pending = None
2885 pending = None
2886 self._atexit.reverse()
2886 self._atexit.reverse()
2887 for exitfunc in self._atexit:
2887 for exitfunc in self._atexit:
2888 try:
2888 try:
2889 if exitfunc(exc_type, exc_val, exc_tb):
2889 if exitfunc(exc_type, exc_val, exc_tb):
2890 suppressed = True
2890 suppressed = True
2891 exc_type = None
2891 exc_type = None
2892 exc_val = None
2892 exc_val = None
2893 exc_tb = None
2893 exc_tb = None
2894 except BaseException:
2894 except BaseException:
2895 pending = sys.exc_info()
2895 pending = sys.exc_info()
2896 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2896 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2897 del self._atexit
2897 del self._atexit
2898 if pending:
2898 if pending:
2899 raise exc_val
2899 raise exc_val
2900 return received and suppressed
2900 return received and suppressed
2901
2901
2902 def _bz2():
2902 def _bz2():
2903 d = bz2.BZ2Decompressor()
2903 d = bz2.BZ2Decompressor()
2904 # Bzip2 stream start with BZ, but we stripped it.
2904 # Bzip2 stream start with BZ, but we stripped it.
2905 # we put it back for good measure.
2905 # we put it back for good measure.
2906 d.decompress('BZ')
2906 d.decompress('BZ')
2907 return d
2907 return d
2908
2908
2909 decompressors = {None: lambda fh: fh,
2909 decompressors = {None: lambda fh: fh,
2910 '_truncatedBZ': _makedecompressor(_bz2),
2910 '_truncatedBZ': _makedecompressor(_bz2),
2911 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2911 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2912 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2912 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2913 }
2913 }
2914 # also support the old form by courtesies
2914 # also support the old form by courtesies
2915 decompressors['UN'] = decompressors[None]
2915 decompressors['UN'] = decompressors[None]
2916
2916
2917 # convenient shortcut
2917 # convenient shortcut
2918 dst = debugstacktrace
2918 dst = debugstacktrace
@@ -1,177 +1,161 b''
1 #require test-repo
1 #require test-repo
2
2
3 $ . "$TESTDIR/helpers-testrepo.sh"
3 $ . "$TESTDIR/helpers-testrepo.sh"
4 $ cd "$TESTDIR"/..
4 $ cd "$TESTDIR"/..
5
5
6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
11 i18n/check-translation.py not using absolute_import
11 i18n/check-translation.py not using absolute_import
12 setup.py not using absolute_import
12 setup.py not using absolute_import
13 tests/test-demandimport.py not using absolute_import
13 tests/test-demandimport.py not using absolute_import
14
14
15 #if py3exe
15 #if py3exe
16 $ hg files 'set:(**.py) - grep(pygments)' | sed 's|\\|/|g' \
16 $ hg files 'set:(**.py) - grep(pygments)' | sed 's|\\|/|g' \
17 > | xargs $PYTHON3 contrib/check-py3-compat.py \
17 > | xargs $PYTHON3 contrib/check-py3-compat.py \
18 > | sed 's/[0-9][0-9]*)$/*)/'
18 > | sed 's/[0-9][0-9]*)$/*)/'
19 doc/hgmanpage.py: invalid syntax: invalid syntax (<unknown>, line *)
19 doc/hgmanpage.py: invalid syntax: invalid syntax (<unknown>, line *)
20 hgext/acl.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
20 hgext/acl.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
21 hgext/automv.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
21 hgext/automv.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
22 hgext/blackbox.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
22 hgext/blackbox.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
23 hgext/bugzilla.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
23 hgext/bugzilla.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
24 hgext/censor.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
24 hgext/censor.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
25 hgext/chgserver.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
25 hgext/chgserver.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
26 hgext/children.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
26 hgext/children.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
27 hgext/churn.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
27 hgext/churn.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
28 hgext/clonebundles.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
28 hgext/clonebundles.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
29 hgext/color.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
29 hgext/color.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
30 hgext/convert/bzr.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
30 hgext/convert/bzr.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
31 hgext/convert/common.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
31 hgext/convert/common.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
32 hgext/convert/convcmd.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
32 hgext/convert/convcmd.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
33 hgext/convert/cvs.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
33 hgext/convert/cvs.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
34 hgext/convert/cvsps.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
34 hgext/convert/cvsps.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
35 hgext/convert/darcs.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
35 hgext/convert/darcs.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
36 hgext/convert/filemap.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
36 hgext/convert/filemap.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
37 hgext/convert/git.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
37 hgext/convert/git.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
38 hgext/convert/gnuarch.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
38 hgext/convert/gnuarch.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
39 hgext/convert/hg.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
39 hgext/convert/hg.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
40 hgext/convert/monotone.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
40 hgext/convert/monotone.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
41 hgext/convert/p4.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
41 hgext/convert/p4.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
42 hgext/convert/subversion.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
42 hgext/convert/subversion.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
43 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *)
43 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *)
44 hgext/eol.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
44 hgext/eol.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
45 hgext/extdiff.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
45 hgext/extdiff.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
46 hgext/factotum.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
46 hgext/factotum.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
47 hgext/fetch.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
47 hgext/fetch.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
48 hgext/fsmonitor/state.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
48 hgext/fsmonitor/state.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
49 hgext/fsmonitor/watchmanclient.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
49 hgext/fsmonitor/watchmanclient.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
50 hgext/gpg.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
50 hgext/gpg.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
51 hgext/graphlog.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
51 hgext/graphlog.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
52 hgext/hgk.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
52 hgext/hgk.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
53 hgext/histedit.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
53 hgext/histedit.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
54 hgext/journal.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
54 hgext/journal.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
55 hgext/keyword.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
55 hgext/keyword.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
56 hgext/largefiles/basestore.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
56 hgext/largefiles/basestore.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
57 hgext/largefiles/lfcommands.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
57 hgext/largefiles/lfcommands.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
58 hgext/largefiles/lfutil.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
58 hgext/largefiles/lfutil.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
59 hgext/largefiles/localstore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
59 hgext/largefiles/localstore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
60 hgext/largefiles/overrides.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
60 hgext/largefiles/overrides.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
61 hgext/largefiles/proto.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
61 hgext/largefiles/proto.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
62 hgext/largefiles/remotestore.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
62 hgext/largefiles/remotestore.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
63 hgext/largefiles/reposetup.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
63 hgext/largefiles/reposetup.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
64 hgext/largefiles/storefactory.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
64 hgext/largefiles/storefactory.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
65 hgext/largefiles/uisetup.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
65 hgext/largefiles/uisetup.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
66 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
66 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
67 hgext/mq.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
67 hgext/mq.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
68 hgext/notify.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
68 hgext/notify.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
69 hgext/pager.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
69 hgext/pager.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
70 hgext/patchbomb.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
70 hgext/patchbomb.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
71 hgext/purge.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
71 hgext/purge.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
72 hgext/rebase.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
72 hgext/rebase.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
73 hgext/record.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
73 hgext/record.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
74 hgext/relink.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
74 hgext/relink.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
75 hgext/schemes.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
75 hgext/schemes.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
76 hgext/share.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
76 hgext/share.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
77 hgext/shelve.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
77 hgext/shelve.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
78 hgext/strip.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
78 hgext/strip.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
79 hgext/transplant.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
79 hgext/transplant.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
80 hgext/win32text.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
80 hgext/win32text.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
81 mercurial/archival.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
81 mercurial/archival.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
82 mercurial/bookmarks.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
82 mercurial/bookmarks.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
83 mercurial/branchmap.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
83 mercurial/branchmap.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
84 mercurial/bundle2.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
84 mercurial/bundle2.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
85 mercurial/bundlerepo.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
85 mercurial/bundlerepo.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
86 mercurial/byterange.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
86 mercurial/byterange.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
87 mercurial/changegroup.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
87 mercurial/changegroup.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
88 mercurial/changelog.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
88 mercurial/changelog.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
89 mercurial/cmdutil.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
89 mercurial/cmdutil.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
90 mercurial/commands.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
90 mercurial/commands.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
91 mercurial/commandserver.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
91 mercurial/commandserver.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
92 mercurial/config.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
92 mercurial/config.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
93 mercurial/context.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
93 mercurial/context.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
94 mercurial/copies.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
94 mercurial/copies.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
95 mercurial/crecord.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
95 mercurial/crecord.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
96 mercurial/destutil.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
96 mercurial/destutil.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
97 mercurial/dirstate.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
97 mercurial/dirstate.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
98 mercurial/discovery.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
98 mercurial/discovery.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
99 mercurial/dispatch.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
99 mercurial/dispatch.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
100 mercurial/encoding.py: error importing module: <TypeError> bytes expected, not str (line *)
100 mercurial/encoding.py: error importing module: <TypeError> bytes expected, not str (line *)
101 mercurial/exchange.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
101 mercurial/exchange.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
102 mercurial/extensions.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
102 mercurial/extensions.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
103 mercurial/filelog.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
103 mercurial/filelog.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
104 mercurial/filemerge.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
104 mercurial/filemerge.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
105 mercurial/fileset.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
105 mercurial/fileset.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
106 mercurial/formatter.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
106 mercurial/formatter.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
107 mercurial/graphmod.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
107 mercurial/graphmod.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
108 mercurial/help.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
108 mercurial/help.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
109 mercurial/hg.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
109 mercurial/hg.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
110 mercurial/hgweb/common.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
110 mercurial/hgweb/common.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
111 mercurial/hgweb/hgweb_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
111 mercurial/hgweb/hgweb_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
112 mercurial/hgweb/hgwebdir_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
112 mercurial/hgweb/hgwebdir_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
113 mercurial/hgweb/protocol.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
113 mercurial/hgweb/protocol.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
114 mercurial/hgweb/request.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
114 mercurial/hgweb/request.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
115 mercurial/hgweb/server.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
115 mercurial/hgweb/server.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
116 mercurial/hgweb/webcommands.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
116 mercurial/hgweb/webcommands.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
117 mercurial/hgweb/webutil.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
117 mercurial/hgweb/webutil.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
118 mercurial/hgweb/wsgicgi.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
118 mercurial/hgweb/wsgicgi.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
119 mercurial/hook.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
119 mercurial/hook.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
120 mercurial/httpconnection.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
120 mercurial/httpconnection.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
121 mercurial/httppeer.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
121 mercurial/httppeer.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
122 mercurial/i18n.py: error importing module: <TypeError> bytes expected, not str (line *)
122 mercurial/i18n.py: error importing module: <TypeError> bytes expected, not str (line *)
123 mercurial/keepalive.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
123 mercurial/keepalive.py: error importing module: <AttributeError> module 'mercurial.util' has no attribute 'httplib' (line *)
124 mercurial/localrepo.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
124 mercurial/localrepo.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
125 mercurial/lock.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
125 mercurial/mail.py: error importing module: <AttributeError> module 'email' has no attribute 'Header' (line *)
126 mercurial/mail.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
126 mercurial/manifest.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
127 mercurial/manifest.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
127 mercurial/merge.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
128 mercurial/match.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
128 mercurial/namespaces.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
129 mercurial/mdiff.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
129 mercurial/patch.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
130 mercurial/merge.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
130 mercurial/pvec.py: error importing module: <NameError> name 'xrange' is not defined (line *)
131 mercurial/minirst.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
131 mercurial/repair.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
132 mercurial/namespaces.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
132 mercurial/revlog.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
133 mercurial/obsolete.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
133 mercurial/revset.py: error importing module: <NameError> name 'xrange' is not defined (line *)
134 mercurial/patch.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
134 mercurial/scmutil.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
135 mercurial/pathutil.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
135 mercurial/scmwindows.py: error importing module: <ImportError> No module named 'winreg' (line *)
136 mercurial/peer.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
136 mercurial/simplemerge.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
137 mercurial/profiling.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
137 mercurial/sshpeer.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
138 mercurial/pushkey.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
138 mercurial/sshserver.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
139 mercurial/pvec.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
139 mercurial/statichttprepo.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at byterange.py:*)
140 mercurial/registrar.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
140 mercurial/store.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
141 mercurial/repair.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
141 mercurial/streamclone.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
142 mercurial/repoview.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
142 mercurial/subrepo.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
143 mercurial/revlog.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
143 mercurial/templatefilters.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
144 mercurial/revset.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
144 mercurial/templatekw.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
145 mercurial/scmutil.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
145 mercurial/templater.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
146 mercurial/scmwindows.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
146 mercurial/ui.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
147 mercurial/similar.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
147 mercurial/unionrepo.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
148 mercurial/simplemerge.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
148 mercurial/url.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
149 mercurial/sshpeer.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
150 mercurial/sshserver.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
151 mercurial/sslutil.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
152 mercurial/statichttprepo.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
153 mercurial/store.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
154 mercurial/streamclone.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
155 mercurial/subrepo.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
156 mercurial/tagmerge.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
157 mercurial/tags.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
158 mercurial/templatefilters.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
159 mercurial/templatekw.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
160 mercurial/templater.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
161 mercurial/transaction.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
162 mercurial/ui.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
163 mercurial/unionrepo.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
164 mercurial/url.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
165 mercurial/verify.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
149 mercurial/verify.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
166 mercurial/win32.py: error importing module: <ImportError> No module named 'msvcrt' (line *)
150 mercurial/win32.py: error importing module: <ImportError> No module named 'msvcrt' (line *)
167 mercurial/windows.py: error importing module: <ImportError> No module named 'msvcrt' (line *)
151 mercurial/windows.py: error importing module: <ImportError> No module named 'msvcrt' (line *)
168 mercurial/wireproto.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
152 mercurial/wireproto.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
169
153
170 #endif
154 #endif
171
155
172 #if py3exe py3pygments
156 #if py3exe py3pygments
173 $ hg files 'set:(**.py) and grep(pygments)' | sed 's|\\|/|g' \
157 $ hg files 'set:(**.py) and grep(pygments)' | sed 's|\\|/|g' \
174 > | xargs $PYTHON3 contrib/check-py3-compat.py \
158 > | xargs $PYTHON3 contrib/check-py3-compat.py \
175 > | sed 's/[0-9][0-9]*)$/*)/'
159 > | sed 's/[0-9][0-9]*)$/*)/'
176 hgext/highlight/highlight.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
160 hgext/highlight/highlight.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
177 #endif
161 #endif
General Comments 0
You need to be logged in to leave comments. Login now