##// END OF EJS Templates
util: ensure forwarded attrs are set in globals() as sysstr...
Augie Fackler -
r30087:9b230a8e default
parent child Browse files
Show More
@@ -1,2898 +1,2899
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import string
31 import string
32 import subprocess
32 import subprocess
33 import sys
33 import sys
34 import tempfile
34 import tempfile
35 import textwrap
35 import textwrap
36 import time
36 import time
37 import traceback
37 import traceback
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 pycompat,
46 pycompat,
47 )
47 )
48
48
# Forward selected Python 2/3 compatibility aliases from pycompat into this
# module's namespace so callers can use e.g. util.httplib on either version.
for attr in (
    'empty',
    'httplib',
    'httpserver',
    'pickle',
    'queue',
    'urlerr',
    'urlparse',
    # we do import urlreq, but we do it outside the loop
    #'urlreq',
    'stringio',
    'socketserver',
    'xmlrpclib',
):
    # globals() keys must be native strings ("sysstr") to act as module
    # attributes on Python 3, so convert the name before using it.
    a = pycompat.sysstr(attr)
    globals()[a] = getattr(pycompat, a)

# This line is to make pyflakes happy:
urlreq = pycompat.urlreq
67
68
# Select the platform-specific implementation module once; the aliases below
# re-export its helpers under stable, platform-neutral names.
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

# shorthand for gettext
_ = i18n._

bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the osutil (C) implementation when it is available
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
130
131
# Python compatibility

# Shared sentinel meaning "no value supplied"; distinct from None, which is
# a legitimate value at many call sites (see safehasattr, _lrucachenode).
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
139
140
def safehasattr(thing, attr):
    """Return True if ``thing`` has an attribute named ``attr``.

    Probes via getattr() with a sentinel default rather than hasattr().
    """
    sentinel = object()
    return getattr(thing, attr, sentinel) is not sentinel
142
143
# Registry of supported digest algorithms, keyed by the name used in
# exchanged/stored metadata.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every entry in the preference list must be implemented
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
153
154
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create one hasher per name in ``digests``; optionally seed them
        with the initial data ``s``. Raises Abort for unknown names."""
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` to every underlying hasher."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for digest type ``key``."""
        if key not in DIGESTS:
            # fix: the message previously interpolated an unrelated name
            # 'k' (a stale module-level loop variable) instead of the
            # key actually requested
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
200
201
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # pass the read through while accounting for every byte seen
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        # check size first: a short/long stream makes digest mismatches
        # inevitable, and the size message is the more useful diagnostic
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, want in self._digests.items():
            have = self._digester[name]
            if have != want:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, want, have))
232
233
# Provide a 'buffer' callable everywhere: reuse the builtin when it exists,
# otherwise fall back to slicing (Python 2) or a zero-copy memoryview
# (Python 3, where the builtin was removed).
try:
    buffer = buffer
except NameError:
    if not pycompat.ispy3:
        def buffer(sliceable, offset=0):
            # plain slice copy
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # zero-copy view of the underlying bytes
            return memoryview(sliceable)[offset:]

# close inherited file descriptors when spawning subprocesses on POSIX
closefds = os.name == 'posix'

# read size used by bufferedinputpipe._fillbuffer
_chunksize = 4096
246
247
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        # pending chunks; collapsed back to at most one chunk by
        # _frombuffer, so multiple entries only exist transiently
        self._buffer = []
        self._eof = False
        # total number of buffered bytes across all chunks
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling until 'size' bytes are available or EOF is hit
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the newline within the most recent chunk, -1 if none
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        # number of bytes to return, including the newline itself
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        # after this, the buffer holds at most one chunk (the leftover)
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # os.read on the raw fd, not self._input.read: avoids the
        # Python-level buffering this class exists to work around
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
340
341
def popen2(cmd, env=None, newlines=False):
    """Run ``cmd`` via the shell; return its (stdin, stdout) pipe pair."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
351
352
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but expose only the three standard pipes."""
    return popen4(cmd, env, newlines)[:3]
355
356
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run ``cmd`` via the shell; return (stdin, stdout, stderr, proc).

    bufsize=-1 (the default) lets the system pick the pipe buffer size.
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
364
365
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # no generated version module (e.g. running from a source checkout)
        return 'unknown'
    return __version__.version
372
373
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # everything after the first '+' or '-' is the free-form 'extra' part
    parts = remod.split('[\+-]', v, 1)
    vparts = parts[0]
    extra = parts[1] if len(parts) > 1 else None

    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            # stop at the first non-numeric component
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return tuple(vints[:2])
    if n == 3:
        return tuple(vints[:3])
    if n == 4:
        return tuple(vints[:3]) + (extra,)
441
442
# used by parsedate
defaultdateformats = (
    '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
    '%Y-%m-%dT%H:%M',    # without seconds
    '%Y-%m-%dT%H%M%S',   # another awful but legal variant without :
    '%Y-%m-%dT%H%M',     # without seconds
    '%Y-%m-%d %H:%M:%S', # our common legal variant
    '%Y-%m-%d %H:%M',    # without seconds
    '%Y-%m-%d %H%M%S',   # without :
    '%Y-%m-%d %H%M',     # without seconds
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# the defaults plus coarser-grained forms (year or month only)
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
483
484
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # no arguments: a one-element list doubles as the memo slot
        slot = []
        def f():
            if not slot:
                slot.append(func())
            return slot[0]
        return f
    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            try:
                return memo[arg]
            except KeyError:
                memo[arg] = func(arg)
                return memo[arg]
    else:
        def f(*args):
            try:
                return memo[args]
            except KeyError:
                memo[args] = func(*args)
                return memo[args]

    return f
509
510
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-assigning an existing key moves
    it to the end.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # an existing key moves to the end, reflecting the latest assignment
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: return the popped value (it was previously discarded,
        # making pop() return None), matching the dict.pop() contract
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned above
            pass
        return value
    def keys(self):
        # note: returns the internal list; callers must not mutate it
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place the key at an arbitrary position without the move-to-end
        # behavior of __setitem__
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        return '%s(%r)' % (self.__class__.__name__, self.items())
558
559
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ avoids a per-instance __dict__; caches may hold many nodes
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # key is the _notset sentinel (not None) so an empty node can be
        # distinguished from an entry whose key is legitimately None
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
578
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        # Start with a single self-referential node; more are added
        # lazily (up to ``max``) as entries are inserted.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # Walk the linked list newest-to-oldest; any order would do,
        # but cache order is free to produce.
        node = self._head
        for _unused in range(len(self._cache)):
            yield node.key
            node = node.next

    def __getitem__(self, k):
        node = self._cache[k]
        # A read refreshes the entry.
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Existing key: update in place and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            # Still room to grow the ring.
            node = self._addcapacity()
        else:
            # Full: recycle the oldest slot (head.prev).
            node = self._head.prev

            # Evict the old entry if the slot holds one.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # The recycled/added node is already self._head.prev, so just
        # promoting it to head gives the right order.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily promote the node, then step the head forward so
        # the emptied slot becomes the oldest position.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        # Empty every occupied slot in ring order, then drop the dict.
        node = self._head
        while node.key is not _notset:
            node.markempty()
            node = node.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        node = self._head.prev
        # Replay entries oldest-to-newest so the copy ends up with the
        # same recency ordering.
        for _unused in range(len(self._cache)):
            result[node.key] = node.value
            node = node.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
736
737
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    # Specialize for the common single-argument case so the key is the
    # bare argument rather than a tuple.
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in cache:
                # Cache hit: refresh recency.
                order.remove(arg)
            else:
                # Cache miss: evict the oldest once we exceed 20 entries.
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
763
764
class propertycache(object):
    """Decorator: compute an attribute once, then cache it on the instance.

    The first access runs the wrapped function; the result is stored
    directly in the instance ``__dict__`` so later lookups bypass the
    descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
776
777
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdoutdata, _stderrdata = proc.communicate(s)
    return stdoutdata
783
784
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.

    Raises Abort if the command exits non-zero; both temporary files
    are removed on all paths.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        try:
            fp.write(s)
        finally:
            # Close even if write fails so the descriptor is not leaked.
            fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # OpenVMS reports success with odd status values.
            code = 0
        if code:
            # explainexit() returns (message, code); use only the message,
            # matching the usage in system().
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)[0]))
        return readfile(outname)
    finally:
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
817
818
# Maps a filter-command prefix to the handler that runs it; consulted
# by filter() below.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
822
823
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # Dispatch on a known prefix; anything else is treated as a plain pipe.
    for prefix, handler in filtertable.iteritems():
        if cmd.startswith(prefix):
            return handler(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
829
830
def binary(s):
    """return true if a string is binary data"""
    # A NUL byte anywhere in a non-empty string marks it as binary.
    if not s:
        return False
    return '\0' in s
833
834
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # Position of the highest set bit, i.e. floor(log2(x)); 0 for 0.
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    pending = []
    size = 0
    for chunk in source:
        pending.append(chunk)
        size += len(chunk)
        if size >= min:
            if min < max:
                # Grow the threshold: at least double it, and jump to the
                # largest power of two covered by what we just emitted.
                min = min << 1
                nmin = 1 << log2(size)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            size = 0
            pending = []
    # Flush whatever is left, however small.
    if pending:
        yield ''.join(pending)
864
865
# Convenience alias: Abort is defined in the error module but widely
# used throughout this one.
Abort = error.Abort
866
867
def always(fn):
    """Matcher predicate that accepts every input."""
    return True
869
870
def never(fn):
    """Matcher predicate that rejects every input."""
    return False
872
873
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    # On 2.7+ the workaround is unnecessary; return the function untouched.
    if sys.version_info >= (2, 7):
        return func

    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only re-enable if the caller had GC on to begin with.
            if wasenabled:
                gc.enable()
    return wrapper
896
897
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # Different drives (Windows): no relative path exists, so anchor
        # the target at root instead.
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    comps1, comps2 = splitpath(n1), n2.split('/')
    comps1.reverse()
    comps2.reverse()
    # Strip the shared leading components (compared from the reversed ends).
    while comps1 and comps2 and comps1[-1] == comps2[-1]:
        comps1.pop()
        comps2.pop()
    comps2.reverse()
    # Climb out of what remains of n1, then descend into n2.
    return os.sep.join((['..'] * len(comps1)) + comps2) or '.'
922
923
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or    # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen(u"__main__"))      # tools/freeze
932
933
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # Frozen (py2exe) executables have no usable __file__; data files
    # live next to the executable instead.
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# Tell the i18n machinery where translations live.
i18n.setdatapath(datapath)

# Cached path of the 'hg' executable; populated lazily by hgexecutable().
_hgexecutable = None
943
944
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # Explicit override via the environment wins.
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # Running directly from the 'hg' script.
            _sethgexecutable(mainmod.__file__)
        else:
            # Fall back to searching PATH, then to argv[0]'s basename.
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
966
967
967 def _sethgexecutable(path):
968 def _sethgexecutable(path):
968 """set location of the 'hg' executable"""
969 """set location of the 'hg' executable"""
969 global _hgexecutable
970 global _hgexecutable
970 _hgexecutable = path
971 _hgexecutable = path
971
972
972 def _isstdout(f):
973 def _isstdout(f):
973 fileno = getattr(f, 'fileno', None)
974 fileno = getattr(f, 'fileno', None)
974 return fileno and fileno() == sys.__stdout__.fileno()
975 return fileno and fileno() == sys.__stdout__.fileno()
975
976
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    # Flush our own stdout so the child's output interleaves sanely.
    try:
        sys.stdout.flush()
    except Exception:
        pass

    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)

    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if cwd is not None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # Child can write straight to our stdout.
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # Capture combined stdout/stderr and forward it to ``out``.
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # OpenVMS encodes success in odd status values.
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
1031
1032
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback depth of 1 means the TypeError came from the
            # call itself (bad signature), not from inside func's body.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
1043
1044
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # Remember the old stat so mtime ambiguity can be detected below.
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # Recreate the symlink itself; copytime is ignored for symlinks,
        # but in general copytime isn't needed for them anyway.
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one;
                    # nudge mtime forward so readers can tell them apart.
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1088
1089
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # Hardlinks only work within a single filesystem.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset child progress by what we have already copied.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Linking failed; degrade to copying for the rest of the tree.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1125
1126
# Device names and characters that Windows reserves; any path component
# that collides with these is invalid on Windows filesystems.
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component, accepting either separator style
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            # characters Windows forbids anywhere in a component
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # ASCII control characters (0-31) are also invalid
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # device names are reserved regardless of extension (e.g. con.xml)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        # Windows silently strips a trailing dot or space; reject them.
        # "n not in '..'" is a substring test that exempts exactly the
        # special names '.' and '..'.
        t = n[-1]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1176
1177
# Pick the filename validator for the current OS: native Windows uses the
# pure-Python rules above; other platforms delegate to the platform module.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1181
1182
def makelock(info, pathname):
    """Create a lock at pathname recording info.

    A symlink whose target is info is preferred; when symlinks are
    unavailable (or creation fails for a reason other than the lock
    already existing), fall back to exclusively creating a regular
    file that contains info.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as err:
        # the lock is already held by someone else
        if err.errno == errno.EEXIST:
            raise
    except AttributeError:  # platform without os.symlink
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
    fd = os.open(pathname, flags)
    os.write(fd, info)
    os.close(fd)
1194 os.close(ld)
1194
1195
def readlock(pathname):
    """Return the info recorded in the lock at pathname.

    Reads the symlink target when the lock is a symlink; otherwise
    (not a symlink, or symlinks unsupported) reads the lock file's
    content.
    """
    try:
        return os.readlink(pathname)
    except OSError as err:
        # EINVAL: exists but is not a symlink; ENOSYS: no symlink
        # support -- in both cases fall back to reading the file
        if err.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError:  # platform without os.readlink
        pass
    fp = posixfile(pathname)
    contents = fp.read()
    fp.close()
    return contents
1207
1208
def fstat(fp):
    """Return the stat result for a file object.

    Uses the object's file descriptor when available; objects lacking
    a fileno() method are stat'ed through their .name attribute.
    """
    try:
        fd = fp.fileno()
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fd)
1214 return os.stat(fp.name)
1214
1215
1215 # File system features
1216 # File system features
1216
1217
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    origstat = os.lstat(path)
    dirname, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # name has no case to fold; no evidence against sensitivity
        return True
    try:
        otherstat = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the case-swapped name does not exist: case matters here
        return True
    # identical stat both ways means the filesystem folded the case
    if otherstat == origstat:
        return False
    return True
1239
1240
# Probe for the optional re2 regex engine. _re2 is a tri-state flag:
# None = re2 imported but not yet validated (see _re._checkre2),
# True = usable, False = unavailable.
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1245
1246
class _re(object):
    # Facade that transparently prefers the optional re2 engine over the
    # stdlib 're' module (imported as remod), caching availability in
    # the module-level tri-state _re2 flag.
    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes its flags inline rather than as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks; fall back to stdlib
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton used throughout as util.re
re = _re()
1290
1291
_fspathcache = {}  # dir -> {normcased name: on-disk name} listing cache
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map each entry's normcased form to its on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string, so the result must be assigned;
    # otherwise a '\' separator (Windows) is swallowed as a regex escape
    # inside the character classes below and the pattern misparses.
    seps = seps.replace('\\', '\\\\')
    # alternating matches: a path component, or a run of separators
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs pass through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the given spelling if the entry is unknown
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1333
1334
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # could not create the probe file; clean up and report failure
        try:
            os.unlink(f1)
        except OSError:
            pass
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        # a working filesystem reports 2 links for the hardlinked pair
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always remove both probe files, ignoring races/failures
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1369
1370
def endswithsep(path):
    """Tell whether path ends with os.sep (or os.altsep when defined)."""
    if path.endswith(os.sep):
        return True
    return os.altsep and path.endswith(os.altsep)
1373
1374
def splitpath(path):
    """Split path by os.sep.

    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.
    """
    sep = os.sep
    return path.split(sep)
1381
1382
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # elsewhere: Windows always has a GUI; X11 needs $DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1396
1397
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            # no original file to copy: the empty temp file suffices
            if inst.errno == errno.ENOENT:
                return temp
            # annotate the error with the path for better reporting
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a stale temp file behind on failure
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1435
1436
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        # Once ambiguity is avoided (see isambig), comparing size,
        # ctime and mtime is enough to exactly detect a change of a
        # file regardless of platform.
        try:
            new = self.stat
            prev = old.stat
            return (new.st_size == prev.st_size
                    and new.st_ctime == prev.st_ctime
                    and new.st_mtime == prev.st_mtime)
        except AttributeError:
            # at least one side has stat = None (file missing)
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return self.stat.st_ctime == old.stat.st_ctime
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
1501
1502
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # publish the written content by renaming the temp copy over
        # the permanent name; a no-op if already closed
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        # abandon all writes: remove the temp copy without renaming
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # keep the result only when the 'with' body raised no exception
        if exctype is not None:
            self.discard()
        else:
            self.close()
1564
1565
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        # create missing ancestors first, then retry this directory
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1592
1593
def readfile(path):
    """Return the entire binary contents of the file at path."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1596
1597
def writefile(path, text):
    """Write text (bytes) to path, replacing any existing content."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1600
1601
def appendfile(path, text):
    """Append text (bytes) to the file at path, creating it if absent."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1604
1605
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-chunk anything over 1MB into 256KB pieces so one huge
            # chunk cannot dominate memory use
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # read offset into the chunk at the head of the queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source exhausted; return what we have (< l bytes)
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1685
1686
def filechunkiter(f, size=65536, limit=None):
    """Yield successive chunks read from file object ``f``.

    Each chunk is at most ``size`` (default 65536) bytes.  When
    ``limit`` is given, stop after that many bytes have been produced.
    Chunks may be shorter than ``size`` at end of file, or when ``f``
    is a socket or similar object that returns short reads.
    """
    assert size >= 0
    assert limit is None or limit >= 0
    remaining = limit
    while True:
        if remaining is None:
            nbytes = size
        else:
            nbytes = min(remaining, size)
        # nbytes == 0 short-circuits: never issue a zero-length read
        data = nbytes and f.read(nbytes)
        if not data:
            break
        if remaining:
            remaining -= len(data)
        yield data
1706
1707
def makedate(timestamp=None):
    """Return (unixtime, tzoffset) for ``timestamp`` (default: now).

    The offset is the local timezone's distance from UTC, in seconds,
    at that moment.  Raises Abort for negative timestamps, which the
    platform time APIs do not handle reliably.
    """
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        raise Abort(_("negative timestamp: %d") % timestamp,
                    hint=_("check your clock"))
    # derive the zone offset by comparing the UTC and local renderings
    # of the same instant
    delta = (datetime.datetime.utcfromtimestamp(timestamp) -
             datetime.datetime.fromtimestamp(timestamp))
    offset = delta.days * 86400 + delta.seconds
    return timestamp, offset
1719
1720
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.

    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.  In ``format``, ``%1`` expands to
    the signed hour part of the offset, ``%2`` to the minute part, and
    ``%z`` is shorthand for ``%1%2``.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # positive tz means west of UTC, hence the inverted sign
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    # clamp to signed 32 bits, matching what parsedate() accepts
    d = t - tz
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x80000000:
        d = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    when = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return when.strftime(format)
1755
1756
def shortdate(date=None):
    """Render a (timestamp, tzoff) tuple as an ISO-8601-style date."""
    return datestr(date, format='%Y-%m-%d')
1759
1760
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair"""

    # named zones: only the UTC aliases are recognized
    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        sign = 1 if s[-5] == "+" else -1
        hours, minutes = int(s[-4:-2]), int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
            s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = 1 if s[-6] == "+" else -1
        hours, minutes = int(s[-5:-3]), int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-6]

    # no recognizable zone suffix
    return None, s
1787
1788
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    NOTE(review): ``defaults`` is indexed as ``defaults[part][usenow]``,
    so callers are expected to pass a dict of part -> (bias, now) pairs;
    the ``[]`` default appears historical and unusable — confirm before
    changing the signature.
    """
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # Fill in missing elements from defaults, scanning from most to
    # least specific.  Once any element is explicitly present in the
    # format, coarser missing elements are biased toward "now" instead
    # of the rounded default.
    usenow = False
    for part in ("S", "M", "HI", "d", "mb", "yY"):
        present = [True for p in part if ("%" + p) in format]
        if present:
            usenow = True
        else:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # no explicit zone: infer the offset from the local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1815
1816
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed: pass through unchanged
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates are matched both literally and translated
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses; the for/else
        # raises only when every format failed
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1892
1893
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified fields down (Jan 1st, 00:00:00) so the
        # result is the earliest instant the spec could mean
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unspecified fields up (Dec 31st, 23:59:59); month
        # lengths are probed from 31 downward until one parses
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": everything from N days ago onward
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range "A to B"
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match the whole interval it denotes
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1968
1969
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        expr = pattern[3:]
        try:
            regex = remod.compile(expr)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', expr, regex.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # unknown or missing prefix: plain string equality
    return 'literal', pattern, pattern.__eq__
2007
2008
def shortuser(user):
    """Return a short representation of a user name or email address.

    Drops an ``@domain`` suffix, anything up to a ``<``, then truncates
    at the first space or dot.
    """
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    for sep in (' ', '.'):
        idx = user.find(sep)
        if idx >= 0:
            user = user[:idx]
    return user
2023
2024
def emailuser(user):
    """Return the user portion of an email address."""
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    return user
2033
2034
def email(author):
    """Return the email part of an author string like 'name <addr>'."""
    start = author.find('<') + 1  # 0 when there is no '<'
    end = author.find('>')
    if end == -1:
        return author[start:]
    return author[start:end]
2040
2041
def ellipsis(text, maxlength=400):
    """Trim ``text`` to at most ``maxlength`` (default: 400) display columns,
    appending '...' when truncation happens."""
    return encoding.trim(text, maxlength, ellipsis='...')
2044
2045
def unitcountfn(*unittable):
    """Return a function that renders a readable count of some quantity.

    ``unittable`` rows are (multiplier, divisor, format) triples ordered
    from largest to smallest unit; the first row whose threshold
    (multiplier * divisor) does not exceed the count is used.
    """

    def render(count):
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # below every threshold: fall back to the final (smallest) row
        return unittable[-1][2] % count

    return render
2055
2056
# human-readable byte counts: rows are ordered so the first matching
# (multiplier, divisor, format) triple yields three significant digits,
# falling through to a plain byte count for small values
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2068
2069
def uirepr(s):
    """repr() a string while collapsing doubled backslashes, so Windows
    paths stay readable in user-facing output."""
    return repr(s).replace('\\\\', '\\')
2072
2073
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Build a column-width-aware TextWrapper instance.

    The class is defined lazily on first call and then cached by
    rebinding the module-level ``MBTextWrapper`` name to it, so later
    calls construct instances directly.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr at the largest prefix that fits in space_left
            # display columns (not characters)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the freshly-built class by replacing this factory function
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2176
2177
def wrap(line, width, initindent='', hangindent=''):
    """Wrap ``line`` to at most ``width`` display columns.

    ``initindent`` prefixes the first output line, ``hangindent`` the
    following ones.  Input and output are locally-encoded byte strings.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    enc, mode = encoding.encoding, encoding.encodingmode
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent.decode(enc, mode),
                            subsequent_indent=hangindent.decode(enc, mode))
    return wrapper.fill(line.decode(enc, mode)).encode(enc)
2189
2190
def iterlines(iterator):
    """Yield every line contained in an iterable of text chunks."""
    for data in iterator:
        # splitlines() strips the line terminators for us
        for text in data.splitlines():
            yield text
2194
2195
def expandpath(path):
    """Expand environment variables, then '~' constructs, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2197
2198
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2212
2213
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child; os.wait() returns (pid, status), remembered so
        # the polling loop below can detect early termination
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)  # absent on some platforms
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() after the liveness test so a child that
            # satisfied the condition just before exiting is not reported
            # as a failure
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            # restore whatever SIGCHLD handler was installed before us
            signal.signal(signal.SIGCHLD, prevhandler)
2247
2248
2248 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2249 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2249 """Return the result of interpolating items in the mapping into string s.
2250 """Return the result of interpolating items in the mapping into string s.
2250
2251
2251 prefix is a single character string, or a two character string with
2252 prefix is a single character string, or a two character string with
2252 a backslash as the first character if the prefix needs to be escaped in
2253 a backslash as the first character if the prefix needs to be escaped in
2253 a regular expression.
2254 a regular expression.
2254
2255
2255 fn is an optional function that will be applied to the replacement text
2256 fn is an optional function that will be applied to the replacement text
2256 just before replacement.
2257 just before replacement.
2257
2258
2258 escape_prefix is an optional flag that allows using doubled prefix for
2259 escape_prefix is an optional flag that allows using doubled prefix for
2259 its escaping.
2260 its escaping.
2260 """
2261 """
2261 fn = fn or (lambda s: s)
2262 fn = fn or (lambda s: s)
2262 patterns = '|'.join(mapping.keys())
2263 patterns = '|'.join(mapping.keys())
2263 if escape_prefix:
2264 if escape_prefix:
2264 patterns += '|' + prefix
2265 patterns += '|' + prefix
2265 if len(prefix) > 1:
2266 if len(prefix) > 1:
2266 prefix_char = prefix[1:]
2267 prefix_char = prefix[1:]
2267 else:
2268 else:
2268 prefix_char = prefix
2269 prefix_char = prefix
2269 mapping[prefix_char] = prefix_char
2270 mapping[prefix_char] = prefix_char
2270 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2271 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2271 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2272 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2272
2273
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: treat the value as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2289
2290
# recognized spellings of config-style booleans, all lower-case
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    key = s.lower()
    return _booleans.get(key)
2300
2301
# Map every two-hexdigit string (all upper/lower-case combinations, since
# string.hexdigits contains both cases) to the character with that code
# point, e.g. '41' -> 'A'.
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in string.hexdigits for b in string.hexdigits)
2303
2304
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url('')
    <url path: ''>
    >>> url('#a')
    <url path: '', fragment: 'a'>
    >>> url('http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url('http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url('http:')
    <url scheme: 'http'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        # _localpath stays True until a URL scheme is recognized below
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, pycompat.urlparse.unquote(v))

    def __repr__(self):
        # show only the components that were actually parsed
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 hosts are emitted verbatim, others are quoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
            if self.port:
                s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url-string-without-credentials, authinfo-or-None).

        The URL string is rebuilt with user/passwd temporarily cleared.
        """
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            # always restore the credentials, even if str() raises
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """Whether this URL is absolute, i.e. cannot be joined to a base."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return the filesystem path form of this URL for local schemes,
        or the original input string for non-local ones."""
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2604
2605
def hasscheme(path):
    """Report whether *path* parses with a URL scheme component."""
    parsed = url(path)
    return bool(parsed.scheme)
2607
2608
def hasdriveletter(path):
    """True when *path* starts with a Windows drive letter ('x:...').

    A falsy *path* (empty string, None) is returned unchanged.
    """
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2610
2611
def urllocalpath(path):
    """Return the local-path interpretation of *path* (see url.localpath)."""
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
2613
2614
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2620
2621
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2626
2627
def isatty(fp):
    """Call fp.isatty(), returning False when fp has no such method."""
    try:
        result = fp.isatty()
    except AttributeError:
        # file-like objects without isatty() are not terminals
        return False
    return result
2632
2633
# Render an elapsed time (in seconds) as a short human-readable string.
# Each entry is a (factor, divisor, format) tuple spanning seconds down to
# nanoseconds; the selection logic lives in unitcountfn, which is not
# visible in this chunk.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2648
2649
# Current stderr indentation (in spaces) for nested @timed calls; kept in a
# one-element list so the nested wrapper closures share mutable state.
_timenesting = [0]
2650
2651
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        step = 2
        begin = time.time()
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            duration = time.time() - begin
            _timenesting[0] -= step
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(duration)))
    return wrapper
2675
2676
2676 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2677 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2677 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2678 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2678
2679
def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        # First matching suffix wins; see _sizeunits for the ordering.
        for suffix, multiplier in _sizeunits:
            if t.endswith(suffix):
                return int(float(t[:-len(suffix)]) * multiplier)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2697
2698
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        # source is only used as a sort key when the hooks are invoked.
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # Sort in place by source name so the call order is deterministic.
        self._hooks.sort(key=lambda entry: entry[0])
        return [hook(*args) for _source, hook in self._hooks]
2715
2716
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
    length of longest filepath+line number,
    filepath+linenumber,
    function

    Not be used in production code but very convenient while developing.
    '''
    frames = traceback.extract_stack()[:-skip - 1]
    entries = [(fileline % (fname, lno), funcname)
               for fname, lno, funcname, _text in frames]
    if not entries:
        return
    # Width of the widest file:line column, used to align the output.
    fnmax = max(len(fnln) for fnln, _func in entries)
    for fnln, func in entries:
        if line is None:
            yield (fnmax, fnln, func)
        else:
            yield line % (fnmax, fnln, func)
2737
2738
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # Flush the other stream first so interleaved output stays ordered.
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1 hides this helper's own frame from the report.
    for entry in getstackframes(skip + 1):
        f.write(entry)
    f.flush()
2750
2751
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # Maps directory name -> number of tracked entries beneath it.
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style map: values are status tuples; entries whose
            # state equals 'skip' are left out of the multiset.
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                # Shallower ancestors were already counted when this
                # base was first inserted, so we can stop here.
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                # Same invariant as addpath: once an ancestor keeps a
                # nonzero count, the shallower ones are left untouched.
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        # iter() over the dict works on both Python 2 and 3, unlike the
        # Python 2-only dict.iterkeys() and iterates the same keys.
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs
2786
2787
# Prefer the C implementation of dirs when the parsers module provides one.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2789
2790
def finddirs(path):
    '''Yield each ancestor directory of a '/'-separated path, deepest first.

    Yields nothing for a path with no slash.
    '''
    idx = path.rfind('/')
    while idx != -1:
        yield path[:idx]
        idx = path.rfind('/', 0, idx)
2795
2796
2796 # compression utility
2797 # compression utility
2797
2798
class nocompress(object):
    '''Compressor that passes data through unchanged.'''

    def compress(self, x):
        # Identity transform: no compression applied.
        return x

    def flush(self):
        # Nothing is buffered, so there is never anything to emit.
        return ""
2803
2804
# Registry of bundle compression engines, keyed by wire-format name.
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2812
2813
def _makedecompressor(decompcls):
    '''Build a fh -> chunkbuffer decompression function.

    decompcls is a no-argument factory returning an object with a
    decompress(chunk) method (e.g. zlib.decompressobj).
    '''
    def generator(f):
        engine = decompcls()
        for chunk in filechunkiter(f):
            yield engine.decompress(chunk)
    def func(fh):
        return chunkbuffer(generator(fh))
    return func
2821
2822
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # Remember the most recent exception raised by an exit
                # function; remaining exit functions still run, and the
                # exception is re-raised afterwards.  (The original code
                # redundantly called sys.exc_info() twice here.)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2881
2882
2882 def _bz2():
2883 def _bz2():
2883 d = bz2.BZ2Decompressor()
2884 d = bz2.BZ2Decompressor()
2884 # Bzip2 stream start with BZ, but we stripped it.
2885 # Bzip2 stream start with BZ, but we stripped it.
2885 # we put it back for good measure.
2886 # we put it back for good measure.
2886 d.decompress('BZ')
2887 d.decompress('BZ')
2887 return d
2888 return d
2888
2889
# Registry of bundle decompression engines, keyed by wire-format name;
# each maps a file handle to a decompressed chunkbuffer.
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2896
2897
# convenient shortcut
dst = debugstacktrace
@@ -1,141 +1,63
1 #require test-repo
1 #require test-repo
2
2
3 $ . "$TESTDIR/helpers-testrepo.sh"
3 $ . "$TESTDIR/helpers-testrepo.sh"
4 $ cd "$TESTDIR"/..
4 $ cd "$TESTDIR"/..
5
5
6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
11 i18n/check-translation.py not using absolute_import
11 i18n/check-translation.py not using absolute_import
12 setup.py not using absolute_import
12 setup.py not using absolute_import
13 tests/test-demandimport.py not using absolute_import
13 tests/test-demandimport.py not using absolute_import
14
14
15 #if py3exe
15 #if py3exe
16 $ hg files 'set:(**.py) - grep(pygments)' | sed 's|\\|/|g' \
16 $ hg files 'set:(**.py) - grep(pygments)' | sed 's|\\|/|g' \
17 > | xargs $PYTHON3 contrib/check-py3-compat.py \
17 > | xargs $PYTHON3 contrib/check-py3-compat.py \
18 > | sed 's/[0-9][0-9]*)$/*)/'
18 > | sed 's/[0-9][0-9]*)$/*)/'
19 hgext/automv.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
20 hgext/blackbox.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
21 hgext/bugzilla.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
22 hgext/censor.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
23 hgext/chgserver.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
24 hgext/children.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
25 hgext/churn.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
26 hgext/clonebundles.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
27 hgext/color.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
28 hgext/convert/bzr.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
19 hgext/convert/bzr.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
29 hgext/convert/common.py: error importing module: <AttributeError> module 'mercurial.util' has no attribute 'pickle' (line *)
20 hgext/convert/convcmd.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
30 hgext/convert/convcmd.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
31 hgext/convert/cvs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
21 hgext/convert/cvs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
32 hgext/convert/cvsps.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
33 hgext/convert/darcs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
22 hgext/convert/darcs.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
34 hgext/convert/filemap.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
23 hgext/convert/filemap.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
35 hgext/convert/git.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
24 hgext/convert/git.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
36 hgext/convert/gnuarch.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
25 hgext/convert/gnuarch.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
37 hgext/convert/hg.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
26 hgext/convert/hg.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
38 hgext/convert/monotone.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
27 hgext/convert/monotone.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
39 hgext/convert/p4.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
28 hgext/convert/p4.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
40 hgext/convert/subversion.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
29 hgext/convert/subversion.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
41 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *)
30 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *)
42 hgext/eol.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
43 hgext/extdiff.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
44 hgext/factotum.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
45 hgext/fetch.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
46 hgext/fsmonitor/watchmanclient.py: error importing module: <SystemError> Parent module 'hgext.fsmonitor' not loaded, cannot perform relative import (line *)
31 hgext/fsmonitor/watchmanclient.py: error importing module: <SystemError> Parent module 'hgext.fsmonitor' not loaded, cannot perform relative import (line *)
47 hgext/gpg.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
32 hgext/journal.py: error importing module: <SystemError> Parent module 'hgext' not loaded, cannot perform relative import (line *)
48 hgext/graphlog.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
49 hgext/hgk.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
50 hgext/histedit.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
51 hgext/journal.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
52 hgext/keyword.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'httpserver' (error at common.py:*)
53 hgext/largefiles/basestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
33 hgext/largefiles/basestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
54 hgext/largefiles/lfcommands.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
34 hgext/largefiles/lfcommands.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
55 hgext/largefiles/lfutil.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
56 hgext/largefiles/localstore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
35 hgext/largefiles/localstore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
57 hgext/largefiles/overrides.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
36 hgext/largefiles/overrides.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
58 hgext/largefiles/proto.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
37 hgext/largefiles/proto.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
59 hgext/largefiles/remotestore.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
38 hgext/largefiles/remotestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
60 hgext/largefiles/reposetup.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
39 hgext/largefiles/reposetup.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
61 hgext/largefiles/storefactory.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
40 hgext/largefiles/storefactory.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
62 hgext/largefiles/uisetup.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'httpserver' (error at common.py:*)
41 hgext/largefiles/uisetup.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
63 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
42 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
64 hgext/mq.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
43 hgext/mq.py: error importing: <TypeError> startswith first arg must be str or a tuple of str, not bytes (error at extensions.py:*)
65 hgext/notify.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
44 hgext/rebase.py: error importing: <TypeError> Can't convert 'bytes' object to str implicitly (error at registrar.py:*)
66 hgext/pager.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
45 hgext/record.py: error importing module: <KeyError> '^commit|ci' (line *)
67 hgext/patchbomb.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
46 hgext/shelve.py: error importing module: <SystemError> Parent module 'hgext' not loaded, cannot perform relative import (line *)
68 hgext/purge.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
47 hgext/transplant.py: error importing: <TypeError> Can't convert 'bytes' object to str implicitly (error at registrar.py:*)
69 hgext/rebase.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
70 hgext/record.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
71 hgext/relink.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
72 hgext/schemes.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
73 hgext/share.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
74 hgext/shelve.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
75 hgext/strip.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
76 hgext/transplant.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
77 mercurial/archival.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
78 mercurial/bundle2.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
79 mercurial/bundlerepo.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
80 mercurial/byterange.py: error importing module: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (line *)
81 mercurial/changelog.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
82 mercurial/cmdutil.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
83 mercurial/commands.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
84 mercurial/context.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
85 mercurial/crecord.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
86 mercurial/dispatch.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
87 mercurial/encoding.py: error importing module: <TypeError> bytes expected, not str (line *)
48 mercurial/encoding.py: error importing module: <TypeError> bytes expected, not str (line *)
88 mercurial/exchange.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
49 mercurial/fileset.py: error importing: <TypeError> Can't convert 'bytes' object to str implicitly (error at registrar.py:*)
89 mercurial/extensions.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
90 mercurial/filelog.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
91 mercurial/filemerge.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
92 mercurial/fileset.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
93 mercurial/formatter.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
94 mercurial/help.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
95 mercurial/hg.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
96 mercurial/hgweb/common.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
97 mercurial/hgweb/hgweb_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
98 mercurial/hgweb/hgwebdir_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
99 mercurial/hgweb/protocol.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
100 mercurial/hgweb/request.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
101 mercurial/hgweb/server.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
102 mercurial/hgweb/webcommands.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
103 mercurial/hgweb/webutil.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
104 mercurial/hgweb/wsgicgi.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
105 mercurial/hook.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
106 mercurial/httpconnection.py: error importing module: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (line *)
107 mercurial/httppeer.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
108 mercurial/i18n.py: error importing module: <TypeError> bytes expected, not str (line *)
50 mercurial/i18n.py: error importing module: <TypeError> bytes expected, not str (line *)
109 mercurial/keepalive.py: error importing module: <AttributeError> module 'mercurial.util' has no attribute 'httplib' (line *)
110 mercurial/localrepo.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
111 mercurial/manifest.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
112 mercurial/merge.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
113 mercurial/namespaces.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
114 mercurial/patch.py: error importing module: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (line *)
115 mercurial/pvec.py: error importing module: <NameError> name 'xrange' is not defined (line *)
116 mercurial/repair.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
117 mercurial/revlog.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
118 mercurial/revset.py: error importing module: <AttributeError> 'dict' object has no attribute 'iteritems' (line *)
51 mercurial/revset.py: error importing module: <AttributeError> 'dict' object has no attribute 'iteritems' (line *)
119 mercurial/scmwindows.py: error importing module: <ImportError> No module named 'winreg' (line *)
52 mercurial/scmwindows.py: error importing module: <ImportError> No module named 'winreg' (line *)
120 mercurial/sshpeer.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
53 mercurial/store.py: error importing module: <TypeError> Can't convert 'bytes' object to str implicitly (line *)
121 mercurial/sshserver.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
122 mercurial/statichttprepo.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at byterange.py:*)
123 mercurial/store.py: error importing module: <NameError> name 'xrange' is not defined (line *)
124 mercurial/subrepo.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
125 mercurial/templatefilters.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
126 mercurial/templatekw.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
127 mercurial/templater.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
128 mercurial/ui.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
129 mercurial/unionrepo.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'stringio' (error at patch.py:*)
130 mercurial/url.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
131 mercurial/win32.py: error importing module: <ImportError> No module named 'msvcrt' (line *)
54 mercurial/win32.py: error importing module: <ImportError> No module named 'msvcrt' (line *)
132 mercurial/windows.py: error importing module: <ImportError> No module named 'msvcrt' (line *)
55 mercurial/windows.py: error importing module: <ImportError> No module named 'msvcrt' (line *)
133 mercurial/wireproto.py: error importing: <TypeError> %b requires bytes, or an object that implements __bytes__, not 'str' (error at bundle2.py:*)
134
56
135 #endif
57 #endif
136
58
137 #if py3exe py3pygments
59 #if py3exe py3pygments
138 $ hg files 'set:(**.py) and grep(pygments)' | sed 's|\\|/|g' \
60 $ hg files 'set:(**.py) and grep(pygments)' | sed 's|\\|/|g' \
139 > | xargs $PYTHON3 contrib/check-py3-compat.py \
61 > | xargs $PYTHON3 contrib/check-py3-compat.py \
140 > | sed 's/[0-9][0-9]*)$/*)/'
62 > | sed 's/[0-9][0-9]*)$/*)/'
141 #endif
63 #endif
General Comments 0
You need to be logged in to leave comments. Login now