##// END OF EJS Templates
pycompat: add util.stringio to handle py3 divergence...
timeless -
r28835:68a946e8 default
parent child Browse files
Show More
@@ -1,25 +1,32
1 # pycompat.py - portability shim for python 3
1 # pycompat.py - portability shim for python 3
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 """Mercurial portability shim for python 3.
6 """Mercurial portability shim for python 3.
7
7
8 This contains aliases to hide python version-specific details from the core.
8 This contains aliases to hide python version-specific details from the core.
9 """
9 """
10
10
11 from __future__ import absolute_import
11 from __future__ import absolute_import
12
12
try:
    # Python 2: prefer the C implementation of StringIO for speed.
    import cStringIO as io
    stringio = io.StringIO
except ImportError:
    # Python 3: cStringIO is gone; io.StringIO is the replacement.
    import io
    stringio = io.StringIO

try:
    # Python 2 spelling of the queue module; touch the Queue attribute
    # so an unexpected shadowing module still falls through to py3.
    import Queue as _queue
    _queue.Queue
except ImportError:
    # Python 3 renamed the module to lowercase 'queue'.
    import queue as _queue
empty = _queue.Empty
queue = _queue.Queue

try:
    xrange
except NameError:
    # Python 3 dropped xrange; alias the lazy range() under the old
    # name so callers can keep using xrange unconditionally.
    import builtins
    builtins.xrange = range
@@ -1,2740 +1,2741
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import urllib
37 import urllib
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 pycompat,
46 pycompat,
47 )
47 )
48
48
# Hoist selected py2/py3 compatibility aliases out of pycompat so the
# rest of this module (and its users) can reference them directly.
for attr in (
    'empty',
    'queue',
    'stringio',
):
    globals()[attr] = getattr(pycompat, attr)

# Select the platform-specific implementation module once at import time.
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

md5 = hashlib.md5
sha1 = hashlib.sha1
sha512 = hashlib.sha512
_ = i18n._

# Re-export the platform layer's entry points under stable module-level
# names so callers never touch `platform` directly.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# Prefer the C-accelerated osutil version when it exists.
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username

# Python compatibility

# Unique sentinel distinguishing "absent" from any real value (incl. None).
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
128
129
def safehasattr(thing, attr):
    """Return True if `thing` has an attribute named `attr`.

    Implemented via getattr() with a sentinel default rather than the
    built-in hasattr().
    """
    return getattr(thing, attr, _notset) is not _notset
131
132
# Supported digest algorithms, keyed by name.
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check: every entry in the preference list must have a constructor.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
142
143
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        # digests: iterable of names from DIGESTS; s: optional initial data.
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # Feed 'data' to every underlying hash object.
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest computed so far for digest type `key`."""
        if key not in DIGESTS:
            # BUG FIX: the original formatted this message with 'k' (a
            # leftover module-level loop variable) instead of 'key',
            # producing a wrong digest name in the error.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
189
190
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # Hash and count every byte handed out to the caller.
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Raise Abort unless the observed size and all digests match."""
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, want in self._digests.items():
            got = self._digester[name]
            if want != got:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, want, got))
221
222
try:
    # Python 2 ships a builtin buffer(); reuse it as-is.
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            # Plain slice copy as a fallback.
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # Python 3: memoryview gives a zero-copy view of the tail.
            return memoryview(sliceable)[offset:]
231
232
closefds = os.name == 'posix'

# Size of each raw read issued by bufferedinputpipe._fillbuffer.
_chunksize = 4096

class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []       # list of pending chunks, newest last
        self._eof = False
        self._lenbuf = 0        # total bytes currently buffered

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # Keep pulling raw chunks until we can satisfy the request or hit EOF.
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        nl = -1
        if self._buffer:
            nl = self._buffer[-1].find('\n')
        while (not self._eof) and nl < 0:
            self._fillbuffer()
            if self._buffer:
                nl = self._buffer[-1].find('\n')
        size = nl + 1
        if nl < 0:  # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        joined = self._buffer[0]
        if 1 < len(self._buffer):
            joined = ''.join(self._buffer)

        data = joined[:size]
        rest = joined[len(data):]
        if rest:
            self._buffer = [rest]
            self._lenbuf = len(rest)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
329
330
def popen2(cmd, env=None, newlines=False):
    """Run `cmd` through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
340
341
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but drop the Popen object from the result."""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
344
345
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run `cmd` through the shell; return (stdin, stdout, stderr, proc)."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
353
354
def version():
    """Return version information if available, else 'unknown'."""
    try:
        # The __version__ module is generated at build time and may be
        # absent when running from a source checkout.
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
361
362
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # Split off the "extra" part after the first '+', if any; extra stays
    # None (not '') when there is no '+'.
    if '+' in v:
        vparts, extra = v.split('+', 1)
    else:
        vparts, extra = v, None

    # Collect leading numeric components; stop at the first non-integer.
    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
414
415
# used by parsedate
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# Extra, more ambiguous formats accepted when the user opts in.
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
449
450
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # Zero-argument case: stash the single result in a one-slot list.
        slot = []
        def f():
            if not slot:
                slot.append(func())
            return slot[0]
        return f
    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def f(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return f
475
476
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-assigning an existing key
    moves it to the end.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            # items() yields the same pairs on Python 2 and 3; the
            # original used iteritems(), which py3 dicts do not have.
            src = src.items()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # BUG FIX: the original discarded dict.pop()'s result, so
        # sortdict.pop() always returned None instead of the value
        # (or the supplied default), violating the dict contract.
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent from the order list (e.g. pop with default)
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
520
521
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # Links are wired up by the owning lrucachedict.
        self.next = None
        self.prev = None

        # key is the _notset sentinel while the node holds no entry.
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
539
540
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # Backing mapping of key -> _lrucachenode holding that key's value.
        self._cache = {}

        # Start with one empty node linked to itself; more nodes are added
        # lazily (up to ``max``) as entries are inserted.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the cached value for ``k``, or ``default`` if absent.

        A hit counts as an access and refreshes the entry's position in
        the LRU order, exactly like ``__getitem__``.
        """
        try:
            # Fixed: this previously returned the internal _lrucachenode
            # wrapper (self._cache[k]) instead of the stored value, and
            # bypassed the LRU reordering. Delegate to __getitem__, which
            # does both correctly.
            return self[k]
        except KeyError:
            return default

    def clear(self):
        """Empty the cache, leaving the allocated node ring in place."""
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        """Return a new lrucachedict with the same entries and ordering."""
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    # results maps argument(s) -> cached return value; lru tracks recency,
    # oldest on the left.
    results = {}
    lru = collections.deque()
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in results:
                # Cache hit: refresh recency.
                lru.remove(arg)
            else:
                # Cache miss: evict the oldest entry once we exceed 21.
                if len(results) > 20:
                    del results[lru.popleft()]
                results[arg] = func(arg)
            lru.append(arg)
            return results[arg]
    else:
        def f(*args):
            if args in results:
                lru.remove(args)
            else:
                if len(results) > 20:
                    del results[lru.popleft()]
                results[args] = func(*args)
            lru.append(args)
            return results[args]

    return f
class propertycache(object):
    """Descriptor turning a method into a lazily computed attribute.

    The wrapped function runs on first access; its result is stored on the
    instance under the same name, so subsequent accesses never reach the
    descriptor again.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        computed = self.func(obj)
        self.cachevalue(obj, computed)
        return computed

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # stderr is deliberately not captured, so errors surface directly.
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate(s)
    return stdout
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    infile = outfile = None
    try:
        # Write the input to a temp file the command can read.
        fd, infile = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(fd, 'wb')
        fp.write(s)
        fp.close()
        # Reserve an output temp file for the command to fill in.
        fd, outfile = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(fd)
        realcmd = cmd.replace('INFILE', infile).replace('OUTFILE', outfile)
        code = os.system(realcmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (realcmd, explainexit(code)))
        return readfile(outfile)
    finally:
        # Best-effort cleanup of both temp files.
        for name in (infile, outfile):
            try:
                if name:
                    os.unlink(name)
            except OSError:
                pass
# Maps a command-spec prefix to the strategy implementing it; consulted
# by filter() to decide how a filter command is executed.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            return fn(s, cmd[len(prefix):].lstrip())
    # No recognized prefix: treat the whole command as a pipe filter.
    return pipefilter(s, cmd)
def binary(s):
    """return true if a string is binary data"""
    # A NUL byte anywhere marks the content as binary; empty input is not.
    if not s:
        return False
    return '\0' in s
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def _floorlog2(x):
        # Index of the highest set bit; 0 when x is 0.
        if not x:
            return 0
        n = 0
        while x > 1:
            x >>= 1
            n += 1
        return n

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen < min:
            continue
        # Grow the threshold: double it, but jump straight to the largest
        # power of two not exceeding what we just accumulated, capped at max.
        if min < max:
            min <<= 1
            nmin = 1 << _floorlog2(pendinglen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        yield ''.join(pending)
# Re-export so callers can use util.Abort without importing error directly.
Abort = error.Abort
def always(fn):
    """Predicate that returns True for any input (``fn`` is ignored)."""
    return True
def never(fn):
    """Predicate that returns False for any input (``fn`` is ignored)."""
    return False
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Restore only if the collector was on before; never force it on.
            if wasenabled:
                gc.enable()
    return wrapper
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # On a different drive there is no relative path; fall back to an
        # absolute target path.
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    fromparts = splitpath(n1)
    toparts = n2.split('/')
    # Strip the components both paths share.
    common = 0
    while (common < len(fromparts) and common < len(toparts)
           and fromparts[common] == toparts[common]):
        common += 1
    # Climb out of what remains of n1, then descend into n2.
    updirs = ['..'] * (len(fromparts) - common)
    return os.sep.join(updirs + toparts[common:]) or '.'
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    # NOTE: the or-chain returns the first truthy check result as-is,
    # which callers treat as a boolean.
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# Tell the i18n machinery where to find translation data files.
i18n.setdatapath(datapath)
# Cached path of the 'hg' executable; lazily initialized by hgexecutable()
# and written via _sethgexecutable().
_hgexecutable = None
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            candidate = hg
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                candidate = os.environ['EXECUTABLEPATH']
            else:
                candidate = sys.executable
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            candidate = mainmod.__file__
        else:
            candidate = findexe('hg') or os.path.basename(sys.argv[0])
        _sethgexecutable(candidate)
    return _hgexecutable
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # Updates the module-level cache consulted by hgexecutable().
    global _hgexecutable
    _hgexecutable = path
def _isstdout(f):
    """Return whether file object ``f`` refers to the process stdout fd."""
    fileno = getattr(f, 'fileno', None)
    if not fileno:
        # No fileno attribute: definitely not the real stdout. Returning
        # the falsy attribute preserves the original truthiness contract.
        return fileno
    return fileno() == sys.__stdout__.fileno()
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    # Flush our own buffered output so it appears before the child's.
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    # Keep the unquoted command around for error reporting.
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # Pass a tweaked environment: caller overrides are shell-stringified
        # and $HG always points at our executable.
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # Redirecting to an arbitrary file object: stream the child's
            # combined stdout/stderr through it line by line.
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback depth of one means the TypeError was raised by
            # the call itself (i.e. a bad signature), not from inside func.
            tb = sys.exc_info()[2]
            if len(traceback.extract_tb(tb)) == 1:
                raise error.SignatureError
            raise

    return check
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # Recreate the symlink itself rather than copying its target.
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway.
        os.symlink(os.readlink(src), dest)
        return
    try:
        shutil.copyfile(src, dest)
        if copystat:
            # copystat also copies mode
            shutil.copystat(src, dest)
        else:
            shutil.copymode(src, dest)
    except shutil.Error as inst:
        raise Abort(str(inst))
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # Only attempt hardlinks when src and dst live on the same device.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    topic = _('linking') if hardlink else _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset nested progress by what we've already processed.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Linking failed: copy instead, and stop trying to link
                # for the rest of this tree.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)

    progress(topic, None)

    return hardlink, num
# Filename components reserved on Windows regardless of extension.
_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# Characters that may never appear in a Windows filename component.
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for c in part:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # Only the part before the first dot matters for reserved names
        # (e.g. "con.xml" is still reserved).
        base = part.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        last = part[-1]
        if last in '. ' and part not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
# Select the platform-appropriate filename validity checker: on Windows use
# the reserved-name/character rules implemented above; elsewhere defer to the
# platform module's implementation.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1127
1128
def makelock(info, pathname):
    """Create a lock at pathname whose content is info.

    The preferred form is a symlink whose target is info; when the os
    module has no symlink, or symlink fails for reasons other than the
    lock already existing, fall back to an exclusively-created regular
    file containing the same bytes.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # an existing lock is the one symlink failure we must surface
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # fallback: O_EXCL makes racing lock attempts fail loudly
    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1140
1141
def readlock(pathname):
    """Return the info stored in the lock at pathname.

    Reads the symlink target when the lock is a symlink; for a plain-file
    lock (EINVAL), a platform without readlink support (ENOSYS), or an os
    module lacking readlink entirely, read the file contents instead.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1153
1154
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no file descriptor available; fall back to stat by name
        return os.stat(fp.name)
    return os.fstat(fd)
1160
1161
1161 # File system features
1162 # File system features
1162
1163
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st = os.lstat(path)
    dirname, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # nothing to case-fold; no evidence against case sensitivity
        return True
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # folded sibling does not exist: looks case-sensitive
        return True
    # identical lstat results mean both spellings hit the same entry
    return st2 != st
1185
1186
# Optional re2 regex engine. _re2 is tri-state: None means "module imported
# but not yet verified to actually work" (the check is deferred to
# _re._checkre2 below); False means unavailable.
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1191
1192
class _re(object):
    # Regex facade that uses the re2 engine when it is importable, works,
    # and the requested flags are re2-compatible; otherwise falls back to
    # the stdlib re module (aliased as remod in this file).
    def _checkre2(self):
        # Resolve the tri-state _re2 flag to a definite True/False.
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            # first use: probe whether re2 actually works
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2.compile takes no flags argument here, so translate the two
            # supported flags into inline pattern prefixes
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern not supported by re2; fall through to stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
1234
1235
# module-level singleton exposing compile()/escape with the re2 fallback
re = _re()

# cache for fspath(): directory path -> {normcased name: on-disk name}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # one directory listing, keyed by normcased name
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # Bug fix: str.replace() returns a new string and the previous code
    # discarded the result, so on Windows the literal backslash leaked
    # unescaped into the regex character classes below (where '\/' reads
    # as an escaped slash instead of "backslash or slash").
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs pass through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the caller's spelling if the name is not on disk
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1279
1280
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        # after oslink, a correct filesystem reports two links to the data
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always close the probe handle and remove both scratch files
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1311
1312
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # preserve the original short-circuit result: None when os.altsep is
    # unset, otherwise the altsep endswith() check
    return os.altsep and path.endswith(os.altsep)
1315
1316
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    sep = os.sep
    return path.split(sep)
1323
1324
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # on other platforms: Windows always has a GUI, elsewhere an X
        # display must be present (value may be the DISPLAY string)
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1338
1339
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file next to the original so callers can rename it
    # into place (see atomictempfile below)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            # a missing original means the (empty) temp file is the copy
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # never leave a stray temp file behind on failure
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1377
1378
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # a 'w' mode means the caller rewrites everything, so the temp
        # copy may start out empty (emptyok fast path in mktempcopy)
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # publish: move the temp file to the permanent name
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abandon all writes: delete the temp file without renaming
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1415
1416
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # NOTE(review): an existing path is treated as success even if
            # it is not a directory -- confirm callers expect that
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # leaf creation failed because a parent is missing: build the
        # parent chain first, then retry the leaf
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root; nothing more to create
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        # each level of the recursion chmods the directory it created
        os.chmod(name, mode)
1432
1433
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        # already present: nothing to do (and no chmod on existing dirs)
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        # create ancestors first, stopping at the filesystem root
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1454
1455
def readfile(path):
    """Return the entire binary contents of the file at path."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1458
1459
def writefile(path, text):
    """Write text to the file at path, replacing any existing contents."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1462
1463
def appendfile(path, text):
    """Append text to the file at path, creating it if missing."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1466
1467
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        # Re-chunk anything larger than 1MB into 256KB slices so one huge
        # input chunk cannot dominate memory use.
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # offset of the first unconsumed byte within self._queue[0]
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source iterator exhausted; return what we have
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # left goes negative here; the while condition then ends
                # the loop
                left -= chunkremaining

        return ''.join(buf)
1547
1548
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        if not nbytes:
            # limit exhausted (or size == 0): stop without reading
            break
        data = f.read(nbytes)
        if not data:
            # end of file/stream
            break
        if limit:
            limit -= len(data)
        yield data
1568
1569
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # offset = UTC wallclock minus local wallclock, in seconds
    delta = (datetime.datetime.utcfromtimestamp(timestamp)
             - datetime.datetime.fromtimestamp(timestamp))
    offset = delta.days * 86400 + delta.seconds
    return timestamp, offset
1581
1582
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC."""
    when, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the custom %1/%2 (and %z shorthand) zone placeholders
        sign = "-" if tz > 0 else "+"
        hours, mins = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, hours))
        format = format.replace("%2", "%02d" % mins)
    seconds = when - tz
    # clamp to the signed 32-bit range handled below
    if seconds > 0x7fffffff:
        seconds = 0x7fffffff
    elif seconds < -0x7fffffff:
        seconds = -0x7fffffff
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    stamp = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=seconds)
    return stamp.strftime(format)
1605
1606
def shortdate(date=None):
    """Render a (timestamp, tzoff) tuple as an ISO-8601 YYYY-MM-DD date."""
    fmt = '%Y-%m-%d'
    return datestr(date, format=fmt)
1609
1610
def parsetimezone(tz):
    """parse a timezone string and return an offset integer"""
    # numeric form: +HHMM / -HHMM
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # offset is seconds away from UTC, negated relative to the sign
        return -sign * (hours * 60 + minutes) * 60
    if tz in ("GMT", "UTC"):
        return 0
    # unrecognized timezone token
    return None
1620
1621
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE(review): defaults is indexed with string keys below
    # (defaults[part]), so the [] default would raise TypeError if it were
    # ever used -- callers presumably always pass a dict; confirm.
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # the last token was a recognized timezone: strip it before strptime
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # element absent from format: append a default value (and a
            # matching format directive) separated by '@'
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1650
1651
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    # empty input means the epoch
    if not date:
        return 0, 0
    # already parsed?
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates (possibly localized)
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # "unixtime offset" fast path
        when, offset = map(int, date.split(' '))
    except ValueError:
        # not "unixtime offset": build per-part defaults, then try each format
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # rounding value for the specific end of unknowns: explicit
            # bias if given, else zero
            rounded = bias.get(part)
            if rounded is None:
                if part[0] in "HMS":
                    rounded = "00"
                else:
                    rounded = "0"

            # the generic end of unknowns matches today's date
            current = datestr(now, "%" + part[0])

            defaults[part] = (rounded, current)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                continue
            break
        else:
            # no format matched
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1727
1728
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lowerbound(spec):
        # earliest timestamp a (possibly partial) date can denote
        biases = {'mb': "1", 'd': "1"}
        return parsedate(spec, extendeddateformats, biases)[0]

    def upperbound(spec):
        # latest timestamp: probe month lengths from longest to shortest
        biases = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            biases["d"] = days
            try:
                return parsedate(spec, extendeddateformats, biases)[0]
            except Abort:
                pass
        biases["d"] = "28"
        return parsedate(spec, extendeddateformats, biases)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    if date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upperbound(date[1:])
        return lambda x: x <= when
    if date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lowerbound(date[1:])
        return lambda x: x >= when
    if date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    if " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lowerbound(a), upperbound(b)
        return lambda x: start <= x <= stop
    # a single date matched to its full (partial) accuracy
    start, stop = lowerbound(date), upperbound(date)
    return lambda x: start <= x <= stop
1803
1804
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        regexp = pattern[3:]
        try:
            compiled = remod.compile(regexp)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexp, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # no prefix (or an unknown one): exact string comparison
    return 'literal', pattern, pattern.__eq__
1842
1843
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the domain part, if any
    idx = user.find('@')
    if idx >= 0:
        user = user[:idx]
    # "Real Name <addr..." -> keep only what follows '<'
    idx = user.find('<')
    if idx >= 0:
        user = user[idx + 1:]
    # finally keep the first word, then its first dot-separated component
    for sep in (' ', '.'):
        idx = user.find(sep)
        if idx >= 0:
            user = user[:idx]
    return user
1858
1859
def emailuser(user):
    """Return the user portion of an email address."""
    # strip everything from '@' on
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    # if a '<' remains (e.g. "Name <user"), keep only what follows it
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    return user
1868
1869
def email(author):
    '''get email of author.'''
    # the text between the first '<' and the first '>'; if there is no
    # '>', everything after '<' (and with no '<', the whole string)
    end = author.find('>')
    if end == -1:
        end = None
    return author[author.find('<') + 1:end]
1875
1876
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # encoding.trim is display-column aware (wide characters count as 2)
    return encoding.trim(text, maxlength, ellipsis='...')
1879
1880
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        # each row is (multiplier, divisor, format); a row applies once
        # count reaches multiplier * divisor, so list rows largest-first
        for threshold, unit, fmt in unittable:
            if count >= unit * threshold:
                return fmt % (count / float(unit))
        # smaller than every threshold: fall back to the last row
        return unittable[-1][2] % count

    return render
1890
1891
# human-readable byte counter: rows must stay ordered from the largest
# (multiplier, divisor) product down to the plain-bytes fallback
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1903
1904
def uirepr(s):
    # Avoid double backslash in Windows path repr()
    quoted = repr(s)
    return quoted.replace('\\\\', '\\')
1907
1908
1908 # delay import of textwrap
1909 # delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr at the largest prefix whose display width still
            # fits within space_left
            consumed = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                consumed += colwidth(ucstr[i])
                if space_left < consumed:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                # chop the overlong chunk; its remainder stays on the stack
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            wrapped = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if wrapped:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and wrapped:
                    del chunks[-1]

                while chunks:
                    chunkcols = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + chunkcols <= width:
                        cur_line.append(chunks.pop())
                        cur_len += chunkcols

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    wrapped.append(indent + ''.join(cur_line))

            return wrapped

    # memoize: rebind the module-level name so the class is built only once
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2011
2012
def wrap(line, width, initindent='', hangindent=''):
    """Wrap line to width display columns with separate first/continuation
    indents, returning bytes in the local encoding."""
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    decode = lambda s: s.decode(encoding.encoding, encoding.encodingmode)
    line = decode(line)
    initindent = decode(initindent)
    hangindent = decode(hangindent)
    tw = MBTextWrapper(width=width,
                       initial_indent=initindent,
                       subsequent_indent=hangindent)
    return tw.fill(line).encode(encoding.encoding)
2024
2025
def iterlines(iterator):
    """yield every line of every chunk produced by iterator"""
    return (line for chunk in iterator for line in chunk.splitlines())
2029
2030
def expandpath(path):
    """expand $VAR style environment variables, then ~user constructs"""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2032
2033
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2047
2048
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # On Windows the child either starts and validates the condition or
    # exits on failure, so polling its PID suffices. On Unix a child that
    # fails to start lingers as a zombie until waited on, which we cannot
    # do while expecting a long-running process on success -- so listen
    # for SIGCHLD to learn when our child terminates instead.
    terminated = set()
    def onchild(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, onchild)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() after detecting death to avoid a race
            # between the child succeeding and then exiting
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2082
2083
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # allow the doubled prefix to stand for a literal prefix character
        patterns += '|' + prefix
        if len(prefix) > 1:
            # regex-escaped prefix: the literal char is after the backslash
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))
    # group()[1:] strips the leading prefix character from the match
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
2107
2108
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    # numeric ports (int or digit string) pass straight through
    try:
        return int(port)
    except ValueError:
        pass

    # otherwise treat it as a service name
    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
2124
2125
2125 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2126 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2126 '0': False, 'no': False, 'false': False, 'off': False,
2127 '0': False, 'no': False, 'false': False, 'off': False,
2127 'never': False}
2128 'never': False}
2128
2129
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # lookup is case-insensitive; unknown spellings yield None
    return _booleans.get(s.lower())
2135
2136
2136 _hexdig = '0123456789ABCDEFabcdef'
2137 _hexdig = '0123456789ABCDEFabcdef'
2137 _hextochr = dict((a + b, chr(int(a + b, 16)))
2138 _hextochr = dict((a + b, chr(int(a + b, 16)))
2138 for a in _hexdig for b in _hexdig)
2139 for a in _hexdig for b in _hexdig)
2139
2140
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    pieces = s.split('%')
    # fastpath: no '%' at all, nothing to decode
    if len(pieces) == 1:
        return s
    decoded = pieces[0]
    for piece in pieces[1:]:
        try:
            decoded += _hextochr[piece[:2]] + piece[2:]
        except KeyError:
            # not a valid two-digit escape; keep the '%' literally
            decoded += '%' + piece
        except UnicodeDecodeError:
            # str/unicode mix: decode the escape by hand
            decoded += unichr(int(piece[:2], 16)) + piece[2:]
    return decoded
2159
2160
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters left unescaped by __str__ in each URL component
    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        # anything with a "scheme:" prefix is treated as a real URL;
        # everything else stays a local path
        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        # show only the components that were actually parsed
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                # bracketed IPv6 literal: quoting would mangle the brackets
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url-without-credentials, authinfo-or-None).

        The second element is the tuple handed to urllib2 password
        managers when a user is present.
        """
        user, passwd = self.user, self.passwd
        try:
            # temporarily drop credentials so str(self) omits them
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """True if this URL cannot be joined onto a base path."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return a filesystem path for file:/bundle: URLs; otherwise
        the original, unparsed string."""
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2446
2447
def hasscheme(path):
    '''True if path parses as a URL with an explicit scheme.'''
    parsed = url(path)
    return bool(parsed.scheme)
2449
2450
def hasdriveletter(path):
    '''True if path starts with a Windows drive letter ("c:...").

    Falsy inputs ('' or None) are returned unchanged.
    '''
    if not path:
        return path
    return path[0:1].isalpha() and path[1:2] == ':'
2452
2453
def urllocalpath(path):
    '''Return the local filesystem form of path, treating any query or
    fragment markers as literal path characters.'''
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
2455
2456
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # keep the structure of the URL but mask the secret
        parsed.passwd = '***'
    return str(parsed)
2462
2463
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2468
2469
def isatty(fp):
    '''Best-effort fp.isatty(); False for objects lacking the method.'''
    result = False
    try:
        result = fp.isatty()
    except AttributeError:
        pass
    return result
2474
2475
# Render a duration in seconds as a short human-readable string, choosing
# the coarsest fitting unit and precision. Each tuple is presumably
# (threshold, divisor, format) as interpreted by unitcountfn, which is
# defined elsewhere in this file -- confirm against its docstring.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2490
2491
# current indentation depth for nested @timed reports (in spaces)
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = time.time()
        step = 2
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            duration = time.time() - begin
            _timenesting[0] -= step
            report = '%s%s: %s\n' % (' ' * _timenesting[0],
                                     func.__name__,
                                     timecount(duration))
            sys.stderr.write(report)
    return wrapper
2517
2518
# suffix -> byte multiplier; longer suffixes listed so that e.g. 'kb'
# still matches before the bare-'b' fallback at the end
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    text = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if text.endswith(suffix):
                return int(float(text[:-len(suffix)]) * multiplier)
        return int(text)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2539
2540
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source-name, callable) pairs; sorted lazily on call
        self._hooks = []

    def add(self, source, hook):
        '''Register hook under the given source name.'''
        self._hooks.append((source, hook))

    def __call__(self, *args):
        '''Invoke every hook with *args; return their results in
        source-name order.'''
        self._hooks.sort(key=lambda entry: entry[0])
        return [fn(*args) for _src, fn in self._hooks]
2557
2558
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    frames = traceback.extract_stack()[:-skip - 1]
    entries = []
    for fn, ln, func, _text in frames:
        entries.append((fileline % (fn, ln), func))
    if not entries:
        return
    # pad every location to the widest one so the columns line up
    fnmax = max(len(loc) for loc, _func in entries)
    for loc, func in entries:
        if line is None:
            yield (fnmax, loc, func)
        else:
            yield line % (fnmax, loc, func)
2579
2580
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so interleaved output stays ordered
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1 also hides this function's own frame from the trace
    for line in getstackframes(skip + 1):
        f.write(line)
    f.flush()
2592
2593
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> number of entries living under it
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: honour the skip state marker
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        '''Count path under each of its ancestor directories.'''
        counts = self._dirs
        for base in finddirs(path):
            try:
                counts[base] += 1
                # ancestors of an already-known dir are known too
                return
            except KeyError:
                counts[base] = 1

    def delpath(self, path):
        '''Remove one count of path from each ancestor directory.'''
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2628
2629
# Prefer the C implementation of dirs from the parsers module when this
# build provides one; it replaces the pure-Python class defined above.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2631
2632
def finddirs(path):
    '''Yield each ancestor directory of a '/'-separated path,
    deepest first (e.g. 'a/b/c' -> 'a/b', 'a').'''
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
2637
2638
2638 # compression utility
2639 # compression utility
2639
2640
class nocompress(object):
    '''Identity "compressor": hands data back unchanged.'''

    def compress(self, x):
        # nothing to do; the data passes straight through
        return x

    def flush(self):
        # no internal buffer, so flushing yields nothing
        return ""
2645
2646
# Map compression-type identifiers (presumably the two-letter markers
# used in bundle headers -- confirm against the bundle code) to factories
# returning an object with compress()/flush() methods.
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2654
2655
def _makedecompressor(decompcls):
    '''Build a function fh -> chunkbuffer that streams fh through a
    decompressor instantiated from decompcls.'''
    def generator(f):
        engine = decompcls()
        for chunk in filechunkiter(f):
            yield engine.decompress(chunk)

    def func(fh):
        return chunkbuffer(generator(fh))
    return func
2663
2664
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once.  This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers.  These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.  Returns the list of values produced
        by the managers' __enter__ methods.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        # Entering twice would re-run the factories; make it fail loudly.
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits.  The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # Record the most recent failure and propagate its state
                # to the remaining exit handlers.  (Previously this called
                # sys.exc_info() twice, with the first, bare
                # 'pending = sys.exc_info()' assignment immediately
                # overwritten -- dead code, now removed.)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2723
2724
2724 def _bz2():
2725 def _bz2():
2725 d = bz2.BZ2Decompressor()
2726 d = bz2.BZ2Decompressor()
2726 # Bzip2 stream start with BZ, but we stripped it.
2727 # Bzip2 stream start with BZ, but we stripped it.
2727 # we put it back for good measure.
2728 # we put it back for good measure.
2728 d.decompress('BZ')
2729 d.decompress('BZ')
2729 return d
2730 return d
2730
2731
# Table mapping bundle compression identifiers to functions that take a
# file handle and return a readable decompressed stream.  '_truncatedBZ'
# handles bz2 data whose leading 'BZ' magic was stripped (see _bz2).
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2738
2739
# convenient shortcut: short alias for debugstacktrace, handy when
# dropping a quick stack dump into code while debugging
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now