##// END OF EJS Templates
util: add filestat class to detect ambiguity of file stat...
FUJIWARA Katsunori -
r29200:ca406502 default
parent child Browse files
Show More
@@ -1,2747 +1,2810 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import zlib
37 import zlib
38
38
39 from . import (
39 from . import (
40 encoding,
40 encoding,
41 error,
41 error,
42 i18n,
42 i18n,
43 osutil,
43 osutil,
44 parsers,
44 parsers,
45 pycompat,
45 pycompat,
46 )
46 )
47
47
48 for attr in (
48 for attr in (
49 'empty',
49 'empty',
50 'queue',
50 'queue',
51 'urlerr',
51 'urlerr',
52 # we do import urlreq, but we do it outside the loop
52 # we do import urlreq, but we do it outside the loop
53 #'urlreq',
53 #'urlreq',
54 'stringio',
54 'stringio',
55 ):
55 ):
56 globals()[attr] = getattr(pycompat, attr)
56 globals()[attr] = getattr(pycompat, attr)
57
57
58 # This line is to make pyflakes happy:
58 # This line is to make pyflakes happy:
59 urlreq = pycompat.urlreq
59 urlreq = pycompat.urlreq
60
60
61 if os.name == 'nt':
61 if os.name == 'nt':
62 from . import windows as platform
62 from . import windows as platform
63 else:
63 else:
64 from . import posix as platform
64 from . import posix as platform
65
65
66 md5 = hashlib.md5
66 md5 = hashlib.md5
67 sha1 = hashlib.sha1
67 sha1 = hashlib.sha1
68 sha512 = hashlib.sha512
68 sha512 = hashlib.sha512
69 _ = i18n._
69 _ = i18n._
70
70
71 cachestat = platform.cachestat
71 cachestat = platform.cachestat
72 checkexec = platform.checkexec
72 checkexec = platform.checkexec
73 checklink = platform.checklink
73 checklink = platform.checklink
74 copymode = platform.copymode
74 copymode = platform.copymode
75 executablepath = platform.executablepath
75 executablepath = platform.executablepath
76 expandglobs = platform.expandglobs
76 expandglobs = platform.expandglobs
77 explainexit = platform.explainexit
77 explainexit = platform.explainexit
78 findexe = platform.findexe
78 findexe = platform.findexe
79 gethgcmd = platform.gethgcmd
79 gethgcmd = platform.gethgcmd
80 getuser = platform.getuser
80 getuser = platform.getuser
81 getpid = os.getpid
81 getpid = os.getpid
82 groupmembers = platform.groupmembers
82 groupmembers = platform.groupmembers
83 groupname = platform.groupname
83 groupname = platform.groupname
84 hidewindow = platform.hidewindow
84 hidewindow = platform.hidewindow
85 isexec = platform.isexec
85 isexec = platform.isexec
86 isowner = platform.isowner
86 isowner = platform.isowner
87 localpath = platform.localpath
87 localpath = platform.localpath
88 lookupreg = platform.lookupreg
88 lookupreg = platform.lookupreg
89 makedir = platform.makedir
89 makedir = platform.makedir
90 nlinks = platform.nlinks
90 nlinks = platform.nlinks
91 normpath = platform.normpath
91 normpath = platform.normpath
92 normcase = platform.normcase
92 normcase = platform.normcase
93 normcasespec = platform.normcasespec
93 normcasespec = platform.normcasespec
94 normcasefallback = platform.normcasefallback
94 normcasefallback = platform.normcasefallback
95 openhardlinks = platform.openhardlinks
95 openhardlinks = platform.openhardlinks
96 oslink = platform.oslink
96 oslink = platform.oslink
97 parsepatchoutput = platform.parsepatchoutput
97 parsepatchoutput = platform.parsepatchoutput
98 pconvert = platform.pconvert
98 pconvert = platform.pconvert
99 poll = platform.poll
99 poll = platform.poll
100 popen = platform.popen
100 popen = platform.popen
101 posixfile = platform.posixfile
101 posixfile = platform.posixfile
102 quotecommand = platform.quotecommand
102 quotecommand = platform.quotecommand
103 readpipe = platform.readpipe
103 readpipe = platform.readpipe
104 rename = platform.rename
104 rename = platform.rename
105 removedirs = platform.removedirs
105 removedirs = platform.removedirs
106 samedevice = platform.samedevice
106 samedevice = platform.samedevice
107 samefile = platform.samefile
107 samefile = platform.samefile
108 samestat = platform.samestat
108 samestat = platform.samestat
109 setbinary = platform.setbinary
109 setbinary = platform.setbinary
110 setflags = platform.setflags
110 setflags = platform.setflags
111 setsignalhandler = platform.setsignalhandler
111 setsignalhandler = platform.setsignalhandler
112 shellquote = platform.shellquote
112 shellquote = platform.shellquote
113 spawndetached = platform.spawndetached
113 spawndetached = platform.spawndetached
114 split = platform.split
114 split = platform.split
115 sshargs = platform.sshargs
115 sshargs = platform.sshargs
116 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
116 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
117 statisexec = platform.statisexec
117 statisexec = platform.statisexec
118 statislink = platform.statislink
118 statislink = platform.statislink
119 termwidth = platform.termwidth
119 termwidth = platform.termwidth
120 testpid = platform.testpid
120 testpid = platform.testpid
121 umask = platform.umask
121 umask = platform.umask
122 unlink = platform.unlink
122 unlink = platform.unlink
123 unlinkpath = platform.unlinkpath
123 unlinkpath = platform.unlinkpath
124 username = platform.username
124 username = platform.username
125
125
126 # Python compatibility
126 # Python compatibility
127
127
128 _notset = object()
128 _notset = object()
129
129
130 # disable Python's problematic floating point timestamps (issue4836)
130 # disable Python's problematic floating point timestamps (issue4836)
131 # (Python hypocritically says you shouldn't change this behavior in
131 # (Python hypocritically says you shouldn't change this behavior in
132 # libraries, and sure enough Mercurial is not a library.)
132 # libraries, and sure enough Mercurial is not a library.)
133 os.stat_float_times(False)
133 os.stat_float_times(False)
134
134
def safehasattr(thing, attr):
    """Return True if `thing` has an attribute named `attr`."""
    sentinel = object()
    return getattr(thing, attr, sentinel) is not sentinel
137
137
138 DIGESTS = {
138 DIGESTS = {
139 'md5': md5,
139 'md5': md5,
140 'sha1': sha1,
140 'sha1': sha1,
141 'sha512': sha512,
141 'sha512': sha512,
142 }
142 }
143 # List of digest types from strongest to weakest
143 # List of digest types from strongest to weakest
144 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
144 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
145
145
146 for k in DIGESTS_BY_STRENGTH:
146 for k in DIGESTS_BY_STRENGTH:
147 assert k in DIGESTS
147 assert k in DIGESTS
148
148
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create hash objects for each named digest; optionally seed with `s`.

        Raises Abort for a digest name not present in DIGESTS.
        """
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed `data` into every tracked hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for `key`.

        Bug fix: the error message previously formatted the stale module-level
        loop variable `k` instead of the requested `key`, so the reported
        digest name was wrong.
        """
        if key not in DIGESTS:
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
195
195
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # Everything read through the wrapper is fed to the digester and
        # counted toward the final size check.
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        """Abort when the byte count or any expected digest does not match."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if self._digester[k] != v:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
227
227
try:
    # Python 2 ships a builtin buffer(); keep using it when present.
    buffer = buffer
except NameError:
    if sys.version_info[0] >= 3:
        # Python 3: memoryview gives the same zero-copy slicing semantics.
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]
    else:
        # Builtin missing on an old interpreter: plain slicing (copies).
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
237
237
238 closefds = os.name == 'posix'
238 closefds = os.name == 'posix'
239
239
240 _chunksize = 4096
240 _chunksize = 4096
241
241
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []   # pending chunks, oldest first
        self._eof = False   # set once os.read returns no data
        self._lenbuf = 0    # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # Keep pulling from the pipe until the request can be satisfied or
        # the stream ends.
        while not self._eof and self._lenbuf < size:
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1  # index of the newline within the newest chunk
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while not self._eof and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if len(self._buffer) > 1:
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        chunk = os.read(self._input.fileno(), _chunksize)
        if not chunk:
            self._eof = True
        else:
            self._lenbuf += len(chunk)
            self._buffer.append(chunk)
335
335
def popen2(cmd, env=None, newlines=False):
    # Run `cmd` through the shell and return its (stdin, stdout) pipe pair.
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout
346
346
def popen3(cmd, env=None, newlines=False):
    """Like popen4, but expose only the three pipes, not the process."""
    stdin, stdout, stderr, proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
350
350
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    # Run `cmd` through the shell, returning (stdin, stdout, stderr, proc).
    # bufsize=-1 lets the system pick a buffer size (see popen2 for why
    # unbuffered pipes perform poorly on Mac OS X).
    p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr, p
359
359
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
367
367
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # Split off the local-build suffix after the first '+', if present.
    vparts, plus, extra = v.partition('+')
    if not plus:
        extra = None

    # Collect leading dotted components that parse as integers; stop at the
    # first non-numeric piece (e.g. 'rc1').
    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
    if n in (2, 3):
        return tuple(vints[:n])
420
420
421 # used by parsedate
421 # used by parsedate
422 defaultdateformats = (
422 defaultdateformats = (
423 '%Y-%m-%d %H:%M:%S',
423 '%Y-%m-%d %H:%M:%S',
424 '%Y-%m-%d %I:%M:%S%p',
424 '%Y-%m-%d %I:%M:%S%p',
425 '%Y-%m-%d %H:%M',
425 '%Y-%m-%d %H:%M',
426 '%Y-%m-%d %I:%M%p',
426 '%Y-%m-%d %I:%M%p',
427 '%Y-%m-%d',
427 '%Y-%m-%d',
428 '%m-%d',
428 '%m-%d',
429 '%m/%d',
429 '%m/%d',
430 '%m/%d/%y',
430 '%m/%d/%y',
431 '%m/%d/%Y',
431 '%m/%d/%Y',
432 '%a %b %d %H:%M:%S %Y',
432 '%a %b %d %H:%M:%S %Y',
433 '%a %b %d %I:%M:%S%p %Y',
433 '%a %b %d %I:%M:%S%p %Y',
434 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
434 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
435 '%b %d %H:%M:%S %Y',
435 '%b %d %H:%M:%S %Y',
436 '%b %d %I:%M:%S%p %Y',
436 '%b %d %I:%M:%S%p %Y',
437 '%b %d %H:%M:%S',
437 '%b %d %H:%M:%S',
438 '%b %d %I:%M:%S%p',
438 '%b %d %I:%M:%S%p',
439 '%b %d %H:%M',
439 '%b %d %H:%M',
440 '%b %d %I:%M%p',
440 '%b %d %I:%M%p',
441 '%b %d %Y',
441 '%b %d %Y',
442 '%b %d',
442 '%b %d',
443 '%H:%M:%S',
443 '%H:%M:%S',
444 '%I:%M:%S%p',
444 '%I:%M:%S%p',
445 '%H:%M',
445 '%H:%M',
446 '%I:%M%p',
446 '%I:%M%p',
447 )
447 )
448
448
449 extendeddateformats = defaultdateformats + (
449 extendeddateformats = defaultdateformats + (
450 "%Y",
450 "%Y",
451 "%Y-%m",
451 "%Y-%m",
452 "%b",
452 "%b",
453 "%b %Y",
453 "%b %Y",
454 )
454 )
455
455
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # zero-arg case: a one-slot list doubles as the "computed yet?" flag
        cache = []
        def f():
            if not cache:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]
    return f
481
481
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-setting an existing key moves it
    to the end.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # re-inserting an existing key moves it to the end of the order
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        # accept either a mapping or an iterable of (key, value) pairs
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # bug fix: return the popped value like dict.pop does (it was
        # previously discarded, making pop() always return None)
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default argument swallowed the KeyError
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place `key` at `index` in the order without disturbing other keys
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
526
526
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # Link pointers are wired up by the owning lrucachedict, not here.
        self.next = None
        self.prev = None

        # `key is _notset` marks a node that currently holds no entry.
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
545
545
546 class lrucachedict(object):
546 class lrucachedict(object):
547 """Dict that caches most recent accesses and sets.
547 """Dict that caches most recent accesses and sets.
548
548
549 The dict consists of an actual backing dict - indexed by original
549 The dict consists of an actual backing dict - indexed by original
550 key - and a doubly linked circular list defining the order of entries in
550 key - and a doubly linked circular list defining the order of entries in
551 the cache.
551 the cache.
552
552
553 The head node is the newest entry in the cache. If the cache is full,
553 The head node is the newest entry in the cache. If the cache is full,
554 we recycle head.prev and make it the new head. Cache accesses result in
554 we recycle head.prev and make it the new head. Cache accesses result in
555 the node being moved to before the existing head and being marked as the
555 the node being moved to before the existing head and being marked as the
556 new head node.
556 new head node.
557 """
557 """
    def __init__(self, max):
        """Create an LRU cache able to hold at most `max` entries."""
        # key -> _lrucachenode; the nodes form a circular doubly linked
        # list that encodes recency order.
        self._cache = {}

        # Start with a single self-linked node; nodes are allocated lazily
        # up to `max` as entries are inserted.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max
566
566
    def __len__(self):
        # Count live entries, not allocated nodes (the ring may be larger).
        return len(self._cache)
569
569
    def __contains__(self, k):
        # Membership test does not refresh recency (unlike __getitem__).
        return k in self._cache
572
572
    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        # Walk exactly len(self._cache) nodes from the head; any
        # allocated-but-empty nodes sit past that point in the ring.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next
579
579
    def __getitem__(self, k):
        """Return the value for `k`, marking it most recently used.

        Raises KeyError when `k` is absent.
        """
        node = self._cache[k]
        self._movetohead(node)
        return node.value
584
584
    def __setitem__(self, k, v):
        """Insert or replace `k`, making it the most recently used entry."""
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node
609
609
610 def __delitem__(self, k):
610 def __delitem__(self, k):
611 node = self._cache.pop(k)
611 node = self._cache.pop(k)
612 node.markempty()
612 node.markempty()
613
613
614 # Temporarily mark as newest item before re-adjusting head to make
614 # Temporarily mark as newest item before re-adjusting head to make
615 # this node the oldest item.
615 # this node the oldest item.
616 self._movetohead(node)
616 self._movetohead(node)
617 self._head = node.next
617 self._head = node.next
618
618
619 # Additional dict methods.
619 # Additional dict methods.
620
620
621 def get(self, k, default=None):
621 def get(self, k, default=None):
622 try:
622 try:
623 return self._cache[k]
623 return self._cache[k]
624 except KeyError:
624 except KeyError:
625 return default
625 return default
626
626
627 def clear(self):
627 def clear(self):
628 n = self._head
628 n = self._head
629 while n.key is not _notset:
629 while n.key is not _notset:
630 n.markempty()
630 n.markempty()
631 n = n.next
631 n = n.next
632
632
633 self._cache.clear()
633 self._cache.clear()
634
634
635 def copy(self):
635 def copy(self):
636 result = lrucachedict(self._capacity)
636 result = lrucachedict(self._capacity)
637 n = self._head.prev
637 n = self._head.prev
638 # Iterate in oldest-to-newest order, so the copy has the right ordering
638 # Iterate in oldest-to-newest order, so the copy has the right ordering
639 for i in range(len(self._cache)):
639 for i in range(len(self._cache)):
640 result[n.key] = n.value
640 result[n.key] = n.value
641 n = n.prev
641 n = n.prev
642 return result
642 return result
643
643
    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # Unlink ``node`` from its current position first...
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # ...then splice it back in between the old head and its oldest
        # neighbor. The order of these assignments matters.
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node
690
690
691 def _addcapacity(self):
691 def _addcapacity(self):
692 """Add a node to the circular linked list.
692 """Add a node to the circular linked list.
693
693
694 The new node is inserted before the head node.
694 The new node is inserted before the head node.
695 """
695 """
696 head = self._head
696 head = self._head
697 node = _lrucachenode()
697 node = _lrucachenode()
698 head.prev.next = node
698 head.prev.next = node
699 node.prev = head.prev
699 node.prev = head.prev
700 node.next = head
700 node.next = head
701 head.prev = node
701 head.prev = node
702 self._size += 1
702 self._size += 1
703 return node
703 return node
704
704
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    # Unary functions are keyed on the bare argument; everything else is
    # keyed on the positional-argument tuple.
    unary = func.__code__.co_argcount == 1

    if unary:
        def f(arg):
            if arg in cache:
                # Cache hit: refresh recency.
                order.remove(arg)
            else:
                # Evict the least recently used entry once more than 20
                # results are retained, then compute and store.
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
731
731
class propertycache(object):
    """Descriptor computing a value on first access, then caching it.

    The computed result is written into the instance ``__dict__`` under
    the wrapped function's name, so subsequent reads bypass this
    descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        computed = self.func(obj)
        self.cachevalue(obj, computed)
        return computed

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
744
744
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # stderr is not captured; only stdout is returned to the caller.
    stdout, _stderr = proc.communicate(s)
    return stdout
751
751
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write the input to a temp file for the command to read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # reserve an output path; the command itself writes to it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # OpenVMS signals success through odd status values
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temp files; never mask the real error
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
785
785
# Dispatch table mapping a command prefix to the strategy used to run the
# remainder of the command; filter() falls back to pipefilter when no
# prefix matches.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
790
790
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # A recognized prefix selects the filtering strategy; the prefix itself
    # is stripped from the command before it runs.
    for prefix, runner in filtertable.iteritems():
        if cmd.startswith(prefix):
            return runner(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
797
797
def binary(s):
    """return true if a string is binary data"""
    # Empty input is not considered binary; otherwise the presence of a
    # NUL byte is the heuristic.
    if not s:
        return False
    return '\0' in s
801
801
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)); defined as 0 for x == 0
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendingsize = 0
    for chunk in source:
        pending.append(chunk)
        pendingsize += len(chunk)
        if pendingsize >= min:
            if min < max:
                # Grow the threshold: at least double it, and jump
                # further if this chunk already exceeded the doubling.
                min <<= 1
                nmin = 1 << log2(pendingsize)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            pendingsize = 0
            pending = []
    # flush whatever remains, even if below the threshold
    if pending:
        yield ''.join(pending)
832
832
# Convenience alias: util.Abort was historically the canonical name; the
# exception class itself now lives in the error module.
Abort = error.Abort
834
834
def always(fn):
    """Matcher predicate that accepts every input."""
    return True
837
837
def never(fn):
    """Matcher predicate that rejects every input."""
    return False
840
840
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only re-enable collection if it was on when we started, so
            # nested uses do not fight each other.
            if wasenabled:
                gc.enable()
    return wrapper
862
862
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # n1 lives on a different drive than root: no relative path
            # exists, so anchor n2 directly under root instead.
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # Strip the shared leading components; the reversals above make
    # comparing/removing from the front an O(1) list.pop().
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # Climb out of what remains of n1, then descend into n2's remainder.
    return os.sep.join((['..'] * len(a)) + b) or '.'
888
888
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"): # new py2exe
        return True
    if safehasattr(sys, "importers"): # old py2exe
        return True
    return imp.is_frozen("__main__") # tools/freeze
898
898
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# point gettext at the translations shipped next to the code/executable
i18n.setdatapath(datapath)
907
907
# Cached path of the 'hg' executable; lazily initialized by hgexecutable().
_hgexecutable = None
909
909
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        envhg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if envhg:
            # explicit override always wins
            _sethgexecutable(envhg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # running directly from the 'hg' script
            _sethgexecutable(mainmod.__file__)
        else:
            _sethgexecutable(findexe('hg') or os.path.basename(sys.argv[0]))
    return _hgexecutable
932
932
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # cached module-wide; consumed by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
937
937
def _isstdout(f):
    """Return whether f refers to the process's original stdout."""
    getfileno = getattr(f, 'fileno', None)
    # Objects without a fileno() method (e.g. StringIO) are never stdout.
    if not getfileno:
        return getfileno
    return getfileno() == sys.__stdout__.fileno()
941
941
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # keep our own buffered output ordered relative to the child's
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        # let hook scripts invoke the same hg we are running
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # caller wants the output captured: merge stderr into stdout
            # and forward it line by line
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # OpenVMS signals success through odd status values
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
1000
1000
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # Only translate TypeErrors raised by the call frame itself
            # (a one-entry traceback, i.e. a bad argument list); deeper
            # failures are genuine bugs and propagate unchanged.
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) == 1:
                raise error.SignatureError
            raise

    return check
1012
1012
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    # (the 'if False' deliberately disables this branch)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the link itself rather than copying its target
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
1040
1040
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # hardlinks only work within one filesystem; probe by device id
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset nested progress by the files already processed
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed (e.g. cross-device); fall back to copying
                # for this file and all the remaining ones
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1077
1077
# Windows rejects these device names in any path component, even with an
# extension (e.g. con.txt); checkwinfilename() below relies on both lists.
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component, accepting either separator style
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # control characters are rejected by Windows
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # reserved device names apply to the part before the first dot
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # NOTE: "n not in '..'" is a substring test that admits exactly
        # '.' and '..', the components allowed to end with a dot.
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1128
1128
# On Windows, enforce the naming rules above; elsewhere defer to the
# platform module's implementation.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1133
1133
def makelock(info, pathname):
    """Create a lock at ``pathname`` recording ``info``.

    A symlink whose target is ``info`` is preferred (its creation is
    atomic). If the lock already exists (EEXIST) the error propagates;
    other symlink failures, and platforms without symlinks, fall back to
    an exclusively-created regular file containing ``info``.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1146
1146
def readlock(pathname):
    """Return the info stored in the lock at pathname.

    Reads the symlink target when the lock is a symlink; otherwise
    (regular-file lock, or no symlink support) reads the file body.
    """
    try:
        return os.readlink(pathname)
    except OSError as err:
        # EINVAL: pathname is not a symlink; ENOSYS: no symlink support
        if err.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    lockfile = posixfile(pathname)
    contents = lockfile.read()
    lockfile.close()
    return contents
1159
1159
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fileno = fp.fileno
    except AttributeError:
        # no file descriptor available; fall back to stat-by-name
        return os.stat(fp.name)
    return os.fstat(fileno())
1166
1166
1167 # File system features
1167 # File system features
1168
1168
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    origstat = os.lstat(path)
    dirname, basename = os.path.split(path)
    folded = basename.upper()
    if folded == basename:
        folded = basename.lower()
    if folded == basename:
        # nothing case-foldable in the name; no evidence against
        # case sensitivity
        return True
    try:
        foldedstat = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the folded spelling does not resolve: names are distinct
        return True
    # identical stat means both spellings hit the same file, i.e. the
    # filesystem folds case
    return foldedstat != origstat
1191
1191
# Optional re2 support.  _re2 is a tri-state module-level flag:
#   None  - the re2 module imported, but hasn't been sanity-checked yet
#   False - re2 is unavailable (or broken); always use the re module
#   True  - re2 imported and verified to work
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False

class _re(object):
    """Facade over re2/re that transparently picks a usable engine."""
    def _checkre2(self):
        # Validate re2 once and cache the verdict in the module-level flag.
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            # NOTE(review): presumably some re2 bindings resolve their
            # native module lazily, which is why ImportError can surface
            # at call time -- confirm against the re2 wrapper in use
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes no flags argument; encode them inline instead
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 doesn't support; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton; callers use util.re.compile()/util.re.escape
re = _re()
1242
1242
# cache of directory path -> {normcased name: on-disk name}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry name -> actual on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes for use inside the regex character classes
    # below: a lone '\' would escape the following character, so on
    # Windows ``[\/]`` matched only '/'.  The original code called
    # seps.replace() but discarded the result (strings are immutable),
    # making the escaping a no-op; keep the assignment.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the caller's spelling when the entry vanished
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1285
1285
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so probe with separate files to work around
    # issue2543 (and to avoid losing testfile on Samba shares)
    probe = testfile + ".hgtmp1"
    if os.path.lexists(probe):
        return False
    try:
        posixfile(probe, 'w').close()
    except IOError:
        return False

    link = testfile + ".hgtmp2"
    handle = None
    try:
        oslink(probe, link)
        # nlinks() may behave differently for files on Windows shares
        # if the file is open
        handle = posixfile(link)
        return nlinks(link) > 1
    except OSError:
        return False
    finally:
        if handle is not None:
            handle.close()
        # best-effort cleanup of both probe files
        for tmp in (probe, link):
            try:
                os.unlink(tmp)
            except OSError:
                pass
1317
1317
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # preserves the original truthiness contract: on platforms without
    # an altsep this yields None rather than False
    return os.altsep and path.endswith(os.altsep)
1321
1321
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    parts = path.split(os.sep)
    return parts
1329
1329
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # elsewhere: Windows always has a GUI; X11 needs DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1344
1344
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # place the temp file in the same directory as the original so that
    # a later rename over the original stays within one filesystem
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        # caller will truncate/overwrite anyway; skip the copy
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            # missing original is fine: the (empty) temp file stands in
            if inst.errno == errno.ENOENT:
                return temp
            # annotate the error with the offending path for the caller
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # never leave a stale temp file behind on any failure
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1383
1383
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # a missing file is represented as stat = None
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        """True if the file is unchanged between 'old' and this stat"""
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat.st_ctime == old.stat.st_ctime and
                    self.stat.st_mtime == old.stat.st_mtime)
        except AttributeError:
            pass
        try:
            # at least one side has stat = None: the file is unchanged
            # only if it is missing on both sides.  (Returning False
            # unconditionally here, as the original did, spuriously
            # reported "changed" for a file that never existed.)
            return self.stat is None and old.stat is None
        except AttributeError:
            # 'old' is not a filestat-like object
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
        - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
        - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
        - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            # a missing file on either side leaves nothing to disambiguate
            return False
1446
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # 'w' mode means the caller will rewrite everything; let
        # mktempcopy skip copying the original contents in that case
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # publish: flush/close the temp file, then atomically rename it
        # over the permanent name
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abandon all writes; unlink is best-effort (the temp file may
        # already be gone)
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        # if the object is garbage-collected without close(), drop the
        # pending writes rather than publishing a half-written file
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1484
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: create the ancestry first, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as exc:
            # catch EEXIST to tolerate concurrent creators
            if exc.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1512
def readfile(path):
    """Return the full binary contents of the file at path."""
    with open(path, 'rb') as fp:
        data = fp.read()
    return data
1453
1516
def writefile(path, text):
    """Replace the contents of the file at path with text (bytes)."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1457
1520
def appendfile(path, text):
    """Append text (bytes) to the file at path, creating it if absent."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1461
1524
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-chunk anything over 1MB into 256KB pieces so a single
            # huge chunk can't dominate memory in the queue below
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # offset of the first unconsumed byte within self._queue[0]
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source iterator exhausted; return what we have
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining # goes negative, terminating the loop

        return ''.join(buf)
1542
1605
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 (limit exhausted) short-circuits without a read
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1563
1626
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # offset = UTC wallclock minus local wallclock for the same instant,
    # i.e. seconds west of UTC
    utcnaive = datetime.datetime.utcfromtimestamp(timestamp)
    localnaive = datetime.datetime.fromtimestamp(timestamp)
    delta = utcnaive - localnaive
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1576
1639
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    # %1/%2 are private placeholders for the signed hour and minute
    # halves of the numeric timezone; %z expands to both
    if "%1" in format or "%2" in format or "%z" in format:
        # tz is seconds *west* of UTC, hence the inverted sign here
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    d = t - tz
    # clamp to the signed 32-bit range so strftime is safe everywhere
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x80000000:
        d = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    s = t.strftime(format)
    return s
1612
1675
def shortdate(date=None):
    """Render a (unixtime, offset) tuple as an ISO 8601 style date."""
    return datestr(date, format='%Y-%m-%d')
1616
1679
def parsetimezone(tz):
    """parse a timezone string and return an offset integer"""
    # numeric form: "+HHMM" / "-HHMM"
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours, minutes = int(tz[1:3]), int(tz[3:5])
        # result is seconds *west* of UTC, hence the negation
        return -sign * (hours * 60 + minutes) * 60
    # symbolic names for zero offset
    if tz in ("GMT", "UTC"):
        return 0
    # not a recognizable timezone
    return None
1627
1690
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    # if the last whitespace-separated token parses as a timezone, strip
    # it off and parse the remainder against the given format
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        # NOTE(review): defaults[part] appears to be a (biased, now) pair
        # of strings indexed by the usenow flag -- confirm against callers
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    # timegm() interprets the tuple as UTC, which here yields "local"
    # seconds; the true unixtime is recovered via the offset below
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1657
1720
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        # empty/None date: epoch in UTC
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed; pass through unchanged
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates, matched both in English and translated form
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: a "unixtime offset" pair of integers
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1734
1797
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified fields down (month/day default to 1) to get
        # the earliest timestamp the spec can mean
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unspecified fields up (Dec, 23:59:59) to get the latest
        # timestamp; try month lengths 31/30/29 and fall back to 28,
        # since parsedate aborts on an impossible day-of-month
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        # on or before: compare against the rounded-up bound
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        # on or after: compare against the rounded-down bound
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days (measured from now)
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range between two specs
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anywhere within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1810
1873
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    # 're:' selects regular-expression matching
    if pattern.startswith('re:'):
        source = pattern[len('re:'):]
        try:
            compiled = remod.compile(source)
        except remod.error as inst:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % inst)
        return 're', source, compiled.search
    # 'literal:' forces exact matching, shedding only the prefix
    if pattern.startswith('literal:'):
        pattern = pattern[len('literal:'):]
    # anything else (including unknown prefixes) is an exact match
    return 'literal', pattern, pattern.__eq__
1849
1912
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the domain part of an email address
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    # keep only what follows '<' in a "Name <addr>" style string
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # cut at the first space, then at the first dot
    for sep in (' ', '.'):
        pos = user.find(sep)
        if pos >= 0:
            user = user[:pos]
    return user
1865
1928
def emailuser(user):
    """Return the user portion of an email address."""
    addr = user
    # strip the domain first...
    at = addr.find('@')
    if at >= 0:
        addr = addr[:at]
    # ...then any "Name <" prefix
    lt = addr.find('<')
    if lt >= 0:
        addr = addr[lt + 1:]
    return addr
1875
1938
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; find() returning -1 for a missing '<'
    # makes start 0, and a missing '>' leaves the slice open-ended, so
    # a bare address is returned unchanged
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        end = None
    return author[start:end]
1882
1945
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegates to encoding.trim with an '...' ellipsis marker; trimming
    # is presumably column-width aware (see the encoding module) -- the
    # docstring above speaks of display columns, not characters
    return encoding.trim(text, maxlength, ellipsis='...')
1886
1949
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # scan the table top-down; the first row whose threshold
        # (divisor * multiplier) is reached formats the value
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # below every threshold: use the last (smallest-unit) format
        return unittable[-1][2] % count

    return go
1897
1960
# render a byte count with the largest unit that keeps the value readable
# (e.g. '1.23 GB', '10.5 MB', '2.00 KB', '42 bytes'); each row is a
# (multiplier, divisor, format) triple consumed by unitcountfn above
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1910
1973
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed to single ones.

    Avoids double backslash in Windows path repr().
    """
    escaped = repr(s)
    return escaped.replace('\\\\', '\\')
1914
1977
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # Factory returning a width-aware TextWrapper instance; the subclass
    # is created lazily on first call and then cached (see the 'global'
    # rebinding at the bottom).
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr at the display-column boundary 'space_left',
            # accumulating per-character widths via encoding.ucolwidth.
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                # break the word at the column boundary instead of the
                # character count the base class would use
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the subclass: later calls to MBTextWrapper skip the class
    # definition entirely and just instantiate tw
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2018
2081
def wrap(line, width, initindent='', hangindent=''):
    """Wrap 'line' to 'width' display columns.

    initindent prefixes the first output line, hangindent all following
    lines.  Returns a locally-encoded byte string.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # decode to unicode so the width-aware wrapper can measure columns
    # per character (Python 2 byte strings in play here)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    # re-encode back to the local encoding before returning
    return wrapper.fill(line).encode(encoding.encoding)
2031
2094
def iterlines(iterator):
    """Yield the individual lines of every chunk produced by iterator."""
    for block in iterator:
        # splitlines() drops line terminators, so callers receive bare
        # line contents
        for ln in block.splitlines():
            yield ln
2036
2099
def expandpath(path):
    """Expand environment variables, then '~' constructs, in path."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
2039
2102
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [os.environ['EXECUTABLEPATH']]
        else:
            # other freezers (e.g. py2exe): re-invoke the frozen binary
            return [sys.executable]
    # running from source: defer to the platform-specific lookup
    return gethgcmd()
2054
2117
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on Windows; skip the handler there
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): 'terminated' holds (pid, status) tuples, so
            # 'pid in terminated' can never be true for a bare int pid;
            # liveness detection effectively relies on testpid() alone --
            # confirm whether this is intentional
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the previous SIGCHLD handler before returning
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2089
2152
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        # register the bare (unescaped) prefix character so a doubled
        # prefix interpolates to a single literal one; this mutates the
        # caller's mapping, exactly as the historical implementation did
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))
    # group() includes the one-character prefix; strip it before lookup
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
2114
2177
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    # numeric values (int or digit string) pass straight through
    try:
        num = int(port)
    except ValueError:
        pass
    else:
        return num

    # otherwise treat it as a service name like 'http'
    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
2131
2194
# canonical lowercase spellings of boolean configuration values;
# parsebool() below looks its (lower-cased) input up in this table
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
2135
2198
2136 def parsebool(s):
2199 def parsebool(s):
2137 """Parse s into a boolean.
2200 """Parse s into a boolean.
2138
2201
2139 If s is not a valid boolean, returns None.
2202 If s is not a valid boolean, returns None.
2140 """
2203 """
2141 return _booleans.get(s.lower(), None)
2204 return _booleans.get(s.lower(), None)
2142
2205
2143 _hexdig = '0123456789ABCDEFabcdef'
2206 _hexdig = '0123456789ABCDEFabcdef'
2144 _hextochr = dict((a + b, chr(int(a + b, 16)))
2207 _hextochr = dict((a + b, chr(int(a + b, 16)))
2145 for a in _hexdig for b in _hexdig)
2208 for a in _hexdig for b in _hexdig)
2146
2209
2147 def _urlunquote(s):
2210 def _urlunquote(s):
2148 """Decode HTTP/HTML % encoding.
2211 """Decode HTTP/HTML % encoding.
2149
2212
2150 >>> _urlunquote('abc%20def')
2213 >>> _urlunquote('abc%20def')
2151 'abc def'
2214 'abc def'
2152 """
2215 """
2153 res = s.split('%')
2216 res = s.split('%')
2154 # fastpath
2217 # fastpath
2155 if len(res) == 1:
2218 if len(res) == 1:
2156 return s
2219 return s
2157 s = res[0]
2220 s = res[0]
2158 for item in res[1:]:
2221 for item in res[1:]:
2159 try:
2222 try:
2160 s += _hextochr[item[:2]] + item[2:]
2223 s += _hextochr[item[:2]] + item[2:]
2161 except KeyError:
2224 except KeyError:
2162 s += '%' + item
2225 s += '%' + item
2163 except UnicodeDecodeError:
2226 except UnicodeDecodeError:
2164 s += unichr(int(item[:2], 16)) + item[2:]
2227 s += unichr(int(item[:2], 16)) + item[2:]
2165 return s
2228 return s
2166
2229
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters that quote() leaves untouched in user/passwd components
    # (_safechars) and in path/fragment components (_safepchars)
    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        # _localpath stays True while no scheme has been recognized, i.e.
        # the value is treated as a plain filesystem path
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (prettyurl, authinfo), where prettyurl is the URL string
        rendered without credentials, and authinfo is None when no user is
        set, or otherwise a (realm, uris, user, passwd) tuple for urllib2's
        password manager."""
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """Return True if this URL cannot be joined onto a base path:
        remote URLs, drive-qualified paths, UNC paths, or rooted paths."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return a local filesystem path for file:/bundle: URLs; for any
        other scheme, return the original string passed to the parser."""
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2453
2516
def hasscheme(path):
    '''whether path carries a URL scheme component'''
    parsed = url(path)
    return bool(parsed.scheme)
2456
2519
def hasdriveletter(path):
    '''whether path begins with a Windows drive specifier like "c:"'''
    return path and path[0:1].isalpha() and path[1:2] == ':'
2459
2522
def urllocalpath(path):
    '''parse path as a URL (ignoring query/fragment) and return its
    local filesystem form'''
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2462
2525
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # mask rather than drop, so the URL shape stays recognizable
        parsed.passwd = '***'
    return str(parsed)
2469
2532
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
2475
2538
def isatty(fp):
    '''best-effort check whether fp is attached to a terminal'''
    try:
        result = fp.isatty()
    except AttributeError:
        # objects without an isatty() method can never be terminals
        result = False
    return result
2481
2544
# render a duration in seconds as a human-readable string, choosing the
# largest fitting unit (s, ms, us, ns) and a matching precision; each
# entry is (threshold-multiplier, divisor, format) for unitcountfn
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2497
2560
# mutable one-element cell tracking the indentation depth of nested
# @timed calls (a list so the closure below can rebind it)
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''
    indent = 2

    def timedwrapper(*args, **kwargs):
        begin = time.time()
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            _timenesting[0] -= indent
            elapsed = time.time() - begin
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return timedwrapper
2524
2587
# suffix -> multiplier pairs, tried in order; the bare 'b' must come
# last so that 'kb'/'mb'/'gb' are not shadowed by it
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2546
2609
class hooks(object):
    '''A collection of hook functions used to extend another function's
    behavior. When invoked, the registered hooks run in lexicographic
    order of their source names.'''

    def __init__(self):
        # list of (source, hookfn) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort in place so later calls see a stable, source-ordered list
        self._hooks.sort(key=lambda entry: entry[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
2564
2627
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    frames = traceback.extract_stack()[:-skip - 1]
    entries = [(fileline % (fname, lineno), funcname)
               for fname, lineno, funcname, _text in frames]
    if not entries:
        return
    # pad every location to the widest one so functions line up
    width = max(len(loc) for loc, _func in entries)
    for loc, funcname in entries:
        if line is None:
            yield (width, loc, funcname)
        else:
            yield line % (width, loc, funcname)
2586
2649
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so the two outputs interleave sanely
        otherf.flush()
    f.write('%s at:\n' % msg)
    for entry in getstackframes(skip + 1):
        f.write(entry)
    f.flush()
2599
2662
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> reference count
        self._dirs = {}
        if skip is not None and safehasattr(map, 'iteritems'):
            # dirstate-style mapping: values are state tuples and entries
            # whose state equals 'skip' are left out of the multiset
            for f, s in map.iteritems():
                if s[0] != skip:
                    self.addpath(f)
        else:
            for f in map:
                self.addpath(f)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                counts[base] += 1
                # every shallower ancestor was already counted when this
                # directory was first seen, so we can stop here
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                counts[base] -= 1
                # still referenced elsewhere; shallower ancestors keep
                # their counts too
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2635
2698
# replace the pure Python 'dirs' class with the implementation from the
# parsers module when one is available
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2638
2701
def finddirs(path):
    '''yield each ancestor directory of a '/'-separated path, deepest
    first (e.g. 'a/b/c' yields 'a/b' then 'a')'''
    remainder = path
    while True:
        remainder, sep, _tail = remainder.rpartition('/')
        if not sep:
            # no separator left: every ancestor has been emitted
            break
        yield remainder
2644
2707
2645 # compression utility
2708 # compression utility
2646
2709
class nocompress(object):
    """Pass-through 'compressor' used when no compression is requested."""

    def compress(self, x):
        # identity transform: hand each chunk back unchanged
        return x

    def flush(self):
        # nothing is ever buffered, so there is nothing left to emit
        return ""
2652
2715
# compression engine name -> factory returning a fresh compressor object;
# None (and the legacy 'UN' alias below) means "no compression"
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2661
2724
def _makedecompressor(decompcls):
    """Return a function that wraps a compressed file object in a
    chunkbuffer of decompressed data, using instances of decompcls."""
    def decompressed(f):
        # one decompressor object per stream, fed chunk by chunk
        decompressor = decompcls()
        for chunk in filechunkiter(f):
            yield decompressor.decompress(chunk)
    def func(fh):
        return chunkbuffer(decompressed(fh))
    return func
2670
2733
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked when enter() is called.'''
        # note: the docstring previously claimed "__call__ time", but
        # this class has no __call__; the factories run in enter()
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.

        Returns the list of values produced by each manager's
        __enter__().  Each successfully entered manager's __exit__ is
        queued so __exit__ below can unwind them.
        '''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        # enter() must only run once; dropping the attribute makes a
        # second call fail loudly instead of re-entering the managers
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            # the exception arguments are accepted for uniformity with
            # __exit__ callbacks but deliberately not forwarded
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.

        Returns True (suppressing the original exception) only if an
        exception was received and some exit function suppressed it.
        If an exit function itself raises, the most recent such
        exception is re-raised after all remaining exit functions have
        run.
        '''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this manager swallowed the exception; later exit
                    # functions see a clean (no-exception) state
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the most recent failure (the original code
                # assigned sys.exc_info() twice here; the first, dead
                # assignment has been removed) and keep unwinding
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2730
2793
2731 def _bz2():
2794 def _bz2():
2732 d = bz2.BZ2Decompressor()
2795 d = bz2.BZ2Decompressor()
2733 # Bzip2 stream start with BZ, but we stripped it.
2796 # Bzip2 stream start with BZ, but we stripped it.
2734 # we put it back for good measure.
2797 # we put it back for good measure.
2735 d.decompress('BZ')
2798 d.decompress('BZ')
2736 return d
2799 return d
2737
2800
# Map a compression-type header to a function taking a file object and
# returning a decompressed reader; the None key means "no compression".
decompressors = {None: lambda fh: fh}
# '_truncatedBZ' resumes a bzip2 stream whose leading 'BZ' magic was
# already consumed while detecting the stream type
decompressors['_truncatedBZ'] = _makedecompressor(_bz2)
decompressors['BZ'] = _makedecompressor(lambda: bz2.BZ2Decompressor())
decompressors['GZ'] = _makedecompressor(lambda: zlib.decompressobj())
# keep the historical 'UN' spelling working as an alias for None
decompressors['UN'] = decompressors[None]
2745
2808
# convenient shortcut: util.dst(...) is short for debugstacktrace(...),
# handy for ad-hoc debugging sessions
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now