# Source: mercurial/util.py at changeset r27765:f1fb93ee (default branch).
# Commit: "util: adjust hgexecutable() to handle frozen Mercurial on OS X"
# Author: Matt Harbison
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import urllib
37 import urllib
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 )
46 )
47
47
# Pick the platform-specific implementation module once, at import time.
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

# Convenience aliases for the common hash constructors and gettext.
md5 = hashlib.md5
sha1 = hashlib.sha1
sha512 = hashlib.sha512
_ = i18n._

# Re-export the platform helpers under stable, platform-neutral names so the
# rest of the codebase never has to know which implementation was selected.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C-accelerated osutil version when it exists
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username

# Python compatibility

# Unique sentinel for "no value supplied"; None can be a legitimate value.
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
120
120
def safehasattr(thing, attr):
    """Return True if ``thing`` has the attribute ``attr``.

    Uses getattr() with a sentinel default instead of hasattr(), which on
    Python 2 swallows arbitrary exceptions raised by property code.
    """
    sentinel = _notset
    return getattr(thing, attr, sentinel) is not sentinel
123
123
# Supported digest algorithms, keyed by their wire/protocol name.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check: the strength ordering must only name known digests.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
134
134
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        # Map of digest name -> live hash object.
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` to every tracked hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for ``key``.

        Raises Abort for unknown digest types.
        """
        if key not in DIGESTS:
            # bug fix: this previously interpolated 'k', a stale
            # module-level loop variable, so the error reported the
            # wrong digest name instead of the requested one
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
181
181
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, feeding every digest as we go."""
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        """Abort unless exactly the expected size and digests were seen."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
213
213
214 try:
214 try:
215 buffer = buffer
215 buffer = buffer
216 except NameError:
216 except NameError:
217 if sys.version_info[0] < 3:
217 if sys.version_info[0] < 3:
218 def buffer(sliceable, offset=0):
218 def buffer(sliceable, offset=0):
219 return sliceable[offset:]
219 return sliceable[offset:]
220 else:
220 else:
221 def buffer(sliceable, offset=0):
221 def buffer(sliceable, offset=0):
222 return memoryview(sliceable)[offset:]
222 return memoryview(sliceable)[offset:]
223
223
224 closefds = os.name == 'posix'
224 closefds = os.name == 'posix'
225
225
226 _chunksize = 4096
226 _chunksize = 4096
227
227
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []        # list of pending chunks, newest last
        self._eof = False        # set once os.read() returns ''
        self._lenbuf = 0         # total bytes currently buffered

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep pulling chunks until the request can be satisfied or EOF
        while not self._eof and self._lenbuf < size:
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        # read until a newline shows up in the most recent chunk or EOF
        while not self._eof and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0:  # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        if len(self._buffer) == 1:
            buf = self._buffer[0]
        else:
            buf = ''.join(self._buffer)

        data = buf[:size]
        remainder = buf[len(data):]
        if remainder:
            self._buffer = [remainder]
            self._lenbuf = len(remainder)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if data:
            self._lenbuf += len(data)
            self._buffer.append(data)
        else:
            self._eof = True
321
321
def popen2(cmd, env=None, newlines=False):
    """Run ``cmd`` through the shell; return its (stdin, stdout) pipes.

    Setting bufsize to -1 lets the system decide the buffer size.
    The default for bufsize is 0, meaning unbuffered. This leads to
    poor performance on Mac OS X: http://bugs.python.org/issue4194
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=(os.name == 'posix'),
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
332
332
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but discard the Popen object and return only pipes."""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
336
336
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run ``cmd`` through the shell.

    Returns (stdin, stdout, stderr, proc).  bufsize defaults to -1 so the
    system picks a buffer size (unbuffered pipes are slow on Mac OS X,
    see http://bugs.python.org/issue4194).
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=(os.name == 'posix'),
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
345
345
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # generated version module is absent (e.g. running from source)
        return 'unknown'
    return __version__.version
353
353
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # Split off everything after the first '+' as opaque "extra" data.
    if '+' in v:
        vparts, extra = v.split('+', 1)
    else:
        vparts, extra = v, None

    # Collect leading numeric components; stop at the first non-integer.
    vints = []
    for part in vparts.split('.'):
        try:
            vints.append(int(part))
        except ValueError:
            break
    # Pad to three components, e.g. (3, 6) -> (3, 6, None).
    vints += [None] * (3 - len(vints))

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
406
406
# used by parsedate; tried in order, so keep most specific formats first
defaultdateformats = (
    # full date + time
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    # partial / US-style dates
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    # ctime-like and RFC-2822-like
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    # bare times
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# The extended set additionally accepts coarse-grained dates.
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
441
441
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # bug fix / compat: func.func_code is the Python-2-only spelling; the
    # __code__ attribute exists on Python 2.6+ AND Python 3, so use it.
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # zero-arg case: a one-slot list is cheaper than a dict
        cache = []
        def f():
            if not cache:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
467
467
class sortdict(dict):
    '''a simple sorted dictionary

    Preserves insertion order; re-setting an existing key moves it to the
    end. The order is tracked in the parallel list ``self._list``.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # an existing key is moved to the end of the order
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # bug fix: return the popped value per the dict contract; the
        # previous implementation silently discarded it and returned None
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default swallowed the KeyError above
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
512
512
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # links are wired up by the owning cache
        self.next = None
        self.prev = None
        # _notset marks an allocated-but-unused slot
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
531
531
532 class lrucachedict(object):
532 class lrucachedict(object):
533 """Dict that caches most recent accesses and sets.
533 """Dict that caches most recent accesses and sets.
534
534
535 The dict consists of an actual backing dict - indexed by original
535 The dict consists of an actual backing dict - indexed by original
536 key - and a doubly linked circular list defining the order of entries in
536 key - and a doubly linked circular list defining the order of entries in
537 the cache.
537 the cache.
538
538
539 The head node is the newest entry in the cache. If the cache is full,
539 The head node is the newest entry in the cache. If the cache is full,
540 we recycle head.prev and make it the new head. Cache accesses result in
540 we recycle head.prev and make it the new head. Cache accesses result in
541 the node being moved to before the existing head and being marked as the
541 the node being moved to before the existing head and being marked as the
542 new head node.
542 new head node.
543 """
543 """
544 def __init__(self, max):
544 def __init__(self, max):
545 self._cache = {}
545 self._cache = {}
546
546
547 self._head = head = _lrucachenode()
547 self._head = head = _lrucachenode()
548 head.prev = head
548 head.prev = head
549 head.next = head
549 head.next = head
550 self._size = 1
550 self._size = 1
551 self._capacity = max
551 self._capacity = max
552
552
553 def __len__(self):
553 def __len__(self):
554 return len(self._cache)
554 return len(self._cache)
555
555
556 def __contains__(self, k):
556 def __contains__(self, k):
557 return k in self._cache
557 return k in self._cache
558
558
559 def __iter__(self):
559 def __iter__(self):
560 # We don't have to iterate in cache order, but why not.
560 # We don't have to iterate in cache order, but why not.
561 n = self._head
561 n = self._head
562 for i in range(len(self._cache)):
562 for i in range(len(self._cache)):
563 yield n.key
563 yield n.key
564 n = n.next
564 n = n.next
565
565
566 def __getitem__(self, k):
566 def __getitem__(self, k):
567 node = self._cache[k]
567 node = self._cache[k]
568 self._movetohead(node)
568 self._movetohead(node)
569 return node.value
569 return node.value
570
570
    def __setitem__(self, k, v):
        """Insert or update *k*, making it the most recently used entry."""
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            # Still growing towards capacity: allocate a fresh node.
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node
595
595
    def __delitem__(self, k):
        """Remove *k* from the cache, recycling its node as the oldest slot."""
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next
604
604
605 # Additional dict methods.
605 # Additional dict methods.
606
606
607 def get(self, k, default=None):
607 def get(self, k, default=None):
608 try:
608 try:
609 return self._cache[k]
609 return self._cache[k]
610 except KeyError:
610 except KeyError:
611 return default
611 return default
612
612
613 def clear(self):
613 def clear(self):
614 n = self._head
614 n = self._head
615 while n.key is not _notset:
615 while n.key is not _notset:
616 n.markempty()
616 n.markempty()
617 n = n.next
617 n = n.next
618
618
619 self._cache.clear()
619 self._cache.clear()
620
620
    def copy(self):
        """Return a new lrucachedict with the same capacity and entries."""
        result = lrucachedict(self._capacity)
        # self._head.prev is the oldest entry.
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result
629
629
    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        # NOTE: the statement order below is load-bearing; each step reads
        # pointers that a later step overwrites.
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node
676
676
677 def _addcapacity(self):
677 def _addcapacity(self):
678 """Add a node to the circular linked list.
678 """Add a node to the circular linked list.
679
679
680 The new node is inserted before the head node.
680 The new node is inserted before the head node.
681 """
681 """
682 head = self._head
682 head = self._head
683 node = _lrucachenode()
683 node = _lrucachenode()
684 head.prev.next = node
684 head.prev.next = node
685 node.prev = head.prev
685 node.prev = head.prev
686 node.next = head
686 node.next = head
687 head.prev = node
687 head.prev = node
688 self._size += 1
688 self._size += 1
689 return node
689 return node
690
690
def lrucachefunc(func):
    '''cache most recent results of function calls

    Keeps roughly the 20 most recent call results (the eviction check
    happens before insertion, so the cache can briefly hold 21 entries).
    Arguments must be hashable.
    '''
    cache = {}
    order = collections.deque()
    # func.__code__ exists on Python 2.6+ as well as Python 3; the old
    # func.func_code spelling is Python-2 only.
    if func.__code__.co_argcount == 1:
        # Fast path: key directly on the single argument.
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                # Refresh recency on a hit.
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        # General path: key on the full positional-argument tuple.
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
717
717
class propertycache(object):
    """Descriptor computing a value once, then storing it on the instance.

    After the first access the computed value lives in the instance
    __dict__ under the function's name, so subsequent lookups bypass the
    descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
730
730
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    stdout, _stderr = proc.communicate(s)
    return stdout
737
737
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname = outname = None
    try:
        # Write the input into a temp file the command can read.
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        infile = os.fdopen(infd, 'wb')
        infile.write(s)
        infile.close()
        # Pre-create the output file; the command overwrites it.
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # OpenVMS signals success via an odd status
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        outfile = open(outname, 'rb')
        data = outfile.read()
        outfile.close()
        return data
    finally:
        # Best-effort removal of both temporary files.
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
774
774
# Maps a filter-command prefix to the implementation used by filter().
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
779
779
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # Dispatch on a recognized scheme prefix; default to a plain pipe.
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
786
786
def binary(s):
    """return true if a string is binary data"""
    # A NUL byte is the heuristic marker for binary content; empty or
    # None input is never considered binary.
    return bool(s) and '\0' in s
790
790
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)) for positive x; 0 for x == 0
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen >= min:
            if min < max:
                # Grow the threshold: at least double it, and jump past
                # the size of the chunk we are about to emit, capped at
                # max.
                min = min << 1
                nmin = 1 << log2(pendinglen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            pending = []
            pendinglen = 0
    if pending:
        # flush whatever is left, even if below the threshold
        yield ''.join(pending)
821
821
# Convenience alias so callers can use util.Abort without importing error.
Abort = error.Abort
823
823
def always(fn):
    """Predicate that accepts anything; the argument is ignored."""
    return True
826
826
def never(fn):
    """Predicate that rejects anything; the argument is ignored."""
    return False
829
829
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only re-enable if the caller had GC on; never force it on.
            if wasenabled:
                gc.enable()
    return wrapper
851
851
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists, fall back to
            # an absolute one
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    fromparts = splitpath(n1)
    toparts = n2.split('/')
    fromparts.reverse()
    toparts.reverse()
    # strip the common prefix (both lists are reversed, so compare tails)
    while fromparts and toparts and fromparts[-1] == toparts[-1]:
        fromparts.pop()
        toparts.pop()
    toparts.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join((['..'] * len(fromparts)) + toparts) or '.'
877
877
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):     # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen("__main__")   # tools/freeze
887
887
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# point the i18n machinery at the same data directory
i18n.setdatapath(datapath)

# cached path of the 'hg' executable; managed by hgexecutable() and
# _sethgexecutable()
_hgexecutable = None
898
898
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # explicit override via the HG environment variable
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # running from an 'hg' script (e.g. a source checkout)
            _sethgexecutable(mainmod.__file__)
        else:
            # last resort: search PATH, or fall back to argv[0]'s name
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
917
921
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # stored in the module-level cache read back by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
922
926
def _isstdout(f):
    """Return a truthy value when f is connected to the real stdout.

    Objects without a fileno() method yield a falsy result.
    """
    getfd = getattr(f, 'fileno', None)
    return getfd and getfd() == sys.__stdout__.fileno()
926
930
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    # flush our own pending output before the child writes to the tty
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        # export our own executable path so hook scripts can re-invoke hg
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # redirect both stdout and stderr of the child into 'out'
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # OpenVMS reports success with an odd status
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
985
989
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback exactly one frame deep means the TypeError came
            # from the call itself (wrong argument count), not from code
            # inside func, so report it as a signature problem.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
997
1001
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    # (the 'False and' deliberately disables this branch)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink rather than copying its target
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
1025
1029
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, count) tuple: whether hardlinking remained
    possible throughout, and how many files were processed.
    """
    num = 0

    if hardlink is None:
        # hardlinks only work within one filesystem
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by the files already handled here
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed; copy this and all remaining files
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1062
1066
# File name components and characters that Windows rejects: DOS device
# names (reserved with any extension) and Win32-invalid characters.
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component separately
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters (<= 0x1f) are invalid in Win32 names
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # device names are reserved regardless of extension
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # NOTE: "n not in '..'" is a substring test that deliberately
        # admits the special components '.' and '..'
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1113
1117
# Pick the name checker for the current OS: the Windows rules above, or
# the platform module's implementation elsewhere.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1118
1122
def makelock(info, pathname):
    # Preferred form: a symlink whose target *is* the lock info, so the
    # content can be read without opening a file.
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # an existing lock is an error for the caller to handle; other
        # failures fall through to the regular-file variant below
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # Fallback: write info into an exclusively-created regular file.
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
1131
1135
def readlock(pathname):
    # Symlink-based locks store the info in the link target.
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: readlink unsupported — both mean
        # we should try the regular-file form below
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    # Fallback: the lock is a regular file containing the info.
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
1144
1148
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    fileno = getattr(fp, 'fileno', None)
    if fileno is None:
        # no file descriptor available; stat by name instead
        return os.stat(fp.name)
    return os.fstat(fileno())
1151
1155
1152 # File system features
1156 # File system features
1153
1157
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st_orig = os.lstat(path)
    dirname, basename = os.path.split(path)

    # derive a differently-cased sibling name to probe with
    folded = basename.upper()
    if folded == basename:
        folded = basename.lower()
        if folded == basename:
            return True # no evidence against case sensitivity

    probe = os.path.join(dirname, folded)
    try:
        st_probe = os.lstat(probe)
    except OSError:
        # the folded spelling doesn't resolve: case-sensitive
        return True
    # identical stat result means both spellings hit the same file,
    # i.e. the filesystem folds case
    return st_probe != st_orig
1176
1180
1177 try:
1181 try:
1178 import re2
1182 import re2
1179 _re2 = None
1183 _re2 = None
1180 except ImportError:
1184 except ImportError:
1181 _re2 = False
1185 _re2 = False
1182
1186
class _re(object):
    """Regex facade that transparently prefers re2 over the stdlib re.

    The module-level _re2 flag is resolved lazily: None means "re2
    imported but not yet validated", False means "unusable", True
    means "validated and usable".
    """

    def _checkre2(self):
        # validate that re2 matching actually works (see issue3964)
        # before trusting it for real patterns
        global _re2
        try:
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        usable = _re2 and not (flags & ~(remod.IGNORECASE | remod.MULTILINE))
        if usable:
            # re2 takes inline flags rather than a flags argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern not supported by re2; fall back to stdlib
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        return remod.escape
1225
1229
# module-level singleton exposing the re2-aware compile()/escape
re = _re()

# per-directory listing cache used by fspath(); maps directory path to
# {normcased name: on-disk name}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _listdircased(directory):
        # build the normcased-name -> on-disk-name map for one directory
        return dict((normcase(entry), entry)
                    for entry in os.listdir(directory))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE(review): the replace() result is discarded, so this line is a
    # no-op; it appears harmless since '\\' inside the character class
    # below already reads as an escaped backslash — confirm on Windows.
    seps.replace('\\','\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    directory = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through verbatim
            result.append(sep)
            continue

        if directory not in _fspathcache:
            _fspathcache[directory] = _listdircased(directory)
        contents = _fspathcache[directory]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[directory] = contents = _listdircased(directory)
            found = contents.get(part)

        result.append(found or part)
        directory = os.path.join(directory, part)

    return ''.join(result)
1270
1274
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so probe with separate files to work around
    # issue2543 (and because testfile may get lost on Samba shares)
    probe1 = testfile + ".hgtmp1"
    if os.path.lexists(probe1):
        return False
    try:
        posixfile(probe1, 'w').close()
    except IOError:
        return False

    probe2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(probe1, probe2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(probe2)
        return nlinks(probe2) > 1
    except OSError:
        return False
    finally:
        if fd is not None:
            fd.close()
        # best-effort cleanup of both probe files
        for probe in (probe1, probe2):
            try:
                os.unlink(probe)
            except OSError:
                pass
1302
1306
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # os.altsep is None on POSIX; the `and` keeps this falsy then
    return os.altsep and path.endswith(os.altsep)
1306
1310
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(os.sep)
1314
1318
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1329
1333
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want.  If the original file already exists, just copy
    # its mode.  Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            src = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy; the fresh empty temp file suffices
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        dst = posixfile(temp, "wb")
        for chunk in filechunkiter(src):
            dst.write(chunk)
        src.close()
        dst.close()
    except: # re-raises
        # don't leave a stale temp file behind on failure
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1368
1372
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegate the file-like methods straight to the temp file
        for attr in ('write', 'seek', 'tell', 'fileno'):
            setattr(self, attr, getattr(self._fp, attr))

    def close(self):
        # publish the temp file under the permanent name
        if self._fp.closed:
            return
        self._fp.close()
        rename(self._tempname, localpath(self.__name))

    def discard(self):
        # drop all pending writes; the original file is untouched
        if self._fp.closed:
            return
        try:
            os.unlink(self._tempname)
        except OSError:
            pass
        self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1406
1410
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    ``mode`` is applied via os.chmod to each directory this call
    creates; ``notindexed`` is forwarded to makedir().
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1423
1427
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno != errno.EEXIST or not os.path.isdir(name):
            raise
        # someone else seems to have won a directory creation race
        return
    if mode is not None:
        os.chmod(name, mode)
1445
1449
def readfile(path):
    """Return the full binary contents of the file at ``path``.

    The file is opened in binary mode and always closed, even if the
    read raises.
    """
    # context manager replaces the manual try/finally close
    with open(path, 'rb') as fp:
        return fp.read()
1452
1456
def writefile(path, text):
    """Replace the contents of the file at ``path`` with ``text``.

    The file is opened in binary write mode (truncating any existing
    content) and always closed, even if the write raises.
    """
    # context manager replaces the manual try/finally close
    with open(path, 'wb') as fp:
        fp.write(text)
1459
1463
def appendfile(path, text):
    """Append ``text`` to the file at ``path``.

    The file is opened in binary append mode (created if missing) and
    always closed, even if the write raises.
    """
    # context manager replaces the manual try/finally close
    with open(path, 'ab') as fp:
        fp.write(text)
1466
1470
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # cap chunk size at 256k so one huge chunk (>1M) doesn't
            # dominate memory usage downstream
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        # _queue holds chunks pulled from self.iter but not yet returned;
        # _chunkoffset is how much of _queue[0] has already been consumed
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            # BUG FIX: previously this returned only ''.join(self.iter),
            # silently dropping any data already buffered in self._queue
            # (including the partially consumed first chunk).  Drain the
            # buffer first so "read everything" really means everything.
            buffered = ''.join(self._queue)
            if self._chunkoffset:
                buffered = buffered[self._chunkoffset:]
            self._queue.clear()
            self._chunkoffset = 0
            return buffered + ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1547
1551
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits to a falsy value, ending the loop
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1568
1572
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        # time.gmtime/strftime cannot represent this; tell the user
        raise Abort(_("negative timestamp: %d") % timestamp,
                    hint=_("check your clock"))
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    local = datetime.datetime.fromtimestamp(timestamp)
    delta = utc - local
    return timestamp, delta.days * 86400 + delta.seconds
1581
1585
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    timestamp, tz = date or makedate()
    if timestamp < 0:
        timestamp = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the custom %z/%1/%2 timezone placeholders
        sign = "-" if tz > 0 else "+"
        hours, mins = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, hours))
        format = format.replace("%2", "%02d" % mins)
    try:
        expanded = time.gmtime(float(timestamp) - tz)
    except ValueError:
        # time was out of range
        expanded = time.gmtime(sys.maxint)
    return time.strftime(format, expanded)
1605
1609
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date."""
    # docstring previously said "iso 8631"; the YYYY-MM-DD calendar
    # date format is defined by ISO 8601
    return datestr(date, format='%Y-%m-%d')
1609
1613
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts "+HHMM"/"-HHMM" numeric offsets and the names "GMT" and
    "UTC".  Returns None for anything else (including the empty
    string, which previously raised IndexError via tz[0]).
    """
    # length check first so tz[0] is never evaluated on an empty string
    if len(tz) == 5 and tz[0] in "+-" and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # offsets are stored as seconds *behind* UTC, hence the negation
        return -sign * (hours * 60 + minutes) * 60
    if tz == "GMT" or tz == "UTC":
        return 0
    return None
1620
1624
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    offset = parsetimezone(string.split()[-1])
    date = string
    if offset is not None:
        # strip the recognized timezone token off the date text
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        if any(("%" + p) in format for p in part):
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True
        else:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1650
1654
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    # no date at all means the epoch, UTC
    if not date:
        return 0, 0
    # already parsed: pass through unchanged
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == 'now' or date == _('now'):
        return makedate()
    # rewrite symbolic day names into a concrete '%b %d' string and let
    # the generic format loop below parse it
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: the internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format in order; the for/else only raises
        # when every single format failed to parse the string
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1729
1733
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round every unspecified field down (month/day default to 1)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round every unspecified field up; try the longest month
        # lengths first since 31/30/29 are not valid for every month
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': match anything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit 'DATE to DATE' inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a bare date matches anything within its accuracy window
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1805
1809
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        regexpat = pattern[3:]
        try:
            compiled = remod.compile(regexpat)
        except remod.error as inst:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % inst)
        return 're', regexpat, compiled.search
    # a recognized 'literal:' prefix is stripped; anything else is kept
    # verbatim and matched exactly
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
1844
1848
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the email domain, if any
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    # keep only the part after an opening angle bracket
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # finally truncate at the first space, then the first dot
    for sep in (' ', '.'):
        pos = user.find(sep)
        if pos >= 0:
            user = user[:pos]
    return user
1860
1864
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the domain part first
    at = user.find('@')
    if at != -1:
        user = user[:at]
    # then anything up to and including an opening angle bracket
    lt = user.find('<')
    if lt != -1:
        user = user[lt + 1:]
    return user
1870
1874
def email(author):
    '''get email of author.'''
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        # no closing bracket: take everything after the opening one
        return author[start:]
    return author[start:end]
1877
1881
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display.

    Trimming itself is delegated to encoding.trim, with '...' as the
    marker appended when the text is shortened.
    """
    return encoding.trim(text, maxlength, ellipsis='...')
1881
1885
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        # pick the first (largest) unit whose threshold the count reaches
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # smaller than every threshold: fall back to the last entry
        return unittable[-1][2] % count

    return render
1892
1896
# Render a byte count using the largest fitting unit; for each unit the
# three (multiplier, divisor, format) rows select 0, 1 or 2 decimal
# places depending on the count's magnitude, and the final row catches
# anything under 1 KB.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1905
1909
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed to single ones.

    Avoids the double backslash in Windows path repr() output.
    """
    raw = repr(s)
    return raw.replace('\\\\', '\\')
1909
1913
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # Factory wrapper: the class is defined lazily on first call and then
    # rebound over this function via the 'global' statement below, so the
    # class body (and textwrap) is only evaluated when actually needed.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr into (head, rest) where head fills at most
            # space_left display columns per encoding.ucolwidth.
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # memoize the class so subsequent calls skip the class definition
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2013
2017
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap a local-encoding byte string to the given display width.

    initindent prefixes the first output line, hangindent all subsequent
    ones. The text is decoded to unicode for wrapping (so multi-byte
    characters are measured in display columns via MBTextWrapper) and
    re-encoded before being returned.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2026
2030
def iterlines(iterator):
    """Split every chunk produced by iterator into lines and yield them."""
    return (line for chunk in iterator for line in chunk.splitlines())
2031
2035
def expandpath(path):
    """Expand environment variables, then '~' constructs, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2034
2038
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # frozen distribution (e.g. py2exe): re-invoke the bundled
        # executable directly rather than going through a launcher
        return [sys.executable]
    return gethgcmd()
2045
2049
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child so it does not linger as a zombie and record
        # that it exited; os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        # SIGCHLD is absent on Windows; there we rely on testpid() alone
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): terminated holds (pid, status) tuples, so
            # 'pid in terminated' never matches a bare pid; liveness is
            # effectively decided by testpid() alone -- confirm intent.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the caller's SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2080
2084
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    The caller's mapping is never modified.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # strip the regex-escaping backslash to get the literal char
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # work on a copy so the escape entry added for the doubled
        # prefix does not leak into the caller's dictionary
        mapping = mapping.copy()
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    # group()[1:] strips the leading prefix character from the match
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2105
2109
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall through to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2122
2126
# recognized spellings for boolean configuration values
_booleans = dict([(v, True) for v in ('1', 'yes', 'true', 'on', 'always')] +
                 [(v, False) for v in ('0', 'no', 'false', 'off', 'never')])

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2133
2137
2134 _hexdig = '0123456789ABCDEFabcdef'
2138 _hexdig = '0123456789ABCDEFabcdef'
2135 _hextochr = dict((a + b, chr(int(a + b, 16)))
2139 _hextochr = dict((a + b, chr(int(a + b, 16)))
2136 for a in _hexdig for b in _hexdig)
2140 for a in _hexdig for b in _hexdig)
2137
2141
2138 def _urlunquote(s):
2142 def _urlunquote(s):
2139 """Decode HTTP/HTML % encoding.
2143 """Decode HTTP/HTML % encoding.
2140
2144
2141 >>> _urlunquote('abc%20def')
2145 >>> _urlunquote('abc%20def')
2142 'abc def'
2146 'abc def'
2143 """
2147 """
2144 res = s.split('%')
2148 res = s.split('%')
2145 # fastpath
2149 # fastpath
2146 if len(res) == 1:
2150 if len(res) == 1:
2147 return s
2151 return s
2148 s = res[0]
2152 s = res[0]
2149 for item in res[1:]:
2153 for item in res[1:]:
2150 try:
2154 try:
2151 s += _hextochr[item[:2]] + item[2:]
2155 s += _hextochr[item[:2]] + item[2:]
2152 except KeyError:
2156 except KeyError:
2153 s += '%' + item
2157 s += '%' + item
2154 except UnicodeDecodeError:
2158 except UnicodeDecodeError:
2155 s += unichr(int(item[:2], 16)) + item[2:]
2159 s += unichr(int(item[:2], 16)) + item[2:]
2156 return s
2160 return s
2157
2161
2158 class url(object):
2162 class url(object):
2159 r"""Reliable URL parser.
2163 r"""Reliable URL parser.
2160
2164
2161 This parses URLs and provides attributes for the following
2165 This parses URLs and provides attributes for the following
2162 components:
2166 components:
2163
2167
2164 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2168 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2165
2169
2166 Missing components are set to None. The only exception is
2170 Missing components are set to None. The only exception is
2167 fragment, which is set to '' if present but empty.
2171 fragment, which is set to '' if present but empty.
2168
2172
2169 If parsefragment is False, fragment is included in query. If
2173 If parsefragment is False, fragment is included in query. If
2170 parsequery is False, query is included in path. If both are
2174 parsequery is False, query is included in path. If both are
2171 False, both fragment and query are included in path.
2175 False, both fragment and query are included in path.
2172
2176
2173 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2177 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2174
2178
2175 Note that for backward compatibility reasons, bundle URLs do not
2179 Note that for backward compatibility reasons, bundle URLs do not
2176 take host names. That means 'bundle://../' has a path of '../'.
2180 take host names. That means 'bundle://../' has a path of '../'.
2177
2181
2178 Examples:
2182 Examples:
2179
2183
2180 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2184 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2181 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2185 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2182 >>> url('ssh://[::1]:2200//home/joe/repo')
2186 >>> url('ssh://[::1]:2200//home/joe/repo')
2183 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2187 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2184 >>> url('file:///home/joe/repo')
2188 >>> url('file:///home/joe/repo')
2185 <url scheme: 'file', path: '/home/joe/repo'>
2189 <url scheme: 'file', path: '/home/joe/repo'>
2186 >>> url('file:///c:/temp/foo/')
2190 >>> url('file:///c:/temp/foo/')
2187 <url scheme: 'file', path: 'c:/temp/foo/'>
2191 <url scheme: 'file', path: 'c:/temp/foo/'>
2188 >>> url('bundle:foo')
2192 >>> url('bundle:foo')
2189 <url scheme: 'bundle', path: 'foo'>
2193 <url scheme: 'bundle', path: 'foo'>
2190 >>> url('bundle://../foo')
2194 >>> url('bundle://../foo')
2191 <url scheme: 'bundle', path: '../foo'>
2195 <url scheme: 'bundle', path: '../foo'>
2192 >>> url(r'c:\foo\bar')
2196 >>> url(r'c:\foo\bar')
2193 <url path: 'c:\\foo\\bar'>
2197 <url path: 'c:\\foo\\bar'>
2194 >>> url(r'\\blah\blah\blah')
2198 >>> url(r'\\blah\blah\blah')
2195 <url path: '\\\\blah\\blah\\blah'>
2199 <url path: '\\\\blah\\blah\\blah'>
2196 >>> url(r'\\blah\blah\blah#baz')
2200 >>> url(r'\\blah\blah\blah#baz')
2197 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2201 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2198 >>> url(r'file:///C:\users\me')
2202 >>> url(r'file:///C:\users\me')
2199 <url scheme: 'file', path: 'C:\\users\\me'>
2203 <url scheme: 'file', path: 'C:\\users\\me'>
2200
2204
2201 Authentication credentials:
2205 Authentication credentials:
2202
2206
2203 >>> url('ssh://joe:xyz@x/repo')
2207 >>> url('ssh://joe:xyz@x/repo')
2204 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2208 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2205 >>> url('ssh://joe@x/repo')
2209 >>> url('ssh://joe@x/repo')
2206 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2210 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2207
2211
2208 Query strings and fragments:
2212 Query strings and fragments:
2209
2213
2210 >>> url('http://host/a?b#c')
2214 >>> url('http://host/a?b#c')
2211 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2215 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2212 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2216 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2213 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2217 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2214 """
2218 """
2215
2219
2216 _safechars = "!~*'()+"
2220 _safechars = "!~*'()+"
2217 _safepchars = "/!~*'()+:\\"
2221 _safepchars = "/!~*'()+:\\"
2218 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2222 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2219
2223
2220 def __init__(self, path, parsequery=True, parsefragment=True):
2224 def __init__(self, path, parsequery=True, parsefragment=True):
2221 # We slowly chomp away at path until we have only the path left
2225 # We slowly chomp away at path until we have only the path left
2222 self.scheme = self.user = self.passwd = self.host = None
2226 self.scheme = self.user = self.passwd = self.host = None
2223 self.port = self.path = self.query = self.fragment = None
2227 self.port = self.path = self.query = self.fragment = None
2224 self._localpath = True
2228 self._localpath = True
2225 self._hostport = ''
2229 self._hostport = ''
2226 self._origpath = path
2230 self._origpath = path
2227
2231
2228 if parsefragment and '#' in path:
2232 if parsefragment and '#' in path:
2229 path, self.fragment = path.split('#', 1)
2233 path, self.fragment = path.split('#', 1)
2230 if not path:
2234 if not path:
2231 path = None
2235 path = None
2232
2236
2233 # special case for Windows drive letters and UNC paths
2237 # special case for Windows drive letters and UNC paths
2234 if hasdriveletter(path) or path.startswith(r'\\'):
2238 if hasdriveletter(path) or path.startswith(r'\\'):
2235 self.path = path
2239 self.path = path
2236 return
2240 return
2237
2241
2238 # For compatibility reasons, we can't handle bundle paths as
2242 # For compatibility reasons, we can't handle bundle paths as
2239 # normal URLS
2243 # normal URLS
2240 if path.startswith('bundle:'):
2244 if path.startswith('bundle:'):
2241 self.scheme = 'bundle'
2245 self.scheme = 'bundle'
2242 path = path[7:]
2246 path = path[7:]
2243 if path.startswith('//'):
2247 if path.startswith('//'):
2244 path = path[2:]
2248 path = path[2:]
2245 self.path = path
2249 self.path = path
2246 return
2250 return
2247
2251
2248 if self._matchscheme(path):
2252 if self._matchscheme(path):
2249 parts = path.split(':', 1)
2253 parts = path.split(':', 1)
2250 if parts[0]:
2254 if parts[0]:
2251 self.scheme, path = parts
2255 self.scheme, path = parts
2252 self._localpath = False
2256 self._localpath = False
2253
2257
2254 if not path:
2258 if not path:
2255 path = None
2259 path = None
2256 if self._localpath:
2260 if self._localpath:
2257 self.path = ''
2261 self.path = ''
2258 return
2262 return
2259 else:
2263 else:
2260 if self._localpath:
2264 if self._localpath:
2261 self.path = path
2265 self.path = path
2262 return
2266 return
2263
2267
2264 if parsequery and '?' in path:
2268 if parsequery and '?' in path:
2265 path, self.query = path.split('?', 1)
2269 path, self.query = path.split('?', 1)
2266 if not path:
2270 if not path:
2267 path = None
2271 path = None
2268 if not self.query:
2272 if not self.query:
2269 self.query = None
2273 self.query = None
2270
2274
2271 # // is required to specify a host/authority
2275 # // is required to specify a host/authority
2272 if path and path.startswith('//'):
2276 if path and path.startswith('//'):
2273 parts = path[2:].split('/', 1)
2277 parts = path[2:].split('/', 1)
2274 if len(parts) > 1:
2278 if len(parts) > 1:
2275 self.host, path = parts
2279 self.host, path = parts
2276 else:
2280 else:
2277 self.host = parts[0]
2281 self.host = parts[0]
2278 path = None
2282 path = None
2279 if not self.host:
2283 if not self.host:
2280 self.host = None
2284 self.host = None
2281 # path of file:///d is /d
2285 # path of file:///d is /d
2282 # path of file:///d:/ is d:/, not /d:/
2286 # path of file:///d:/ is d:/, not /d:/
2283 if path and not hasdriveletter(path):
2287 if path and not hasdriveletter(path):
2284 path = '/' + path
2288 path = '/' + path
2285
2289
2286 if self.host and '@' in self.host:
2290 if self.host and '@' in self.host:
2287 self.user, self.host = self.host.rsplit('@', 1)
2291 self.user, self.host = self.host.rsplit('@', 1)
2288 if ':' in self.user:
2292 if ':' in self.user:
2289 self.user, self.passwd = self.user.split(':', 1)
2293 self.user, self.passwd = self.user.split(':', 1)
2290 if not self.host:
2294 if not self.host:
2291 self.host = None
2295 self.host = None
2292
2296
2293 # Don't split on colons in IPv6 addresses without ports
2297 # Don't split on colons in IPv6 addresses without ports
2294 if (self.host and ':' in self.host and
2298 if (self.host and ':' in self.host and
2295 not (self.host.startswith('[') and self.host.endswith(']'))):
2299 not (self.host.startswith('[') and self.host.endswith(']'))):
2296 self._hostport = self.host
2300 self._hostport = self.host
2297 self.host, self.port = self.host.rsplit(':', 1)
2301 self.host, self.port = self.host.rsplit(':', 1)
2298 if not self.host:
2302 if not self.host:
2299 self.host = None
2303 self.host = None
2300
2304
2301 if (self.host and self.scheme == 'file' and
2305 if (self.host and self.scheme == 'file' and
2302 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2306 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2303 raise Abort(_('file:// URLs can only refer to localhost'))
2307 raise Abort(_('file:// URLs can only refer to localhost'))
2304
2308
2305 self.path = path
2309 self.path = path
2306
2310
2307 # leave the query string escaped
2311 # leave the query string escaped
2308 for a in ('user', 'passwd', 'host', 'port',
2312 for a in ('user', 'passwd', 'host', 'port',
2309 'path', 'fragment'):
2313 'path', 'fragment'):
2310 v = getattr(self, a)
2314 v = getattr(self, a)
2311 if v is not None:
2315 if v is not None:
2312 setattr(self, a, _urlunquote(v))
2316 setattr(self, a, _urlunquote(v))
2313
2317
2314 def __repr__(self):
2318 def __repr__(self):
2315 attrs = []
2319 attrs = []
2316 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2320 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2317 'query', 'fragment'):
2321 'query', 'fragment'):
2318 v = getattr(self, a)
2322 v = getattr(self, a)
2319 if v is not None:
2323 if v is not None:
2320 attrs.append('%s: %r' % (a, v))
2324 attrs.append('%s: %r' % (a, v))
2321 return '<url %s>' % ', '.join(attrs)
2325 return '<url %s>' % ', '.join(attrs)
2322
2326
2323 def __str__(self):
2327 def __str__(self):
2324 r"""Join the URL's components back into a URL string.
2328 r"""Join the URL's components back into a URL string.
2325
2329
2326 Examples:
2330 Examples:
2327
2331
2328 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2332 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2329 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2333 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2330 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2334 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2331 'http://user:pw@host:80/?foo=bar&baz=42'
2335 'http://user:pw@host:80/?foo=bar&baz=42'
2332 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2336 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2333 'http://user:pw@host:80/?foo=bar%3dbaz'
2337 'http://user:pw@host:80/?foo=bar%3dbaz'
2334 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2338 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2335 'ssh://user:pw@[::1]:2200//home/joe#'
2339 'ssh://user:pw@[::1]:2200//home/joe#'
2336 >>> str(url('http://localhost:80//'))
2340 >>> str(url('http://localhost:80//'))
2337 'http://localhost:80//'
2341 'http://localhost:80//'
2338 >>> str(url('http://localhost:80/'))
2342 >>> str(url('http://localhost:80/'))
2339 'http://localhost:80/'
2343 'http://localhost:80/'
2340 >>> str(url('http://localhost:80'))
2344 >>> str(url('http://localhost:80'))
2341 'http://localhost:80/'
2345 'http://localhost:80/'
2342 >>> str(url('bundle:foo'))
2346 >>> str(url('bundle:foo'))
2343 'bundle:foo'
2347 'bundle:foo'
2344 >>> str(url('bundle://../foo'))
2348 >>> str(url('bundle://../foo'))
2345 'bundle:../foo'
2349 'bundle:../foo'
2346 >>> str(url('path'))
2350 >>> str(url('path'))
2347 'path'
2351 'path'
2348 >>> str(url('file:///tmp/foo/bar'))
2352 >>> str(url('file:///tmp/foo/bar'))
2349 'file:///tmp/foo/bar'
2353 'file:///tmp/foo/bar'
2350 >>> str(url('file:///c:/tmp/foo/bar'))
2354 >>> str(url('file:///c:/tmp/foo/bar'))
2351 'file:///c:/tmp/foo/bar'
2355 'file:///c:/tmp/foo/bar'
2352 >>> print url(r'bundle:foo\bar')
2356 >>> print url(r'bundle:foo\bar')
2353 bundle:foo\bar
2357 bundle:foo\bar
2354 >>> print url(r'file:///D:\data\hg')
2358 >>> print url(r'file:///D:\data\hg')
2355 file:///D:\data\hg
2359 file:///D:\data\hg
2356 """
2360 """
2357 if self._localpath:
2361 if self._localpath:
2358 s = self.path
2362 s = self.path
2359 if self.scheme == 'bundle':
2363 if self.scheme == 'bundle':
2360 s = 'bundle:' + s
2364 s = 'bundle:' + s
2361 if self.fragment:
2365 if self.fragment:
2362 s += '#' + self.fragment
2366 s += '#' + self.fragment
2363 return s
2367 return s
2364
2368
2365 s = self.scheme + ':'
2369 s = self.scheme + ':'
2366 if self.user or self.passwd or self.host:
2370 if self.user or self.passwd or self.host:
2367 s += '//'
2371 s += '//'
2368 elif self.scheme and (not self.path or self.path.startswith('/')
2372 elif self.scheme and (not self.path or self.path.startswith('/')
2369 or hasdriveletter(self.path)):
2373 or hasdriveletter(self.path)):
2370 s += '//'
2374 s += '//'
2371 if hasdriveletter(self.path):
2375 if hasdriveletter(self.path):
2372 s += '/'
2376 s += '/'
2373 if self.user:
2377 if self.user:
2374 s += urllib.quote(self.user, safe=self._safechars)
2378 s += urllib.quote(self.user, safe=self._safechars)
2375 if self.passwd:
2379 if self.passwd:
2376 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
2380 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
2377 if self.user or self.passwd:
2381 if self.user or self.passwd:
2378 s += '@'
2382 s += '@'
2379 if self.host:
2383 if self.host:
2380 if not (self.host.startswith('[') and self.host.endswith(']')):
2384 if not (self.host.startswith('[') and self.host.endswith(']')):
2381 s += urllib.quote(self.host)
2385 s += urllib.quote(self.host)
2382 else:
2386 else:
2383 s += self.host
2387 s += self.host
2384 if self.port:
2388 if self.port:
2385 s += ':' + urllib.quote(self.port)
2389 s += ':' + urllib.quote(self.port)
2386 if self.host:
2390 if self.host:
2387 s += '/'
2391 s += '/'
2388 if self.path:
2392 if self.path:
2389 # TODO: similar to the query string, we should not unescape the
2393 # TODO: similar to the query string, we should not unescape the
2390 # path when we store it, the path might contain '%2f' = '/',
2394 # path when we store it, the path might contain '%2f' = '/',
2391 # which we should *not* escape.
2395 # which we should *not* escape.
2392 s += urllib.quote(self.path, safe=self._safepchars)
2396 s += urllib.quote(self.path, safe=self._safepchars)
2393 if self.query:
2397 if self.query:
2394 # we store the query in escaped form.
2398 # we store the query in escaped form.
2395 s += '?' + self.query
2399 s += '?' + self.query
2396 if self.fragment is not None:
2400 if self.fragment is not None:
2397 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
2401 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
2398 return s
2402 return s
2399
2403
2400 def authinfo(self):
2404 def authinfo(self):
2401 user, passwd = self.user, self.passwd
2405 user, passwd = self.user, self.passwd
2402 try:
2406 try:
2403 self.user, self.passwd = None, None
2407 self.user, self.passwd = None, None
2404 s = str(self)
2408 s = str(self)
2405 finally:
2409 finally:
2406 self.user, self.passwd = user, passwd
2410 self.user, self.passwd = user, passwd
2407 if not self.user:
2411 if not self.user:
2408 return (s, None)
2412 return (s, None)
2409 # authinfo[1] is passed to urllib2 password manager, and its
2413 # authinfo[1] is passed to urllib2 password manager, and its
2410 # URIs must not contain credentials. The host is passed in the
2414 # URIs must not contain credentials. The host is passed in the
2411 # URIs list because Python < 2.4.3 uses only that to search for
2415 # URIs list because Python < 2.4.3 uses only that to search for
2412 # a password.
2416 # a password.
2413 return (s, (None, (s, self.host),
2417 return (s, (None, (s, self.host),
2414 self.user, self.passwd or ''))
2418 self.user, self.passwd or ''))
2415
2419
2416 def isabs(self):
2420 def isabs(self):
2417 if self.scheme and self.scheme != 'file':
2421 if self.scheme and self.scheme != 'file':
2418 return True # remote URL
2422 return True # remote URL
2419 if hasdriveletter(self.path):
2423 if hasdriveletter(self.path):
2420 return True # absolute for our purposes - can't be joined()
2424 return True # absolute for our purposes - can't be joined()
2421 if self.path.startswith(r'\\'):
2425 if self.path.startswith(r'\\'):
2422 return True # Windows UNC path
2426 return True # Windows UNC path
2423 if self.path.startswith('/'):
2427 if self.path.startswith('/'):
2424 return True # POSIX-style
2428 return True # POSIX-style
2425 return False
2429 return False
2426
2430
2427 def localpath(self):
2431 def localpath(self):
2428 if self.scheme == 'file' or self.scheme == 'bundle':
2432 if self.scheme == 'file' or self.scheme == 'bundle':
2429 path = self.path or '/'
2433 path = self.path or '/'
2430 # For Windows, we need to promote hosts containing drive
2434 # For Windows, we need to promote hosts containing drive
2431 # letters to paths with drive letters.
2435 # letters to paths with drive letters.
2432 if hasdriveletter(self._hostport):
2436 if hasdriveletter(self._hostport):
2433 path = self._hostport + '/' + self.path
2437 path = self._hostport + '/' + self.path
2434 elif (self.host is not None and self.path
2438 elif (self.host is not None and self.path
2435 and not hasdriveletter(path)):
2439 and not hasdriveletter(path)):
2436 path = '/' + path
2440 path = '/' + path
2437 return path
2441 return path
2438 return self._origpath
2442 return self._origpath
2439
2443
2440 def islocal(self):
2444 def islocal(self):
2441 '''whether localpath will return something that posixfile can open'''
2445 '''whether localpath will return something that posixfile can open'''
2442 return (not self.scheme or self.scheme == 'file'
2446 return (not self.scheme or self.scheme == 'file'
2443 or self.scheme == 'bundle')
2447 or self.scheme == 'bundle')
2444
2448
2445 def hasscheme(path):
2449 def hasscheme(path):
2446 return bool(url(path).scheme)
2450 return bool(url(path).scheme)
2447
2451
2448 def hasdriveletter(path):
2452 def hasdriveletter(path):
2449 return path and path[1:2] == ':' and path[0:1].isalpha()
2453 return path and path[1:2] == ':' and path[0:1].isalpha()
2450
2454
2451 def urllocalpath(path):
2455 def urllocalpath(path):
2452 return url(path, parsequery=False, parsefragment=False).localpath()
2456 return url(path, parsequery=False, parsefragment=False).localpath()
2453
2457
2454 def hidepassword(u):
2458 def hidepassword(u):
2455 '''hide user credential in a url string'''
2459 '''hide user credential in a url string'''
2456 u = url(u)
2460 u = url(u)
2457 if u.passwd:
2461 if u.passwd:
2458 u.passwd = '***'
2462 u.passwd = '***'
2459 return str(u)
2463 return str(u)
2460
2464
2461 def removeauth(u):
2465 def removeauth(u):
2462 '''remove all authentication information from a url string'''
2466 '''remove all authentication information from a url string'''
2463 u = url(u)
2467 u = url(u)
2464 u.user = u.passwd = None
2468 u.user = u.passwd = None
2465 return str(u)
2469 return str(u)
2466
2470
2467 def isatty(fp):
2471 def isatty(fp):
2468 try:
2472 try:
2469 return fp.isatty()
2473 return fp.isatty()
2470 except AttributeError:
2474 except AttributeError:
2471 return False
2475 return False
2472
2476
2473 timecount = unitcountfn(
2477 timecount = unitcountfn(
2474 (1, 1e3, _('%.0f s')),
2478 (1, 1e3, _('%.0f s')),
2475 (100, 1, _('%.1f s')),
2479 (100, 1, _('%.1f s')),
2476 (10, 1, _('%.2f s')),
2480 (10, 1, _('%.2f s')),
2477 (1, 1, _('%.3f s')),
2481 (1, 1, _('%.3f s')),
2478 (100, 0.001, _('%.1f ms')),
2482 (100, 0.001, _('%.1f ms')),
2479 (10, 0.001, _('%.2f ms')),
2483 (10, 0.001, _('%.2f ms')),
2480 (1, 0.001, _('%.3f ms')),
2484 (1, 0.001, _('%.3f ms')),
2481 (100, 0.000001, _('%.1f us')),
2485 (100, 0.000001, _('%.1f us')),
2482 (10, 0.000001, _('%.2f us')),
2486 (10, 0.000001, _('%.2f us')),
2483 (1, 0.000001, _('%.3f us')),
2487 (1, 0.000001, _('%.3f us')),
2484 (100, 0.000000001, _('%.1f ns')),
2488 (100, 0.000000001, _('%.1f ns')),
2485 (10, 0.000000001, _('%.2f ns')),
2489 (10, 0.000000001, _('%.2f ns')),
2486 (1, 0.000000001, _('%.3f ns')),
2490 (1, 0.000000001, _('%.3f ns')),
2487 )
2491 )
2488
2492
2489 _timenesting = [0]
2493 _timenesting = [0]
2490
2494
2491 def timed(func):
2495 def timed(func):
2492 '''Report the execution time of a function call to stderr.
2496 '''Report the execution time of a function call to stderr.
2493
2497
2494 During development, use as a decorator when you need to measure
2498 During development, use as a decorator when you need to measure
2495 the cost of a function, e.g. as follows:
2499 the cost of a function, e.g. as follows:
2496
2500
2497 @util.timed
2501 @util.timed
2498 def foo(a, b, c):
2502 def foo(a, b, c):
2499 pass
2503 pass
2500 '''
2504 '''
2501
2505
2502 def wrapper(*args, **kwargs):
2506 def wrapper(*args, **kwargs):
2503 start = time.time()
2507 start = time.time()
2504 indent = 2
2508 indent = 2
2505 _timenesting[0] += indent
2509 _timenesting[0] += indent
2506 try:
2510 try:
2507 return func(*args, **kwargs)
2511 return func(*args, **kwargs)
2508 finally:
2512 finally:
2509 elapsed = time.time() - start
2513 elapsed = time.time() - start
2510 _timenesting[0] -= indent
2514 _timenesting[0] -= indent
2511 sys.stderr.write('%s%s: %s\n' %
2515 sys.stderr.write('%s%s: %s\n' %
2512 (' ' * _timenesting[0], func.__name__,
2516 (' ' * _timenesting[0], func.__name__,
2513 timecount(elapsed)))
2517 timecount(elapsed)))
2514 return wrapper
2518 return wrapper
2515
2519
2516 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2520 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2517 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2521 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2518
2522
2519 def sizetoint(s):
2523 def sizetoint(s):
2520 '''Convert a space specifier to a byte count.
2524 '''Convert a space specifier to a byte count.
2521
2525
2522 >>> sizetoint('30')
2526 >>> sizetoint('30')
2523 30
2527 30
2524 >>> sizetoint('2.2kb')
2528 >>> sizetoint('2.2kb')
2525 2252
2529 2252
2526 >>> sizetoint('6M')
2530 >>> sizetoint('6M')
2527 6291456
2531 6291456
2528 '''
2532 '''
2529 t = s.strip().lower()
2533 t = s.strip().lower()
2530 try:
2534 try:
2531 for k, u in _sizeunits:
2535 for k, u in _sizeunits:
2532 if t.endswith(k):
2536 if t.endswith(k):
2533 return int(float(t[:-len(k)]) * u)
2537 return int(float(t[:-len(k)]) * u)
2534 return int(t)
2538 return int(t)
2535 except ValueError:
2539 except ValueError:
2536 raise error.ParseError(_("couldn't parse size: %s") % s)
2540 raise error.ParseError(_("couldn't parse size: %s") % s)
2537
2541
2538 class hooks(object):
2542 class hooks(object):
2539 '''A collection of hook functions that can be used to extend a
2543 '''A collection of hook functions that can be used to extend a
2540 function's behavior. Hooks are called in lexicographic order,
2544 function's behavior. Hooks are called in lexicographic order,
2541 based on the names of their sources.'''
2545 based on the names of their sources.'''
2542
2546
2543 def __init__(self):
2547 def __init__(self):
2544 self._hooks = []
2548 self._hooks = []
2545
2549
2546 def add(self, source, hook):
2550 def add(self, source, hook):
2547 self._hooks.append((source, hook))
2551 self._hooks.append((source, hook))
2548
2552
2549 def __call__(self, *args):
2553 def __call__(self, *args):
2550 self._hooks.sort(key=lambda x: x[0])
2554 self._hooks.sort(key=lambda x: x[0])
2551 results = []
2555 results = []
2552 for source, hook in self._hooks:
2556 for source, hook in self._hooks:
2553 results.append(hook(*args))
2557 results.append(hook(*args))
2554 return results
2558 return results
2555
2559
2556 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2560 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2557 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2561 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2558 Skips the 'skip' last entries. By default it will flush stdout first.
2562 Skips the 'skip' last entries. By default it will flush stdout first.
2559 It can be used everywhere and do intentionally not require an ui object.
2563 It can be used everywhere and do intentionally not require an ui object.
2560 Not be used in production code but very convenient while developing.
2564 Not be used in production code but very convenient while developing.
2561 '''
2565 '''
2562 if otherf:
2566 if otherf:
2563 otherf.flush()
2567 otherf.flush()
2564 f.write('%s at:\n' % msg)
2568 f.write('%s at:\n' % msg)
2565 entries = [('%s:%s' % (fn, ln), func)
2569 entries = [('%s:%s' % (fn, ln), func)
2566 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2570 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2567 if entries:
2571 if entries:
2568 fnmax = max(len(entry[0]) for entry in entries)
2572 fnmax = max(len(entry[0]) for entry in entries)
2569 for fnln, func in entries:
2573 for fnln, func in entries:
2570 f.write(' %-*s in %s\n' % (fnmax, fnln, func))
2574 f.write(' %-*s in %s\n' % (fnmax, fnln, func))
2571 f.flush()
2575 f.flush()
2572
2576
2573 class dirs(object):
2577 class dirs(object):
2574 '''a multiset of directory names from a dirstate or manifest'''
2578 '''a multiset of directory names from a dirstate or manifest'''
2575
2579
2576 def __init__(self, map, skip=None):
2580 def __init__(self, map, skip=None):
2577 self._dirs = {}
2581 self._dirs = {}
2578 addpath = self.addpath
2582 addpath = self.addpath
2579 if safehasattr(map, 'iteritems') and skip is not None:
2583 if safehasattr(map, 'iteritems') and skip is not None:
2580 for f, s in map.iteritems():
2584 for f, s in map.iteritems():
2581 if s[0] != skip:
2585 if s[0] != skip:
2582 addpath(f)
2586 addpath(f)
2583 else:
2587 else:
2584 for f in map:
2588 for f in map:
2585 addpath(f)
2589 addpath(f)
2586
2590
2587 def addpath(self, path):
2591 def addpath(self, path):
2588 dirs = self._dirs
2592 dirs = self._dirs
2589 for base in finddirs(path):
2593 for base in finddirs(path):
2590 if base in dirs:
2594 if base in dirs:
2591 dirs[base] += 1
2595 dirs[base] += 1
2592 return
2596 return
2593 dirs[base] = 1
2597 dirs[base] = 1
2594
2598
2595 def delpath(self, path):
2599 def delpath(self, path):
2596 dirs = self._dirs
2600 dirs = self._dirs
2597 for base in finddirs(path):
2601 for base in finddirs(path):
2598 if dirs[base] > 1:
2602 if dirs[base] > 1:
2599 dirs[base] -= 1
2603 dirs[base] -= 1
2600 return
2604 return
2601 del dirs[base]
2605 del dirs[base]
2602
2606
2603 def __iter__(self):
2607 def __iter__(self):
2604 return self._dirs.iterkeys()
2608 return self._dirs.iterkeys()
2605
2609
2606 def __contains__(self, d):
2610 def __contains__(self, d):
2607 return d in self._dirs
2611 return d in self._dirs
2608
2612
2609 if safehasattr(parsers, 'dirs'):
2613 if safehasattr(parsers, 'dirs'):
2610 dirs = parsers.dirs
2614 dirs = parsers.dirs
2611
2615
2612 def finddirs(path):
2616 def finddirs(path):
2613 pos = path.rfind('/')
2617 pos = path.rfind('/')
2614 while pos != -1:
2618 while pos != -1:
2615 yield path[:pos]
2619 yield path[:pos]
2616 pos = path.rfind('/', 0, pos)
2620 pos = path.rfind('/', 0, pos)
2617
2621
2618 # compression utility
2622 # compression utility
2619
2623
2620 class nocompress(object):
2624 class nocompress(object):
2621 def compress(self, x):
2625 def compress(self, x):
2622 return x
2626 return x
2623 def flush(self):
2627 def flush(self):
2624 return ""
2628 return ""
2625
2629
2626 compressors = {
2630 compressors = {
2627 None: nocompress,
2631 None: nocompress,
2628 # lambda to prevent early import
2632 # lambda to prevent early import
2629 'BZ': lambda: bz2.BZ2Compressor(),
2633 'BZ': lambda: bz2.BZ2Compressor(),
2630 'GZ': lambda: zlib.compressobj(),
2634 'GZ': lambda: zlib.compressobj(),
2631 }
2635 }
2632 # also support the old form by courtesies
2636 # also support the old form by courtesies
2633 compressors['UN'] = compressors[None]
2637 compressors['UN'] = compressors[None]
2634
2638
2635 def _makedecompressor(decompcls):
2639 def _makedecompressor(decompcls):
2636 def generator(f):
2640 def generator(f):
2637 d = decompcls()
2641 d = decompcls()
2638 for chunk in filechunkiter(f):
2642 for chunk in filechunkiter(f):
2639 yield d.decompress(chunk)
2643 yield d.decompress(chunk)
2640 def func(fh):
2644 def func(fh):
2641 return chunkbuffer(generator(fh))
2645 return chunkbuffer(generator(fh))
2642 return func
2646 return func
2643
2647
2644 class ctxmanager(object):
2648 class ctxmanager(object):
2645 '''A context manager for use in 'with' blocks to allow multiple
2649 '''A context manager for use in 'with' blocks to allow multiple
2646 contexts to be entered at once. This is both safer and more
2650 contexts to be entered at once. This is both safer and more
2647 flexible than contextlib.nested.
2651 flexible than contextlib.nested.
2648
2652
2649 Once Mercurial supports Python 2.7+, this will become mostly
2653 Once Mercurial supports Python 2.7+, this will become mostly
2650 unnecessary.
2654 unnecessary.
2651 '''
2655 '''
2652
2656
2653 def __init__(self, *args):
2657 def __init__(self, *args):
2654 '''Accepts a list of no-argument functions that return context
2658 '''Accepts a list of no-argument functions that return context
2655 managers. These will be invoked at __call__ time.'''
2659 managers. These will be invoked at __call__ time.'''
2656 self._pending = args
2660 self._pending = args
2657 self._atexit = []
2661 self._atexit = []
2658
2662
2659 def __enter__(self):
2663 def __enter__(self):
2660 return self
2664 return self
2661
2665
2662 def __call__(self):
2666 def __call__(self):
2663 '''Create and enter context managers in the order in which they were
2667 '''Create and enter context managers in the order in which they were
2664 passed to the constructor.'''
2668 passed to the constructor.'''
2665 values = []
2669 values = []
2666 for func in self._pending:
2670 for func in self._pending:
2667 obj = func()
2671 obj = func()
2668 values.append(obj.__enter__())
2672 values.append(obj.__enter__())
2669 self._atexit.append(obj.__exit__)
2673 self._atexit.append(obj.__exit__)
2670 del self._pending
2674 del self._pending
2671 return values
2675 return values
2672
2676
2673 def atexit(self, func, *args, **kwargs):
2677 def atexit(self, func, *args, **kwargs):
2674 '''Add a function to call when this context manager exits. The
2678 '''Add a function to call when this context manager exits. The
2675 ordering of multiple atexit calls is unspecified, save that
2679 ordering of multiple atexit calls is unspecified, save that
2676 they will happen before any __exit__ functions.'''
2680 they will happen before any __exit__ functions.'''
2677 def wrapper(exc_type, exc_val, exc_tb):
2681 def wrapper(exc_type, exc_val, exc_tb):
2678 func(*args, **kwargs)
2682 func(*args, **kwargs)
2679 self._atexit.append(wrapper)
2683 self._atexit.append(wrapper)
2680 return func
2684 return func
2681
2685
2682 def __exit__(self, exc_type, exc_val, exc_tb):
2686 def __exit__(self, exc_type, exc_val, exc_tb):
2683 '''Context managers are exited in the reverse order from which
2687 '''Context managers are exited in the reverse order from which
2684 they were created.'''
2688 they were created.'''
2685 received = exc_type is not None
2689 received = exc_type is not None
2686 suppressed = False
2690 suppressed = False
2687 pending = None
2691 pending = None
2688 self._atexit.reverse()
2692 self._atexit.reverse()
2689 for exitfunc in self._atexit:
2693 for exitfunc in self._atexit:
2690 try:
2694 try:
2691 if exitfunc(exc_type, exc_val, exc_tb):
2695 if exitfunc(exc_type, exc_val, exc_tb):
2692 suppressed = True
2696 suppressed = True
2693 exc_type = None
2697 exc_type = None
2694 exc_val = None
2698 exc_val = None
2695 exc_tb = None
2699 exc_tb = None
2696 except BaseException:
2700 except BaseException:
2697 pending = sys.exc_info()
2701 pending = sys.exc_info()
2698 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2702 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2699 del self._atexit
2703 del self._atexit
2700 if pending:
2704 if pending:
2701 raise exc_val
2705 raise exc_val
2702 return received and suppressed
2706 return received and suppressed
2703
2707
2704 def _bz2():
2708 def _bz2():
2705 d = bz2.BZ2Decompressor()
2709 d = bz2.BZ2Decompressor()
2706 # Bzip2 stream start with BZ, but we stripped it.
2710 # Bzip2 stream start with BZ, but we stripped it.
2707 # we put it back for good measure.
2711 # we put it back for good measure.
2708 d.decompress('BZ')
2712 d.decompress('BZ')
2709 return d
2713 return d
2710
2714
2711 decompressors = {None: lambda fh: fh,
2715 decompressors = {None: lambda fh: fh,
2712 '_truncatedBZ': _makedecompressor(_bz2),
2716 '_truncatedBZ': _makedecompressor(_bz2),
2713 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2717 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2714 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2718 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2715 }
2719 }
2716 # also support the old form by courtesies
2720 # also support the old form by courtesies
2717 decompressors['UN'] = decompressors[None]
2721 decompressors['UN'] = decompressors[None]
2718
2722
2719 # convenient shortcut
2723 # convenient shortcut
2720 dst = debugstacktrace
2724 dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now