##// END OF EJS Templates
# Commit metadata (from the original code-review page):
#   util: reword debugstacktrace comment
#   author: timeless
#   revision: r28496:b592564a (default branch)
# @@ -1,2717 +1,2717 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import urllib
37 import urllib
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 )
46 )
47
47
# Select the platform-specific implementation module. All platform
# differences are funneled through the 'windows' or 'posix' module so the
# rest of this file can stay platform-agnostic.
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

# Short names for the hashlib constructors used throughout Mercurial.
md5 = hashlib.md5
sha1 = hashlib.sha1
sha512 = hashlib.sha512
# Gettext translation helper.
_ = i18n._

# Re-export the platform layer's API at module level so callers can use
# e.g. util.rename() without caring which platform module provides it.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# Prefer the C implementation from osutil when it provides one.
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
112
112
# Python compatibility

# Unique sentinel object: lets safehasattr() distinguish a genuinely
# missing attribute from an attribute whose value is None.
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
121
121
def safehasattr(thing, attr):
    """Return True if getattr(thing, attr) would succeed.

    Only a genuinely missing attribute (AttributeError inside getattr)
    yields False; other exceptions propagate to the caller.
    """
    missing = object()
    return getattr(thing, attr, missing) is not missing
124
124
# Map of supported digest names to their hashlib constructors.
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check: every entry in the strength ranking must be supported.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
135
135
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create one hash object per name in `digests`.

        `s`, if non-empty, is hashed immediately as initial data.
        Raises Abort for an unknown digest name.
        """
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed `data` into every underlying hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest accumulated so far for type `key`."""
        if key not in DIGESTS:
            # Bug fix: this previously interpolated the undefined name 'k'
            # (a leftover from __init__'s loop variable), so reaching this
            # error path raised NameError instead of the intended Abort.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
182
182
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())
        self._fh = fh
        self._size = size
        self._got = 0

    def read(self, length=-1):
        """Read from the wrapped handle, feeding the data to the digester."""
        data = self._fh.read(length)
        self._got += len(data)
        self._digester.update(data)
        return data

    def validate(self):
        """Abort unless the bytes read match the expected size and digests."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
214
214
# Compatibility shim: ensure a 'buffer' callable exists even on
# interpreters that lack the builtin (removed in Python 3).
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # memoryview slicing is zero-copy on Python 3
            return memoryview(sliceable)[offset:]

# On POSIX, close inherited file descriptors when spawning subprocesses.
closefds = os.name == 'posix'

# Read size used by bufferedinputpipe._fillbuffer().
_chunksize = 4096
228
228
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []   # buffered chunks, newest last
        self._eof = False   # True once os.read() returned ''
        self._lenbuf = 0    # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        # delegate to the wrapped pipe
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # Fill until 'size' bytes are buffered or EOF, then return at most
        # 'size' bytes from the buffer.
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first '\n' in the newest chunk, -1 if none seen
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        # keep reading until a newline shows up or we hit EOF
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks into one string so slicing is simple
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        # keep any leftover bytes as the single remaining chunk
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
322
322
def popen2(cmd, env=None, newlines=False):
    """Run `cmd` through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
333
333
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but drop the Popen object from the result."""
    return popen4(cmd, env, newlines)[:3]
337
337
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run `cmd` through the shell with piped stdin/stdout/stderr.

    Returns (stdin, stdout, stderr, Popen object).
    """
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
346
346
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # no generated version module -> version is unknown
        return 'unknown'
    return __version__.version
354
354
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # Everything after the first '+' is the "extra" (local build) part.
    pieces = v.split('+', 1)
    vparts = pieces[0]
    extra = pieces[1] if len(pieces) > 1 else None

    # Collect leading numeric dotted components; stop at the first
    # component that is not a plain integer.
    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            break
    # Pad to three components: (3, 6) -> (3, 6, None).
    vints.extend([None] * (3 - len(vints)))

    if n == 2:
        return tuple(vints[:2])
    if n == 3:
        return tuple(vints[:3])
    if n == 4:
        return tuple(vints[:3]) + (extra,)
407
407
# used by parsedate
# strptime() formats tried in order when parsing a user-supplied date.
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# Additionally accept bare years/months (used for date range queries).
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
442
442
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # func.__code__ is available on Python 2.6+ AND Python 3, unlike the
    # legacy func.func_code attribute, which Python 3 removed.
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # zero-arg case: a one-element list doubles as the "computed" flag
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
468
468
class sortdict(dict):
    '''a simple sorted dictionary

    Keys iterate in the order they were last inserted; re-setting an
    existing key moves it to the end.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            # items() exists on both Python 2 and 3; the previously used
            # iteritems() was removed in Python 3.
            src = src.items()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # Bug fix: return the popped value, per the dict.pop() contract.
        # Previously the value was computed and silently discarded, so
        # sortdict.pop() always returned None.
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent (pop fell back to a default)
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place key at an arbitrary position without the move-to-end rule
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
513
513
class _lrucachenode(object):
    """One link of the doubly linked list backing lrucachedict.

    Carries the dictionary key/value pair plus references to the
    neighbouring nodes on either side. An empty node is flagged by
    setting its key to the _notset sentinel.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # not yet linked into any list
        self.prev = None
        self.next = None
        # no entry stored yet
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
532
532
533 class lrucachedict(object):
533 class lrucachedict(object):
534 """Dict that caches most recent accesses and sets.
534 """Dict that caches most recent accesses and sets.
535
535
536 The dict consists of an actual backing dict - indexed by original
536 The dict consists of an actual backing dict - indexed by original
537 key - and a doubly linked circular list defining the order of entries in
537 key - and a doubly linked circular list defining the order of entries in
538 the cache.
538 the cache.
539
539
540 The head node is the newest entry in the cache. If the cache is full,
540 The head node is the newest entry in the cache. If the cache is full,
541 we recycle head.prev and make it the new head. Cache accesses result in
541 we recycle head.prev and make it the new head. Cache accesses result in
542 the node being moved to before the existing head and being marked as the
542 the node being moved to before the existing head and being marked as the
543 new head node.
543 new head node.
544 """
544 """
545 def __init__(self, max):
545 def __init__(self, max):
546 self._cache = {}
546 self._cache = {}
547
547
548 self._head = head = _lrucachenode()
548 self._head = head = _lrucachenode()
549 head.prev = head
549 head.prev = head
550 head.next = head
550 head.next = head
551 self._size = 1
551 self._size = 1
552 self._capacity = max
552 self._capacity = max
553
553
554 def __len__(self):
554 def __len__(self):
555 return len(self._cache)
555 return len(self._cache)
556
556
557 def __contains__(self, k):
557 def __contains__(self, k):
558 return k in self._cache
558 return k in self._cache
559
559
560 def __iter__(self):
560 def __iter__(self):
561 # We don't have to iterate in cache order, but why not.
561 # We don't have to iterate in cache order, but why not.
562 n = self._head
562 n = self._head
563 for i in range(len(self._cache)):
563 for i in range(len(self._cache)):
564 yield n.key
564 yield n.key
565 n = n.next
565 n = n.next
566
566
567 def __getitem__(self, k):
567 def __getitem__(self, k):
568 node = self._cache[k]
568 node = self._cache[k]
569 self._movetohead(node)
569 self._movetohead(node)
570 return node.value
570 return node.value
571
571
    def __setitem__(self, k, v):
        """Store *v* under *k*, making it the most recently used entry.

        When the cache is full, the least recently used entry is evicted
        to make room.
        """
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            # The node ring is grown lazily, one node per insertion, up
            # to the configured capacity.
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node
596
596
    def __delitem__(self, k):
        """Remove *k* from the cache, recycling its node as empty.

        Raises KeyError if *k* is not cached.
        """
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next
605
605
606 # Additional dict methods.
606 # Additional dict methods.
607
607
608 def get(self, k, default=None):
608 def get(self, k, default=None):
609 try:
609 try:
610 return self._cache[k]
610 return self._cache[k]
611 except KeyError:
611 except KeyError:
612 return default
612 return default
613
613
614 def clear(self):
614 def clear(self):
615 n = self._head
615 n = self._head
616 while n.key is not _notset:
616 while n.key is not _notset:
617 n.markempty()
617 n.markempty()
618 n = n.next
618 n = n.next
619
619
620 self._cache.clear()
620 self._cache.clear()
621
621
622 def copy(self):
622 def copy(self):
623 result = lrucachedict(self._capacity)
623 result = lrucachedict(self._capacity)
624 n = self._head.prev
624 n = self._head.prev
625 # Iterate in oldest-to-newest order, so the copy has the right ordering
625 # Iterate in oldest-to-newest order, so the copy has the right ordering
626 for i in range(len(self._cache)):
626 for i in range(len(self._cache)):
627 result[n.key] = n.value
627 result[n.key] = n.value
628 n = n.prev
628 n = n.prev
629 return result
629 return result
630
630
    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)

        Works for any position of ``node``, including when it is already
        adjacent to the head (see the note at the N.next step below).
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node
677
677
    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.

        Returns the new (empty) node so the caller can fill it in.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
691
691
def lrucachefunc(func):
    '''cache most recent results of function calls

    Keeps at most ~20 results; the least recently used entry is evicted
    when the cache grows past that bound. Single-argument functions get a
    cheaper fast path that keys the cache on the argument itself.
    '''
    cache = {}
    order = collections.deque()
    # ``__code__`` is the canonical spelling since Python 2.6
    # (``func_code`` is the legacy py2-only alias), so prefer it.
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                # Cache hit: refresh recency by moving to the back.
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
718
718
class propertycache(object):
    """Descriptor computing an attribute once and caching it on the instance.

    The first lookup runs the decorated function and stores the result in
    the instance ``__dict__`` under the same name, so later lookups never
    reach this descriptor again.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
731
731
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    stdout, _stderr = proc.communicate(s)
    return stdout
738
738
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.

    Raises Abort when the command exits with a non-zero status.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # OpenVMS reports success with the low bit set.
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # Best-effort cleanup of both temporary files.
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
772
772
# Map of filter-spec prefixes to their implementations; consulted by
# filter() below to dispatch on the command string.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
777
777
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # Dispatch on a known scheme prefix; default to a plain shell pipe.
    for prefix, filterfn in filtertable.iteritems():
        if cmd.startswith(prefix):
            return filterfn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
784
784
def binary(s):
    """return true if a string is binary data"""
    # An embedded NUL is the traditional heuristic for binary content.
    if not s:
        return False
    return '\0' in s
788
788
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen < min:
            continue
        if min < max:
            # Double the threshold, but jump straight to the largest
            # power of two not exceeding what we actually buffered,
            # capped at max.
            min = min << 1
            nmin = 1 << log2(pendinglen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        # Flush whatever is left, even if smaller than min.
        yield ''.join(pending)
819
819
# Alias: let this module's users spell error.Abort as util.Abort.
Abort = error.Abort
821
821
def always(fn):
    """Predicate that is true for any input."""
    return True
824
824
def never(fn):
    """Predicate that is false for any input."""
    return False
827
827
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only re-enable if the caller had GC on to begin with.
            if wasenabled:
                gc.enable()
    return wrapper
849
849
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # Different drives: no relative path exists; anchor at root.
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    comps1 = splitpath(n1)
    comps2 = n2.split('/')
    comps1.reverse()
    comps2.reverse()
    # Strip the common prefix (now at the tails of the reversed lists).
    while comps1 and comps2 and comps1[-1] == comps2[-1]:
        comps1.pop()
        comps2.pop()
    comps2.reverse()
    # Climb out of what remains of n1, then descend into n2.
    return os.sep.join(['..'] * len(comps1) + comps2) or '.'
875
875
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"): # new py2exe
        return True
    if safehasattr(sys, "importers"): # old py2exe
        return True
    return imp.is_frozen("__main__") # tools/freeze
885
885
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    # running from source: data files live next to this module
    datapath = os.path.dirname(__file__)

# presumably i18n resolves translation data relative to datapath — see
# i18n.setdatapath for the exact semantics
i18n.setdatapath(datapath)
894
894
# Cached result of hgexecutable(); filled in lazily on first call.
_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # Explicit override via the HG environment variable.
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # Running from an 'hg' script: that script is the executable.
            _sethgexecutable(mainmod.__file__)
        else:
            # Fall back to whatever 'hg' is found on PATH, or argv[0].
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
919
919
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # Updates the module-level cache read back by hgexecutable().
    global _hgexecutable
    _hgexecutable = path
924
924
925 def _isstdout(f):
925 def _isstdout(f):
926 fileno = getattr(f, 'fileno', None)
926 fileno = getattr(f, 'fileno', None)
927 return fileno and fileno() == sys.__stdout__.fileno()
927 return fileno and fileno() == sys.__stdout__.fileno()
928
928
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # Flush our own buffered output before the child writes to the
        # same stream, so ordering is sane.
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        # Expose the current hg executable to the child as $HG.
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            # Forward the child's combined stdout/stderr to 'out',
            # line by line, until EOF.
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # OpenVMS encodes success in the low bit.
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
987
987
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback of depth one means the TypeError came from the
            # call itself (bad arguments), not from inside func's body.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
999
999
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    Note: the hardlink flag is currently a no-op (see issue4546 below).
    Raises Abort if shutil reports a copy error.'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # Recreate the symlink rather than copying its target.
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
1027
1027
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) pair: whether hardlinking was still viable
    at the end, and how many files were linked or copied.
    """
    num = 0

    if hardlink is None:
        # Hardlinks only work within a filesystem; compare device ids.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset nested progress by the files already handled.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Linking failed; degrade to copying from here on.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1064
1064
# Filename components and characters that are reserved or invalid on
# Windows; consulted by checkwinfilename() below.
_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # Control characters are never valid in Windows names.
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # Reserved device names apply to the part before the first dot
        # (e.g. 'con.xml' is still 'con').
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # Note: ``n not in '..'`` is a substring test — it deliberately
        # exempts the path components '.' and '..' from this check.
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1115
1115
# Validate filenames against the rules of the OS we are running on:
# the Windows rules when on Windows, otherwise whatever the platform
# module provides.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1120
1120
def makelock(info, pathname):
    """Create a lock at *pathname* carrying the string *info*.

    Prefers a symlink (atomic, stores info in the link target); falls
    back to an exclusively-created regular file when the platform has no
    symlink support. Raises OSError(EEXIST) if the lock already exists.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # Other symlink failures: try the regular-file fallback below.
    except AttributeError: # no symlink in os
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1133
1133
def readlock(pathname):
    """Return the info string stored in the lock at *pathname*.

    Reads the symlink target when the lock is a symlink; otherwise reads
    the lock file's contents (the makelock() fallback format).
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: no symlink support.
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    contents = fp.read()
    fp.close()
    return contents
1146
1146
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    # Prefer fstat on the real descriptor; objects without fileno()
    # (e.g. wrappers) are stat'ed by name instead.
    try:
        fileno = fp.fileno
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fileno())
1153
1153
1154 # File system features
1154 # File system features
1155
1155
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirname, base = os.path.split(path)
    # Fold the final component the other way; if it cannot be folded at
    # all (e.g. digits only), we have no evidence against sensitivity.
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        return True # no evidence against case sensitivity
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # folded variant does not exist: names are distinct
        return True
    # Same inode under both spellings means the fs folds case.
    return st1 != st2
1178
1178
try:
    import re2
    # Tri-state flag: None = re2 imported but not yet validated by
    # _re._checkre2(); True/False once validation has run (or the
    # import failed outright).
    _re2 = None
except ImportError:
    _re2 = False
1184
1184
class _re(object):
    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        re2flags = remod.IGNORECASE | remod.MULTILINE
        if _re2 and (flags & ~re2flags) == 0:
            # re2 takes no flags argument; translate the supported ones
            # into inline pattern modifiers.
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern not supported by re2; fall back to stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        return remod.escape
1227
1227
# Module-level singleton: util.re.compile()/util.re.escape transparently
# use the re2 engine when it is available and compatible.
re = _re()
1229
1229
# Cache of directory -> {normcased name: on-disk name} listings used by
# fspath(); refreshed lazily when a lookup misses.
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry -> actual on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace() returns a new string; the previous code discarded the
    # result, leaving '\' unescaped inside the character classes below on
    # Windows (where os.sep is '\').
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the requested spelling for names not on disk
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1272
1272
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # cannot even create the probe file: report no hardlink support
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        # link creation or stat failed: hardlinks unusable here
        return False
    finally:
        # best-effort cleanup of both probe files
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1304
1304
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # os.altsep is None on POSIX; short-circuit keeps the original
    # falsy return value in that case.
    return os.altsep and path.endswith(os.altsep)
1308
1308
def splitpath(path):
    '''Split a path into its components on os.sep.

    os.altsep is deliberately ignored: this is meant to be a thin,
    predictable wrapper around path.split(os.sep). Run the path through
    os.path.normpath() first if normalization is needed.'''
    return path.split(os.sep)
1316
1316
def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        # Elsewhere: Windows always has a GUI; on Unix require an X11
        # display. Note this may return a string (truthy) rather than
        # True; callers use it in boolean context.
        return os.name == "nt" or os.environ.get("DISPLAY")
1331
1331
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # source disappeared: the (empty) temp file is the copy
                return temp
            # attach the filename for a more useful error message
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1370
1370
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # temp copy lives next to the target so rename() stays on the
        # same filesystem (and thus atomic)
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # commit: move the temp copy over the permanent name
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abort: drop the temp copy, leaving the original untouched
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1408
1408
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # already there; nothing to do (mode is not re-applied)
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        # missing parent: create the chain above us, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1425
1425
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    # build parents first (no-op when parent already exists)
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1447
1447
def readfile(path):
    """Return the entire binary contents of path."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1451
1451
def writefile(path, text):
    """Write text to path in binary mode, replacing any existing content."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1455
1455
def appendfile(path, text):
    """Append text to path in binary mode, creating the file if needed."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1459
1459
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        # Pre-split chunks larger than 1MB into 256KB pieces so read()
        # never has to slice a huge string.
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # offset of the first unconsumed byte within self._queue[0]
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source exhausted; return what we have (short read)
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1540
1540
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits without touching f (limit reached)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1561
1561
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # offset = UTC wall clock minus local wall clock, in seconds
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    local = datetime.datetime.fromtimestamp(timestamp)
    delta = utc - local
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1574
1574
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # %1 -> signed hours, %2 -> minutes of the UTC offset
        sign = "-" if tz > 0 else "+"
        minutes = abs(tz) // 60
        hours, mins = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, hours))
        format = format.replace("%2", "%02d" % mins)
    try:
        timetuple = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        timetuple = time.gmtime(sys.maxint)
    return time.strftime(format, timetuple)
1598
1598
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into an ISO 8601 date string."""
    # Delegates to datestr with a fixed date-only format; the timezone
    # offset is dropped.
    return datestr(date, format='%Y-%m-%d')
1602
1602
def parsetimezone(tz):
    """parse a timezone string and return an offset integer"""
    # numeric form: +HHMM / -HHMM
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # offsets are stored as seconds *west* of UTC, hence the negation
        return -sign * (hours * 60 + minutes) * 60
    if tz in ("GMT", "UTC"):
        return 0
    # unrecognized timezone
    return None
1613
1613
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE(review): despite the [] default, defaults is indexed by string
    # keys below — callers apparently pass a dict; confirm against callers.
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # last token was a timezone; strip it from the date text
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # element absent from format: append the default value
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1643
1643
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    # bias maps date-component keys (see the loop below) to preferred
    # default strings when the component is absent from the input
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    # already parsed: pass (unixtime, offset) tuples through unchanged
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates; both the English literal and its translation match
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: the internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses; the for/else runs
        # only when every format failed
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1722
1722
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp consistent with the spec: missing month/day
        # default to "1" (Jan 1st); missing time-of-day defaults to 00:00:00
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp consistent with the spec: missing fields default
        # to December, 23:59:59
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        # try the longest month lengths first; parsedate raises Abort for
        # an invalid day-of-month (e.g. Feb 31), so fall back step by step
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        # on or before: compare against the latest instant of the spec
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        # on or after: compare against the earliest instant of the spec
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range "A to B"
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anywhere within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1798
1798
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        # regex mode: compile eagerly so a bad pattern fails up front
        regexp = pattern[3:]
        try:
            compiled = remod.compile(regexp)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexp, compiled.search
    if pattern.startswith('literal:'):
        # explicit literal mode: strip the prefix and fall through
        pattern = pattern[8:]
    # default: exact string comparison
    return 'literal', pattern, pattern.__eq__
1837
1837
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the mail domain, if any
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    # keep only what follows an opening angle bracket ("Name <addr")
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # truncate at the first space, then at the first dot
    for sep in (' ', '.'):
        cut = user.find(sep)
        if cut >= 0:
            user = user[:cut]
    return user
1853
1853
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the domain part first
    at = user.find('@')
    if at != -1:
        user = user[:at]
    # then anything up to and including an opening angle bracket
    lt = user.find('<')
    if lt != -1:
        user = user[lt + 1:]
    return user
1863
1863
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; when either bracket is missing the
    # corresponding bound degenerates (start 0 / end None), so a bare
    # address is returned unchanged
    end = author.find('>')
    if end == -1:
        end = None
    start = author.find('<') + 1
    return author[start:end]
1870
1870
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegates to encoding.trim, which appends '...' when truncating;
    # column-based, so wide characters presumably count as two -- confirm
    return encoding.trim(text, maxlength, ellipsis='...')
1874
1874
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def formatter(count):
        # walk the table in order and use the first unit whose threshold
        # (divisor * multiplier) the count reaches
        for threshold, scale, fmt in unittable:
            if count >= scale * threshold:
                return fmt % (count / float(scale))
        # below every threshold: render with the table's final entry
        last_fmt = unittable[-1][2]
        return last_fmt % count

    return formatter
1885
1885
# render a byte count using the largest unit that still shows at least one
# digit before the decimal point, with precision decreasing as the value
# grows (e.g. '1.23 MB', '12.3 MB', '123 MB'); falls back to raw bytes
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1898
1898
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed to single ones.

    Avoids the ugly double-backslash form of Windows paths in user-visible
    output.
    """
    rendered = repr(s)
    return rendered.replace('\\\\', '\\')
1902
1902
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Factory for a column-width-aware textwrap.TextWrapper subclass.

    On first call the subclass is defined and this factory rebinds the
    module-level name MBTextWrapper to the class itself, so later calls
    construct instances directly without redefining it.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr so the first piece occupies at most space_left
            # display columns (per encoding.ucolwidth)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class on the module-level name so subsequent calls skip
    # the class definition entirely
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2006
2006
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap a byte string to at most ``width`` display columns.

    ``initindent`` prefixes the first output line, ``hangindent`` every
    subsequent line. Input is decoded to unicode for column-aware
    wrapping (via MBTextWrapper) and re-encoded before returning.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2019
2019
def iterlines(iterator):
    """Yield each text line from an iterable of string chunks."""
    for chunk_text in iterator:
        for single_line in chunk_text.splitlines():
            yield single_line
2024
2024
def expandpath(path):
    """Expand environment variables and ~user constructs in path."""
    with_vars = os.path.expandvars(path)
    return os.path.expanduser(with_vars)
2027
2027
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    # mainfrozen() is true when running from a py2exe/py2app-style bundle
    if mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [os.environ['EXECUTABLEPATH']]
        else:
            return [sys.executable]
    # normal (unfrozen) case: defer to the platform-specific helper
    return gethgcmd()
2042
2042
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the dead child and remember its (pid, status)
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on all platforms (notably Windows)
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn after detecting death to avoid a race where
            # the child succeeded and exited before we noticed
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD handler
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2077
2077
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if not fn:
        fn = lambda t: t
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # allow a doubled prefix to stand for a literal prefix character
        patterns = patterns + '|' + prefix
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))

    def substitute(m):
        # m.group() is prefix-char + key; strip the single prefix char
        return fn(mapping[m.group()[1:]])

    return matcher.sub(substitute, s)
2102
2102
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2119
2119
# accepted boolean spellings, mapped to their value; keys are matched
# case-insensitively by parsebool() below
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
2123
2123
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # case-insensitive lookup in the module-level spelling table
    key = s.lower()
    return _booleans.get(key)
2130
2130
_hexdig = '0123456789ABCDEFabcdef'
# precomputed map from every two-hex-digit string (any case combination)
# to its character value; lets _urlunquote() decode %XX escapes with a
# single dict lookup instead of int(x, 16) per escape
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)
2134
2134
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            # item starts with the two hex digits of a %XX escape
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            # not a valid escape: keep the '%' literally
            s += '%' + item
        except UnicodeDecodeError:
            # Python 2 only: mixing decoded bytes into a unicode string
            # can raise here; fall back to a unicode code point
            s += unichr(int(item[:2], 16)) + item[2:]
    return s
2154
2154
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters left unescaped when quoting user/passwd components
    _safechars = "!~*'()+"
    # characters left unescaped when quoting path/fragment components
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        # _localpath: True when this is a plain filesystem path, not a URL
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                # rsplit so that '@' may appear in the user name
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                # IPv6 literal: keep the brackets and colons untouched
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        '''Return (url, authinfo) where url omits the credentials and
        authinfo is either None (no user set) or a tuple suitable for a
        urllib2 password manager.'''
        user, passwd = self.user, self.passwd
        try:
            # temporarily clear credentials so str(self) omits them
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        # absolute here means "cannot be joined onto a base path"
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        '''Return the local filesystem path for file:/bundle:/plain URLs;
        other schemes fall back to the original string.'''
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2441
2441
def hasscheme(path):
    '''Return True when path carries an explicit URL scheme.'''
    parsed = url(path)
    return bool(parsed.scheme)
2444
2444
def hasdriveletter(path):
    '''Return a truthy value when path starts with a Windows drive
    letter such as "c:"; falsy otherwise (including empty/None path).'''
    return path and path[0:1].isalpha() and path[1:2] == ':'
2447
2447
def urllocalpath(path):
    '''Return the local filesystem path for a URL string, treating any
    query string and fragment as part of the path.'''
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
2450
2450
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # keep the user name visible, mask only the password
        parsed.passwd = '***'
    return str(parsed)
2457
2457
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2463
2463
def isatty(fp):
    '''Return fp.isatty(); False for objects lacking an isatty method.'''
    try:
        result = fp.isatty()
    except AttributeError:
        return False
    return result
2469
2469
# timecount(seconds) renders a duration with a human-friendly unit and
# precision; the (scale, divisor, format) entries are consumed by
# unitcountfn (defined earlier in this module)
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

# mutable cell holding the current indentation (in spaces) used by the
# timed() decorator to show nesting of timed calls
_timenesting = [0]
2487
2487
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''
    # imported here to keep this debugging aid self-contained
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        # indent nested timed calls so the report reflects the call tree
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2512
2512
# recognized size suffixes and their byte multipliers; order matters:
# single-letter forms are tried before the two-letter ones
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2534
2534
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        '''Register hook under the given source name.'''
        self._hooks.append((source, hook))

    def __call__(self, *args):
        '''Invoke every registered hook with args; return their results
        in lexicographic order of the source names.'''
        # sort by source so registration order does not matter
        self._hooks.sort(key=lambda pair: pair[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
2552
2552
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Write msg and a nicely formatted stacktrace to f (stderr).
    Skips the 'skip' innermost entries. By default it flushes stdout first.
    It can be used everywhere and intentionally does not require an ui
    object. Not for use in production code, but very convenient while
    developing.
    '''
    if otherf:
        # flush the other stream first so the trace lands in order
        otherf.flush()
    f.write('%s at:\n' % msg)
    frames = traceback.extract_stack()[:-skip - 1]
    entries = []
    for filename, lineno, funcname, _line in frames:
        entries.append(('%s:%s' % (filename, lineno), funcname))
    if entries:
        # align the 'in <function>' column for readability
        width = max(len(location) for location, _func in entries)
        for location, funcname in entries:
            f.write(' %-*s in %s\n' % (width, location, funcname))
    f.flush()
2569
2569
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # _dirs maps each directory name to its reference count
        self._dirs = {}
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: skip entries whose state matches skip
            for f, s in map.iteritems():
                if s[0] != skip:
                    self.addpath(f)
        else:
            for f in map:
                self.addpath(f)

    def addpath(self, path):
        '''Count every ancestor directory of path once more.'''
        counts = self._dirs
        for base in finddirs(path):
            existing = counts.get(base)
            if existing is not None:
                # all shallower ancestors are already counted
                counts[base] = existing + 1
                return
            counts[base] = 1

    def delpath(self, path):
        '''Drop one reference from every ancestor directory of path.'''
        counts = self._dirs
        for base in finddirs(path):
            remaining = counts[base]
            if remaining > 1:
                counts[base] = remaining - 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2605
2605
# prefer the accelerated implementation of dirs from the parsers module
# when it provides one (presumably the C extension; falls back to the
# pure-Python class above otherwise)
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2608
2608
def finddirs(path):
    '''Yield each ancestor directory of path, deepest first.

    'a/b/c' yields 'a/b' then 'a'; a path with no '/' yields nothing.
    '''
    cut = path.rfind('/')
    while cut >= 0:
        yield path[:cut]
        cut = path.rfind('/', 0, cut)
2614
2614
2615 # compression utility
2615 # compression utility
2616
2616
class nocompress(object):
    '''Null compressor: hands data through unchanged.'''

    def compress(self, x):
        # identity transform
        return x

    def flush(self):
        # nothing is buffered, so there is nothing left to emit
        return ""
2622
2622
# map of bundle compression-type names to factories producing compressor
# objects (each with compress()/flush() methods)
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2631
2631
def _makedecompressor(decompcls):
    '''Build a function that wraps a file-like object in a chunkbuffer
    yielding data decompressed by instances created via decompcls.'''
    def gen(fh):
        # the decompressor is created lazily, on first iteration
        decompressor = decompcls()
        for data in filechunkiter(fh):
            yield decompressor.decompress(data)
    def wrapped(fh):
        return chunkbuffer(gen(fh))
    return wrapped
2640
2640
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this exit function suppressed the active exception
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the most recent failure; it is re-raised once
                # all remaining exit functions have been given a chance
                # to run (previously this line was duplicated by a
                # redundant 'pending = sys.exc_info()' statement)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2700
2700
2701 def _bz2():
2701 def _bz2():
2702 d = bz2.BZ2Decompressor()
2702 d = bz2.BZ2Decompressor()
2703 # Bzip2 stream start with BZ, but we stripped it.
2703 # Bzip2 stream start with BZ, but we stripped it.
2704 # we put it back for good measure.
2704 # we put it back for good measure.
2705 d.decompress('BZ')
2705 d.decompress('BZ')
2706 return d
2706 return d
2707
2707
# map of compression-type names to functions that take a file-like
# object and return a chunkbuffer of decompressed data; '_truncatedBZ'
# handles bzip2 streams whose leading 'BZ' magic was stripped
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]

# convenient shortcut
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now