##// END OF EJS Templates
util: adjust hgcmd() to handle frozen Mercurial on OS X...
Matt Harbison -
r27766:198f78a5 default
parent child Browse files
Show More
@@ -1,2724 +1,2728
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import urllib
37 import urllib
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 )
46 )
47
47
48 if os.name == 'nt':
48 if os.name == 'nt':
49 from . import windows as platform
49 from . import windows as platform
50 else:
50 else:
51 from . import posix as platform
51 from . import posix as platform
52
52
53 md5 = hashlib.md5
53 md5 = hashlib.md5
54 sha1 = hashlib.sha1
54 sha1 = hashlib.sha1
55 sha512 = hashlib.sha512
55 sha512 = hashlib.sha512
56 _ = i18n._
56 _ = i18n._
57
57
58 cachestat = platform.cachestat
58 cachestat = platform.cachestat
59 checkexec = platform.checkexec
59 checkexec = platform.checkexec
60 checklink = platform.checklink
60 checklink = platform.checklink
61 copymode = platform.copymode
61 copymode = platform.copymode
62 executablepath = platform.executablepath
62 executablepath = platform.executablepath
63 expandglobs = platform.expandglobs
63 expandglobs = platform.expandglobs
64 explainexit = platform.explainexit
64 explainexit = platform.explainexit
65 findexe = platform.findexe
65 findexe = platform.findexe
66 gethgcmd = platform.gethgcmd
66 gethgcmd = platform.gethgcmd
67 getuser = platform.getuser
67 getuser = platform.getuser
68 groupmembers = platform.groupmembers
68 groupmembers = platform.groupmembers
69 groupname = platform.groupname
69 groupname = platform.groupname
70 hidewindow = platform.hidewindow
70 hidewindow = platform.hidewindow
71 isexec = platform.isexec
71 isexec = platform.isexec
72 isowner = platform.isowner
72 isowner = platform.isowner
73 localpath = platform.localpath
73 localpath = platform.localpath
74 lookupreg = platform.lookupreg
74 lookupreg = platform.lookupreg
75 makedir = platform.makedir
75 makedir = platform.makedir
76 nlinks = platform.nlinks
76 nlinks = platform.nlinks
77 normpath = platform.normpath
77 normpath = platform.normpath
78 normcase = platform.normcase
78 normcase = platform.normcase
79 normcasespec = platform.normcasespec
79 normcasespec = platform.normcasespec
80 normcasefallback = platform.normcasefallback
80 normcasefallback = platform.normcasefallback
81 openhardlinks = platform.openhardlinks
81 openhardlinks = platform.openhardlinks
82 oslink = platform.oslink
82 oslink = platform.oslink
83 parsepatchoutput = platform.parsepatchoutput
83 parsepatchoutput = platform.parsepatchoutput
84 pconvert = platform.pconvert
84 pconvert = platform.pconvert
85 poll = platform.poll
85 poll = platform.poll
86 popen = platform.popen
86 popen = platform.popen
87 posixfile = platform.posixfile
87 posixfile = platform.posixfile
88 quotecommand = platform.quotecommand
88 quotecommand = platform.quotecommand
89 readpipe = platform.readpipe
89 readpipe = platform.readpipe
90 rename = platform.rename
90 rename = platform.rename
91 removedirs = platform.removedirs
91 removedirs = platform.removedirs
92 samedevice = platform.samedevice
92 samedevice = platform.samedevice
93 samefile = platform.samefile
93 samefile = platform.samefile
94 samestat = platform.samestat
94 samestat = platform.samestat
95 setbinary = platform.setbinary
95 setbinary = platform.setbinary
96 setflags = platform.setflags
96 setflags = platform.setflags
97 setsignalhandler = platform.setsignalhandler
97 setsignalhandler = platform.setsignalhandler
98 shellquote = platform.shellquote
98 shellquote = platform.shellquote
99 spawndetached = platform.spawndetached
99 spawndetached = platform.spawndetached
100 split = platform.split
100 split = platform.split
101 sshargs = platform.sshargs
101 sshargs = platform.sshargs
102 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
102 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
103 statisexec = platform.statisexec
103 statisexec = platform.statisexec
104 statislink = platform.statislink
104 statislink = platform.statislink
105 termwidth = platform.termwidth
105 termwidth = platform.termwidth
106 testpid = platform.testpid
106 testpid = platform.testpid
107 umask = platform.umask
107 umask = platform.umask
108 unlink = platform.unlink
108 unlink = platform.unlink
109 unlinkpath = platform.unlinkpath
109 unlinkpath = platform.unlinkpath
110 username = platform.username
110 username = platform.username
111
111
112 # Python compatibility
112 # Python compatibility
113
113
114 _notset = object()
114 _notset = object()
115
115
116 # disable Python's problematic floating point timestamps (issue4836)
116 # disable Python's problematic floating point timestamps (issue4836)
117 # (Python hypocritically says you shouldn't change this behavior in
117 # (Python hypocritically says you shouldn't change this behavior in
118 # libraries, and sure enough Mercurial is not a library.)
118 # libraries, and sure enough Mercurial is not a library.)
119 os.stat_float_times(False)
119 os.stat_float_times(False)
120
120
def safehasattr(thing, attr):
    """Return True if ``thing`` has an attribute named ``attr``.

    Implemented with getattr() and a module-level sentinel rather than
    hasattr(), so the attribute itself is never invoked twice and a stored
    value of None is still reported as present.
    """
    found = getattr(thing, attr, _notset)
    return found is not _notset
123
123
124 DIGESTS = {
124 DIGESTS = {
125 'md5': md5,
125 'md5': md5,
126 'sha1': sha1,
126 'sha1': sha1,
127 'sha512': sha512,
127 'sha512': sha512,
128 }
128 }
129 # List of digest types from strongest to weakest
129 # List of digest types from strongest to weakest
130 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
130 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
131
131
132 for k in DIGESTS_BY_STRENGTH:
132 for k in DIGESTS_BY_STRENGTH:
133 assert k in DIGESTS
133 assert k in DIGESTS
134
134
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create hashers for each name in ``digests``; optionally seed
        them with the initial data ``s``. Raises Abort for unknown names."""
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` to every configured hasher."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for ``key``.

        Raises Abort for unknown digest types."""
        if key not in DIGESTS:
            # BUG FIX: this previously interpolated 'k', a loop variable
            # leaked from module scope, instead of the requested 'key'.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
181
181
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, counting and digesting the bytes."""
        data = self._fh.read(length)
        self._got += len(data)
        self._digester.update(data)
        return data

    def validate(self):
        """Raise Abort when the byte count or any digest does not match."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
213
213
try:
    # Python 2: keep the builtin as-is.
    buffer = buffer
except NameError:
    if sys.version_info[0] >= 3:
        def buffer(sliceable, offset=0):
            """Return a zero-copy view of ``sliceable`` from ``offset`` on."""
            return memoryview(sliceable)[offset:]
    else:
        def buffer(sliceable, offset=0):
            """Return a copy of ``sliceable`` from ``offset`` on."""
            return sliceable[offset:]
223
223
224 closefds = os.name == 'posix'
224 closefds = os.name == 'posix'
225
225
226 _chunksize = 4096
226 _chunksize = 4096
227
227
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        # underlying file-like object; must expose fileno(), close(), closed
        self._input = input
        # chunks read from the fd but not yet handed to the caller
        self._buffer = []
        # set once os.read() returns '' (fd hit end of stream)
        self._eof = False
        # total number of bytes currently held across all chunks
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        # delegate to the wrapped pipe
        return self._input.closed

    def fileno(self):
        # expose the raw fd so callers can pass this object to poll/select
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        """Return up to ``size`` bytes, blocking until enough data or EOF."""
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        """Return one line (including '\\n'), or the remainder at EOF."""
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: offset of the first newline in the newest chunk, -1 if absent.
        # Only the newest chunk needs scanning: older chunks were already
        # checked on previous iterations.
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks into one string before slicing
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # keep the unconsumed tail as the single remaining chunk,
            # restoring the "at most one chunk" invariant
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # read straight from the fd (not the buffered object) so that
        # polling on the fd stays consistent with what we have consumed
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
321
321
def popen2(cmd, env=None, newlines=False):
    """Run ``cmd`` through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
332
332
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but without returning the Popen object itself."""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
336
336
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run ``cmd`` through the shell with all three std streams piped.

    Returns (stdin, stdout, stderr, Popen object).
    """
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
345
345
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
353
353
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # Split off the '+local' suffix; extra stays None when there is none.
    vparts, sep, extra = v.partition('+')
    if not sep:
        extra = None

    # Collect leading integer components, stopping at the first part
    # that is not a plain integer.
    vints = []
    for part in vparts.split('.'):
        try:
            vints.append(int(part))
        except ValueError:
            break
    # Pad to three components: (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
406
406
407 # used by parsedate
407 # used by parsedate
408 defaultdateformats = (
408 defaultdateformats = (
409 '%Y-%m-%d %H:%M:%S',
409 '%Y-%m-%d %H:%M:%S',
410 '%Y-%m-%d %I:%M:%S%p',
410 '%Y-%m-%d %I:%M:%S%p',
411 '%Y-%m-%d %H:%M',
411 '%Y-%m-%d %H:%M',
412 '%Y-%m-%d %I:%M%p',
412 '%Y-%m-%d %I:%M%p',
413 '%Y-%m-%d',
413 '%Y-%m-%d',
414 '%m-%d',
414 '%m-%d',
415 '%m/%d',
415 '%m/%d',
416 '%m/%d/%y',
416 '%m/%d/%y',
417 '%m/%d/%Y',
417 '%m/%d/%Y',
418 '%a %b %d %H:%M:%S %Y',
418 '%a %b %d %H:%M:%S %Y',
419 '%a %b %d %I:%M:%S%p %Y',
419 '%a %b %d %I:%M:%S%p %Y',
420 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
420 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
421 '%b %d %H:%M:%S %Y',
421 '%b %d %H:%M:%S %Y',
422 '%b %d %I:%M:%S%p %Y',
422 '%b %d %I:%M:%S%p %Y',
423 '%b %d %H:%M:%S',
423 '%b %d %H:%M:%S',
424 '%b %d %I:%M:%S%p',
424 '%b %d %I:%M:%S%p',
425 '%b %d %H:%M',
425 '%b %d %H:%M',
426 '%b %d %I:%M%p',
426 '%b %d %I:%M%p',
427 '%b %d %Y',
427 '%b %d %Y',
428 '%b %d',
428 '%b %d',
429 '%H:%M:%S',
429 '%H:%M:%S',
430 '%I:%M:%S%p',
430 '%I:%M:%S%p',
431 '%H:%M',
431 '%H:%M',
432 '%I:%M%p',
432 '%I:%M%p',
433 )
433 )
434
434
435 extendeddateformats = defaultdateformats + (
435 extendeddateformats = defaultdateformats + (
436 "%Y",
436 "%Y",
437 "%Y-%m",
437 "%Y-%m",
438 "%b",
438 "%b",
439 "%b %Y",
439 "%b %Y",
440 )
440 )
441
441
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # func.__code__ is the portable spelling: available since Python 2.6
    # and the only one on Python 3 (func.func_code is Python-2-only).
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # zero-arg function: a one-slot list is enough
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
467
467
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order. Assigning to an existing key moves
    it to the end, as if it had just been inserted.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # re-inserting an existing key moves it to the end
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            # 'iteritems' on Python 2 (and on sortdict itself, preserving
            # order); plain Python 3 dicts only provide 'items'
            src = getattr(src, 'iteritems', src.items)()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # BUG FIX: the popped value used to be silently discarded,
        # violating the dict.pop() contract; return it to the caller.
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned: nothing to unlink
            pass
        return value
    def keys(self):
        # NOTE: returns the internal ordering list itself (cheap, but
        # callers must not mutate it)
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        """Insert ``key`` at ``index`` in the ordering (new keys only)."""
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
512
512
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ avoids a per-instance __dict__; caches can hold many nodes.
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # Neighbors are linked in by the owning cache.
        self.next = None
        self.prev = None

        # The sentinel _notset (not None) marks an empty node, so that
        # None remains usable as a real cache key.
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
531
531
532 class lrucachedict(object):
532 class lrucachedict(object):
533 """Dict that caches most recent accesses and sets.
533 """Dict that caches most recent accesses and sets.
534
534
535 The dict consists of an actual backing dict - indexed by original
535 The dict consists of an actual backing dict - indexed by original
536 key - and a doubly linked circular list defining the order of entries in
536 key - and a doubly linked circular list defining the order of entries in
537 the cache.
537 the cache.
538
538
539 The head node is the newest entry in the cache. If the cache is full,
539 The head node is the newest entry in the cache. If the cache is full,
540 we recycle head.prev and make it the new head. Cache accesses result in
540 we recycle head.prev and make it the new head. Cache accesses result in
541 the node being moved to before the existing head and being marked as the
541 the node being moved to before the existing head and being marked as the
542 new head node.
542 new head node.
543 """
543 """
    def __init__(self, max):
        # Map of key -> _lrucachenode for O(1) lookup.
        self._cache = {}

        # The circular doubly linked list starts as a single self-linked
        # node; capacity is reached by adding nodes lazily on insertion.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        # number of nodes currently in the linked list (not entries)
        self._size = 1
        self._capacity = max
552
552
    def __len__(self):
        # count real entries via the backing dict; the linked list may
        # contain extra, still-empty nodes
        return len(self._cache)
555
555
    def __contains__(self, k):
        # membership testing does not count as an access: recency unchanged
        return k in self._cache
558
558
    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        # Walks the linked list starting at the head (the newest entry),
        # yielding exactly one key per real entry.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next
565
565
    def __getitem__(self, k):
        # A lookup counts as an access: promote the node to most recent.
        # Missing keys raise KeyError from the dict lookup.
        node = self._cache[k]
        self._movetohead(node)
        return node.value
570
570
    def __setitem__(self, k, v):
        # Insert or replace a value, making it the newest entry; may evict
        # the oldest entry once the cache is at capacity.
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            # below capacity: presumably links a fresh node into the list
            # (see _addcapacity, defined outside this view — TODO confirm)
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node
595
595
596 def __delitem__(self, k):
596 def __delitem__(self, k):
597 node = self._cache.pop(k)
597 node = self._cache.pop(k)
598 node.markempty()
598 node.markempty()
599
599
600 # Temporarily mark as newest item before re-adjusting head to make
600 # Temporarily mark as newest item before re-adjusting head to make
601 # this node the oldest item.
601 # this node the oldest item.
602 self._movetohead(node)
602 self._movetohead(node)
603 self._head = node.next
603 self._head = node.next
604
604
605 # Additional dict methods.
605 # Additional dict methods.
606
606
607 def get(self, k, default=None):
607 def get(self, k, default=None):
608 try:
608 try:
609 return self._cache[k]
609 return self._cache[k]
610 except KeyError:
610 except KeyError:
611 return default
611 return default
612
612
613 def clear(self):
613 def clear(self):
614 n = self._head
614 n = self._head
615 while n.key is not _notset:
615 while n.key is not _notset:
616 n.markempty()
616 n.markempty()
617 n = n.next
617 n = n.next
618
618
619 self._cache.clear()
619 self._cache.clear()
620
620
621 def copy(self):
621 def copy(self):
622 result = lrucachedict(self._capacity)
622 result = lrucachedict(self._capacity)
623 n = self._head.prev
623 n = self._head.prev
624 # Iterate in oldest-to-newest order, so the copy has the right ordering
624 # Iterate in oldest-to-newest order, so the copy has the right ordering
625 for i in range(len(self._cache)):
625 for i in range(len(self._cache)):
626 result[n.key] = n.value
626 result[n.key] = n.value
627 n = n.prev
627 n = n.prev
628 return result
628 return result
629
629
630 def _movetohead(self, node):
630 def _movetohead(self, node):
631 """Mark a node as the newest, making it the new head.
631 """Mark a node as the newest, making it the new head.
632
632
633 When a node is accessed, it becomes the freshest entry in the LRU
633 When a node is accessed, it becomes the freshest entry in the LRU
634 list, which is denoted by self._head.
634 list, which is denoted by self._head.
635
635
636 Visually, let's make ``N`` the new head node (* denotes head):
636 Visually, let's make ``N`` the new head node (* denotes head):
637
637
638 previous/oldest <-> head <-> next/next newest
638 previous/oldest <-> head <-> next/next newest
639
639
640 ----<->--- A* ---<->-----
640 ----<->--- A* ---<->-----
641 | |
641 | |
642 E <-> D <-> N <-> C <-> B
642 E <-> D <-> N <-> C <-> B
643
643
644 To:
644 To:
645
645
646 ----<->--- N* ---<->-----
646 ----<->--- N* ---<->-----
647 | |
647 | |
648 E <-> D <-> C <-> B <-> A
648 E <-> D <-> C <-> B <-> A
649
649
650 This requires the following moves:
650 This requires the following moves:
651
651
652 C.next = D (node.prev.next = node.next)
652 C.next = D (node.prev.next = node.next)
653 D.prev = C (node.next.prev = node.prev)
653 D.prev = C (node.next.prev = node.prev)
654 E.next = N (head.prev.next = node)
654 E.next = N (head.prev.next = node)
655 N.prev = E (node.prev = head.prev)
655 N.prev = E (node.prev = head.prev)
656 N.next = A (node.next = head)
656 N.next = A (node.next = head)
657 A.prev = N (head.prev = node)
657 A.prev = N (head.prev = node)
658 """
658 """
659 head = self._head
659 head = self._head
660 # C.next = D
660 # C.next = D
661 node.prev.next = node.next
661 node.prev.next = node.next
662 # D.prev = C
662 # D.prev = C
663 node.next.prev = node.prev
663 node.next.prev = node.prev
664 # N.prev = E
664 # N.prev = E
665 node.prev = head.prev
665 node.prev = head.prev
666 # N.next = A
666 # N.next = A
667 # It is tempting to do just "head" here, however if node is
667 # It is tempting to do just "head" here, however if node is
668 # adjacent to head, this will do bad things.
668 # adjacent to head, this will do bad things.
669 node.next = head.prev.next
669 node.next = head.prev.next
670 # E.next = N
670 # E.next = N
671 node.next.prev = node
671 node.next.prev = node
672 # A.prev = N
672 # A.prev = N
673 node.prev.next = node
673 node.prev.next = node
674
674
675 self._head = node
675 self._head = node
676
676
677 def _addcapacity(self):
677 def _addcapacity(self):
678 """Add a node to the circular linked list.
678 """Add a node to the circular linked list.
679
679
680 The new node is inserted before the head node.
680 The new node is inserted before the head node.
681 """
681 """
682 head = self._head
682 head = self._head
683 node = _lrucachenode()
683 node = _lrucachenode()
684 head.prev.next = node
684 head.prev.next = node
685 node.prev = head.prev
685 node.prev = head.prev
686 node.next = head
686 node.next = head
687 head.prev = node
687 head.prev = node
688 self._size += 1
688 self._size += 1
689 return node
689 return node
690
690
691 def lrucachefunc(func):
691 def lrucachefunc(func):
692 '''cache most recent results of function calls'''
692 '''cache most recent results of function calls'''
693 cache = {}
693 cache = {}
694 order = collections.deque()
694 order = collections.deque()
695 if func.func_code.co_argcount == 1:
695 if func.func_code.co_argcount == 1:
696 def f(arg):
696 def f(arg):
697 if arg not in cache:
697 if arg not in cache:
698 if len(cache) > 20:
698 if len(cache) > 20:
699 del cache[order.popleft()]
699 del cache[order.popleft()]
700 cache[arg] = func(arg)
700 cache[arg] = func(arg)
701 else:
701 else:
702 order.remove(arg)
702 order.remove(arg)
703 order.append(arg)
703 order.append(arg)
704 return cache[arg]
704 return cache[arg]
705 else:
705 else:
706 def f(*args):
706 def f(*args):
707 if args not in cache:
707 if args not in cache:
708 if len(cache) > 20:
708 if len(cache) > 20:
709 del cache[order.popleft()]
709 del cache[order.popleft()]
710 cache[args] = func(*args)
710 cache[args] = func(*args)
711 else:
711 else:
712 order.remove(args)
712 order.remove(args)
713 order.append(args)
713 order.append(args)
714 return cache[args]
714 return cache[args]
715
715
716 return f
716 return f
717
717
718 class propertycache(object):
718 class propertycache(object):
719 def __init__(self, func):
719 def __init__(self, func):
720 self.func = func
720 self.func = func
721 self.name = func.__name__
721 self.name = func.__name__
722 def __get__(self, obj, type=None):
722 def __get__(self, obj, type=None):
723 result = self.func(obj)
723 result = self.func(obj)
724 self.cachevalue(obj, result)
724 self.cachevalue(obj, result)
725 return result
725 return result
726
726
727 def cachevalue(self, obj, value):
727 def cachevalue(self, obj, value):
728 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
728 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
729 obj.__dict__[self.name] = value
729 obj.__dict__[self.name] = value
730
730
731 def pipefilter(s, cmd):
731 def pipefilter(s, cmd):
732 '''filter string S through command CMD, returning its output'''
732 '''filter string S through command CMD, returning its output'''
733 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
733 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
734 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
734 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
735 pout, perr = p.communicate(s)
735 pout, perr = p.communicate(s)
736 return pout
736 return pout
737
737
738 def tempfilter(s, cmd):
738 def tempfilter(s, cmd):
739 '''filter string S through a pair of temporary files with CMD.
739 '''filter string S through a pair of temporary files with CMD.
740 CMD is used as a template to create the real command to be run,
740 CMD is used as a template to create the real command to be run,
741 with the strings INFILE and OUTFILE replaced by the real names of
741 with the strings INFILE and OUTFILE replaced by the real names of
742 the temporary files generated.'''
742 the temporary files generated.'''
743 inname, outname = None, None
743 inname, outname = None, None
744 try:
744 try:
745 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
745 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
746 fp = os.fdopen(infd, 'wb')
746 fp = os.fdopen(infd, 'wb')
747 fp.write(s)
747 fp.write(s)
748 fp.close()
748 fp.close()
749 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
749 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
750 os.close(outfd)
750 os.close(outfd)
751 cmd = cmd.replace('INFILE', inname)
751 cmd = cmd.replace('INFILE', inname)
752 cmd = cmd.replace('OUTFILE', outname)
752 cmd = cmd.replace('OUTFILE', outname)
753 code = os.system(cmd)
753 code = os.system(cmd)
754 if sys.platform == 'OpenVMS' and code & 1:
754 if sys.platform == 'OpenVMS' and code & 1:
755 code = 0
755 code = 0
756 if code:
756 if code:
757 raise Abort(_("command '%s' failed: %s") %
757 raise Abort(_("command '%s' failed: %s") %
758 (cmd, explainexit(code)))
758 (cmd, explainexit(code)))
759 fp = open(outname, 'rb')
759 fp = open(outname, 'rb')
760 r = fp.read()
760 r = fp.read()
761 fp.close()
761 fp.close()
762 return r
762 return r
763 finally:
763 finally:
764 try:
764 try:
765 if inname:
765 if inname:
766 os.unlink(inname)
766 os.unlink(inname)
767 except OSError:
767 except OSError:
768 pass
768 pass
769 try:
769 try:
770 if outname:
770 if outname:
771 os.unlink(outname)
771 os.unlink(outname)
772 except OSError:
772 except OSError:
773 pass
773 pass
774
774
775 filtertable = {
775 filtertable = {
776 'tempfile:': tempfilter,
776 'tempfile:': tempfilter,
777 'pipe:': pipefilter,
777 'pipe:': pipefilter,
778 }
778 }
779
779
780 def filter(s, cmd):
780 def filter(s, cmd):
781 "filter a string through a command that transforms its input to its output"
781 "filter a string through a command that transforms its input to its output"
782 for name, fn in filtertable.iteritems():
782 for name, fn in filtertable.iteritems():
783 if cmd.startswith(name):
783 if cmd.startswith(name):
784 return fn(s, cmd[len(name):].lstrip())
784 return fn(s, cmd[len(name):].lstrip())
785 return pipefilter(s, cmd)
785 return pipefilter(s, cmd)
786
786
787 def binary(s):
787 def binary(s):
788 """return true if a string is binary data"""
788 """return true if a string is binary data"""
789 return bool(s and '\0' in s)
789 return bool(s and '\0' in s)
790
790
791 def increasingchunks(source, min=1024, max=65536):
791 def increasingchunks(source, min=1024, max=65536):
792 '''return no less than min bytes per chunk while data remains,
792 '''return no less than min bytes per chunk while data remains,
793 doubling min after each chunk until it reaches max'''
793 doubling min after each chunk until it reaches max'''
794 def log2(x):
794 def log2(x):
795 if not x:
795 if not x:
796 return 0
796 return 0
797 i = 0
797 i = 0
798 while x:
798 while x:
799 x >>= 1
799 x >>= 1
800 i += 1
800 i += 1
801 return i - 1
801 return i - 1
802
802
803 buf = []
803 buf = []
804 blen = 0
804 blen = 0
805 for chunk in source:
805 for chunk in source:
806 buf.append(chunk)
806 buf.append(chunk)
807 blen += len(chunk)
807 blen += len(chunk)
808 if blen >= min:
808 if blen >= min:
809 if min < max:
809 if min < max:
810 min = min << 1
810 min = min << 1
811 nmin = 1 << log2(blen)
811 nmin = 1 << log2(blen)
812 if nmin > min:
812 if nmin > min:
813 min = nmin
813 min = nmin
814 if min > max:
814 if min > max:
815 min = max
815 min = max
816 yield ''.join(buf)
816 yield ''.join(buf)
817 blen = 0
817 blen = 0
818 buf = []
818 buf = []
819 if buf:
819 if buf:
820 yield ''.join(buf)
820 yield ''.join(buf)
821
821
822 Abort = error.Abort
822 Abort = error.Abort
823
823
824 def always(fn):
824 def always(fn):
825 return True
825 return True
826
826
827 def never(fn):
827 def never(fn):
828 return False
828 return False
829
829
830 def nogc(func):
830 def nogc(func):
831 """disable garbage collector
831 """disable garbage collector
832
832
833 Python's garbage collector triggers a GC each time a certain number of
833 Python's garbage collector triggers a GC each time a certain number of
834 container objects (the number being defined by gc.get_threshold()) are
834 container objects (the number being defined by gc.get_threshold()) are
835 allocated even when marked not to be tracked by the collector. Tracking has
835 allocated even when marked not to be tracked by the collector. Tracking has
836 no effect on when GCs are triggered, only on what objects the GC looks
836 no effect on when GCs are triggered, only on what objects the GC looks
837 into. As a workaround, disable GC while building complex (huge)
837 into. As a workaround, disable GC while building complex (huge)
838 containers.
838 containers.
839
839
840 This garbage collector issue have been fixed in 2.7.
840 This garbage collector issue have been fixed in 2.7.
841 """
841 """
842 def wrapper(*args, **kwargs):
842 def wrapper(*args, **kwargs):
843 gcenabled = gc.isenabled()
843 gcenabled = gc.isenabled()
844 gc.disable()
844 gc.disable()
845 try:
845 try:
846 return func(*args, **kwargs)
846 return func(*args, **kwargs)
847 finally:
847 finally:
848 if gcenabled:
848 if gcenabled:
849 gc.enable()
849 gc.enable()
850 return wrapper
850 return wrapper
851
851
852 def pathto(root, n1, n2):
852 def pathto(root, n1, n2):
853 '''return the relative path from one place to another.
853 '''return the relative path from one place to another.
854 root should use os.sep to separate directories
854 root should use os.sep to separate directories
855 n1 should use os.sep to separate directories
855 n1 should use os.sep to separate directories
856 n2 should use "/" to separate directories
856 n2 should use "/" to separate directories
857 returns an os.sep-separated path.
857 returns an os.sep-separated path.
858
858
859 If n1 is a relative path, it's assumed it's
859 If n1 is a relative path, it's assumed it's
860 relative to root.
860 relative to root.
861 n2 should always be relative to root.
861 n2 should always be relative to root.
862 '''
862 '''
863 if not n1:
863 if not n1:
864 return localpath(n2)
864 return localpath(n2)
865 if os.path.isabs(n1):
865 if os.path.isabs(n1):
866 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
866 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
867 return os.path.join(root, localpath(n2))
867 return os.path.join(root, localpath(n2))
868 n2 = '/'.join((pconvert(root), n2))
868 n2 = '/'.join((pconvert(root), n2))
869 a, b = splitpath(n1), n2.split('/')
869 a, b = splitpath(n1), n2.split('/')
870 a.reverse()
870 a.reverse()
871 b.reverse()
871 b.reverse()
872 while a and b and a[-1] == b[-1]:
872 while a and b and a[-1] == b[-1]:
873 a.pop()
873 a.pop()
874 b.pop()
874 b.pop()
875 b.reverse()
875 b.reverse()
876 return os.sep.join((['..'] * len(a)) + b) or '.'
876 return os.sep.join((['..'] * len(a)) + b) or '.'
877
877
878 def mainfrozen():
878 def mainfrozen():
879 """return True if we are a frozen executable.
879 """return True if we are a frozen executable.
880
880
881 The code supports py2exe (most common, Windows only) and tools/freeze
881 The code supports py2exe (most common, Windows only) and tools/freeze
882 (portable, not much used).
882 (portable, not much used).
883 """
883 """
884 return (safehasattr(sys, "frozen") or # new py2exe
884 return (safehasattr(sys, "frozen") or # new py2exe
885 safehasattr(sys, "importers") or # old py2exe
885 safehasattr(sys, "importers") or # old py2exe
886 imp.is_frozen("__main__")) # tools/freeze
886 imp.is_frozen("__main__")) # tools/freeze
887
887
888 # the location of data files matching the source code
888 # the location of data files matching the source code
889 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
889 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
890 # executable version (py2exe) doesn't support __file__
890 # executable version (py2exe) doesn't support __file__
891 datapath = os.path.dirname(sys.executable)
891 datapath = os.path.dirname(sys.executable)
892 else:
892 else:
893 datapath = os.path.dirname(__file__)
893 datapath = os.path.dirname(__file__)
894
894
895 i18n.setdatapath(datapath)
895 i18n.setdatapath(datapath)
896
896
897 _hgexecutable = None
897 _hgexecutable = None
898
898
899 def hgexecutable():
899 def hgexecutable():
900 """return location of the 'hg' executable.
900 """return location of the 'hg' executable.
901
901
902 Defaults to $HG or 'hg' in the search path.
902 Defaults to $HG or 'hg' in the search path.
903 """
903 """
904 if _hgexecutable is None:
904 if _hgexecutable is None:
905 hg = os.environ.get('HG')
905 hg = os.environ.get('HG')
906 mainmod = sys.modules['__main__']
906 mainmod = sys.modules['__main__']
907 if hg:
907 if hg:
908 _sethgexecutable(hg)
908 _sethgexecutable(hg)
909 elif mainfrozen():
909 elif mainfrozen():
910 if getattr(sys, 'frozen', None) == 'macosx_app':
910 if getattr(sys, 'frozen', None) == 'macosx_app':
911 # Env variable set by py2app
911 # Env variable set by py2app
912 _sethgexecutable(os.environ['EXECUTABLEPATH'])
912 _sethgexecutable(os.environ['EXECUTABLEPATH'])
913 else:
913 else:
914 _sethgexecutable(sys.executable)
914 _sethgexecutable(sys.executable)
915 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
915 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
916 _sethgexecutable(mainmod.__file__)
916 _sethgexecutable(mainmod.__file__)
917 else:
917 else:
918 exe = findexe('hg') or os.path.basename(sys.argv[0])
918 exe = findexe('hg') or os.path.basename(sys.argv[0])
919 _sethgexecutable(exe)
919 _sethgexecutable(exe)
920 return _hgexecutable
920 return _hgexecutable
921
921
922 def _sethgexecutable(path):
922 def _sethgexecutable(path):
923 """set location of the 'hg' executable"""
923 """set location of the 'hg' executable"""
924 global _hgexecutable
924 global _hgexecutable
925 _hgexecutable = path
925 _hgexecutable = path
926
926
927 def _isstdout(f):
927 def _isstdout(f):
928 fileno = getattr(f, 'fileno', None)
928 fileno = getattr(f, 'fileno', None)
929 return fileno and fileno() == sys.__stdout__.fileno()
929 return fileno and fileno() == sys.__stdout__.fileno()
930
930
931 def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
931 def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
932 '''enhanced shell command execution.
932 '''enhanced shell command execution.
933 run with environment maybe modified, maybe in different dir.
933 run with environment maybe modified, maybe in different dir.
934
934
935 if command fails and onerr is None, return status, else raise onerr
935 if command fails and onerr is None, return status, else raise onerr
936 object as exception.
936 object as exception.
937
937
938 if out is specified, it is assumed to be a file-like object that has a
938 if out is specified, it is assumed to be a file-like object that has a
939 write() method. stdout and stderr will be redirected to out.'''
939 write() method. stdout and stderr will be redirected to out.'''
940 if environ is None:
940 if environ is None:
941 environ = {}
941 environ = {}
942 try:
942 try:
943 sys.stdout.flush()
943 sys.stdout.flush()
944 except Exception:
944 except Exception:
945 pass
945 pass
946 def py2shell(val):
946 def py2shell(val):
947 'convert python object into string that is useful to shell'
947 'convert python object into string that is useful to shell'
948 if val is None or val is False:
948 if val is None or val is False:
949 return '0'
949 return '0'
950 if val is True:
950 if val is True:
951 return '1'
951 return '1'
952 return str(val)
952 return str(val)
953 origcmd = cmd
953 origcmd = cmd
954 cmd = quotecommand(cmd)
954 cmd = quotecommand(cmd)
955 if sys.platform == 'plan9' and (sys.version_info[0] == 2
955 if sys.platform == 'plan9' and (sys.version_info[0] == 2
956 and sys.version_info[1] < 7):
956 and sys.version_info[1] < 7):
957 # subprocess kludge to work around issues in half-baked Python
957 # subprocess kludge to work around issues in half-baked Python
958 # ports, notably bichued/python:
958 # ports, notably bichued/python:
959 if not cwd is None:
959 if not cwd is None:
960 os.chdir(cwd)
960 os.chdir(cwd)
961 rc = os.system(cmd)
961 rc = os.system(cmd)
962 else:
962 else:
963 env = dict(os.environ)
963 env = dict(os.environ)
964 env.update((k, py2shell(v)) for k, v in environ.iteritems())
964 env.update((k, py2shell(v)) for k, v in environ.iteritems())
965 env['HG'] = hgexecutable()
965 env['HG'] = hgexecutable()
966 if out is None or _isstdout(out):
966 if out is None or _isstdout(out):
967 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
967 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
968 env=env, cwd=cwd)
968 env=env, cwd=cwd)
969 else:
969 else:
970 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
970 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
971 env=env, cwd=cwd, stdout=subprocess.PIPE,
971 env=env, cwd=cwd, stdout=subprocess.PIPE,
972 stderr=subprocess.STDOUT)
972 stderr=subprocess.STDOUT)
973 while True:
973 while True:
974 line = proc.stdout.readline()
974 line = proc.stdout.readline()
975 if not line:
975 if not line:
976 break
976 break
977 out.write(line)
977 out.write(line)
978 proc.wait()
978 proc.wait()
979 rc = proc.returncode
979 rc = proc.returncode
980 if sys.platform == 'OpenVMS' and rc & 1:
980 if sys.platform == 'OpenVMS' and rc & 1:
981 rc = 0
981 rc = 0
982 if rc and onerr:
982 if rc and onerr:
983 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
983 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
984 explainexit(rc)[0])
984 explainexit(rc)[0])
985 if errprefix:
985 if errprefix:
986 errmsg = '%s: %s' % (errprefix, errmsg)
986 errmsg = '%s: %s' % (errprefix, errmsg)
987 raise onerr(errmsg)
987 raise onerr(errmsg)
988 return rc
988 return rc
989
989
990 def checksignature(func):
990 def checksignature(func):
991 '''wrap a function with code to check for calling errors'''
991 '''wrap a function with code to check for calling errors'''
992 def check(*args, **kwargs):
992 def check(*args, **kwargs):
993 try:
993 try:
994 return func(*args, **kwargs)
994 return func(*args, **kwargs)
995 except TypeError:
995 except TypeError:
996 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
996 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
997 raise error.SignatureError
997 raise error.SignatureError
998 raise
998 raise
999
999
1000 return check
1000 return check
1001
1001
1002 def copyfile(src, dest, hardlink=False, copystat=False):
1002 def copyfile(src, dest, hardlink=False, copystat=False):
1003 '''copy a file, preserving mode and optionally other stat info like
1003 '''copy a file, preserving mode and optionally other stat info like
1004 atime/mtime'''
1004 atime/mtime'''
1005 if os.path.lexists(dest):
1005 if os.path.lexists(dest):
1006 unlink(dest)
1006 unlink(dest)
1007 # hardlinks are problematic on CIFS, quietly ignore this flag
1007 # hardlinks are problematic on CIFS, quietly ignore this flag
1008 # until we find a way to work around it cleanly (issue4546)
1008 # until we find a way to work around it cleanly (issue4546)
1009 if False and hardlink:
1009 if False and hardlink:
1010 try:
1010 try:
1011 oslink(src, dest)
1011 oslink(src, dest)
1012 return
1012 return
1013 except (IOError, OSError):
1013 except (IOError, OSError):
1014 pass # fall back to normal copy
1014 pass # fall back to normal copy
1015 if os.path.islink(src):
1015 if os.path.islink(src):
1016 os.symlink(os.readlink(src), dest)
1016 os.symlink(os.readlink(src), dest)
1017 # copytime is ignored for symlinks, but in general copytime isn't needed
1017 # copytime is ignored for symlinks, but in general copytime isn't needed
1018 # for them anyway
1018 # for them anyway
1019 else:
1019 else:
1020 try:
1020 try:
1021 shutil.copyfile(src, dest)
1021 shutil.copyfile(src, dest)
1022 if copystat:
1022 if copystat:
1023 # copystat also copies mode
1023 # copystat also copies mode
1024 shutil.copystat(src, dest)
1024 shutil.copystat(src, dest)
1025 else:
1025 else:
1026 shutil.copymode(src, dest)
1026 shutil.copymode(src, dest)
1027 except shutil.Error as inst:
1027 except shutil.Error as inst:
1028 raise Abort(str(inst))
1028 raise Abort(str(inst))
1029
1029
1030 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1030 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1031 """Copy a directory tree using hardlinks if possible."""
1031 """Copy a directory tree using hardlinks if possible."""
1032 num = 0
1032 num = 0
1033
1033
1034 if hardlink is None:
1034 if hardlink is None:
1035 hardlink = (os.stat(src).st_dev ==
1035 hardlink = (os.stat(src).st_dev ==
1036 os.stat(os.path.dirname(dst)).st_dev)
1036 os.stat(os.path.dirname(dst)).st_dev)
1037 if hardlink:
1037 if hardlink:
1038 topic = _('linking')
1038 topic = _('linking')
1039 else:
1039 else:
1040 topic = _('copying')
1040 topic = _('copying')
1041
1041
1042 if os.path.isdir(src):
1042 if os.path.isdir(src):
1043 os.mkdir(dst)
1043 os.mkdir(dst)
1044 for name, kind in osutil.listdir(src):
1044 for name, kind in osutil.listdir(src):
1045 srcname = os.path.join(src, name)
1045 srcname = os.path.join(src, name)
1046 dstname = os.path.join(dst, name)
1046 dstname = os.path.join(dst, name)
1047 def nprog(t, pos):
1047 def nprog(t, pos):
1048 if pos is not None:
1048 if pos is not None:
1049 return progress(t, pos + num)
1049 return progress(t, pos + num)
1050 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1050 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1051 num += n
1051 num += n
1052 else:
1052 else:
1053 if hardlink:
1053 if hardlink:
1054 try:
1054 try:
1055 oslink(src, dst)
1055 oslink(src, dst)
1056 except (IOError, OSError):
1056 except (IOError, OSError):
1057 hardlink = False
1057 hardlink = False
1058 shutil.copy(src, dst)
1058 shutil.copy(src, dst)
1059 else:
1059 else:
1060 shutil.copy(src, dst)
1060 shutil.copy(src, dst)
1061 num += 1
1061 num += 1
1062 progress(topic, num)
1062 progress(topic, num)
1063 progress(topic, None)
1063 progress(topic, None)
1064
1064
1065 return hardlink, num
1065 return hardlink, num
1066
1066
1067 _winreservednames = '''con prn aux nul
1067 _winreservednames = '''con prn aux nul
1068 com1 com2 com3 com4 com5 com6 com7 com8 com9
1068 com1 com2 com3 com4 com5 com6 com7 com8 com9
1069 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1069 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1070 _winreservedchars = ':*?"<>|'
1070 _winreservedchars = ':*?"<>|'
1071 def checkwinfilename(path):
1071 def checkwinfilename(path):
1072 r'''Check that the base-relative path is a valid filename on Windows.
1072 r'''Check that the base-relative path is a valid filename on Windows.
1073 Returns None if the path is ok, or a UI string describing the problem.
1073 Returns None if the path is ok, or a UI string describing the problem.
1074
1074
1075 >>> checkwinfilename("just/a/normal/path")
1075 >>> checkwinfilename("just/a/normal/path")
1076 >>> checkwinfilename("foo/bar/con.xml")
1076 >>> checkwinfilename("foo/bar/con.xml")
1077 "filename contains 'con', which is reserved on Windows"
1077 "filename contains 'con', which is reserved on Windows"
1078 >>> checkwinfilename("foo/con.xml/bar")
1078 >>> checkwinfilename("foo/con.xml/bar")
1079 "filename contains 'con', which is reserved on Windows"
1079 "filename contains 'con', which is reserved on Windows"
1080 >>> checkwinfilename("foo/bar/xml.con")
1080 >>> checkwinfilename("foo/bar/xml.con")
1081 >>> checkwinfilename("foo/bar/AUX/bla.txt")
1081 >>> checkwinfilename("foo/bar/AUX/bla.txt")
1082 "filename contains 'AUX', which is reserved on Windows"
1082 "filename contains 'AUX', which is reserved on Windows"
1083 >>> checkwinfilename("foo/bar/bla:.txt")
1083 >>> checkwinfilename("foo/bar/bla:.txt")
1084 "filename contains ':', which is reserved on Windows"
1084 "filename contains ':', which is reserved on Windows"
1085 >>> checkwinfilename("foo/bar/b\07la.txt")
1085 >>> checkwinfilename("foo/bar/b\07la.txt")
1086 "filename contains '\\x07', which is invalid on Windows"
1086 "filename contains '\\x07', which is invalid on Windows"
1087 >>> checkwinfilename("foo/bar/bla ")
1087 >>> checkwinfilename("foo/bar/bla ")
1088 "filename ends with ' ', which is not allowed on Windows"
1088 "filename ends with ' ', which is not allowed on Windows"
1089 >>> checkwinfilename("../bar")
1089 >>> checkwinfilename("../bar")
1090 >>> checkwinfilename("foo\\")
1090 >>> checkwinfilename("foo\\")
1091 "filename ends with '\\', which is invalid on Windows"
1091 "filename ends with '\\', which is invalid on Windows"
1092 >>> checkwinfilename("foo\\/bar")
1092 >>> checkwinfilename("foo\\/bar")
1093 "directory name ends with '\\', which is invalid on Windows"
1093 "directory name ends with '\\', which is invalid on Windows"
1094 '''
1094 '''
1095 if path.endswith('\\'):
1095 if path.endswith('\\'):
1096 return _("filename ends with '\\', which is invalid on Windows")
1096 return _("filename ends with '\\', which is invalid on Windows")
1097 if '\\/' in path:
1097 if '\\/' in path:
1098 return _("directory name ends with '\\', which is invalid on Windows")
1098 return _("directory name ends with '\\', which is invalid on Windows")
1099 for n in path.replace('\\', '/').split('/'):
1099 for n in path.replace('\\', '/').split('/'):
1100 if not n:
1100 if not n:
1101 continue
1101 continue
1102 for c in n:
1102 for c in n:
1103 if c in _winreservedchars:
1103 if c in _winreservedchars:
1104 return _("filename contains '%s', which is reserved "
1104 return _("filename contains '%s', which is reserved "
1105 "on Windows") % c
1105 "on Windows") % c
1106 if ord(c) <= 31:
1106 if ord(c) <= 31:
1107 return _("filename contains %r, which is invalid "
1107 return _("filename contains %r, which is invalid "
1108 "on Windows") % c
1108 "on Windows") % c
1109 base = n.split('.')[0]
1109 base = n.split('.')[0]
1110 if base and base.lower() in _winreservednames:
1110 if base and base.lower() in _winreservednames:
1111 return _("filename contains '%s', which is reserved "
1111 return _("filename contains '%s', which is reserved "
1112 "on Windows") % base
1112 "on Windows") % base
1113 t = n[-1]
1113 t = n[-1]
1114 if t in '. ' and n not in '..':
1114 if t in '. ' and n not in '..':
1115 return _("filename ends with '%s', which is not allowed "
1115 return _("filename ends with '%s', which is not allowed "
1116 "on Windows") % t
1116 "on Windows") % t
1117
1117
1118 if os.name == 'nt':
1118 if os.name == 'nt':
1119 checkosfilename = checkwinfilename
1119 checkosfilename = checkwinfilename
1120 else:
1120 else:
1121 checkosfilename = platform.checkosfilename
1121 checkosfilename = platform.checkosfilename
1122
1122
1123 def makelock(info, pathname):
1123 def makelock(info, pathname):
1124 try:
1124 try:
1125 return os.symlink(info, pathname)
1125 return os.symlink(info, pathname)
1126 except OSError as why:
1126 except OSError as why:
1127 if why.errno == errno.EEXIST:
1127 if why.errno == errno.EEXIST:
1128 raise
1128 raise
1129 except AttributeError: # no symlink in os
1129 except AttributeError: # no symlink in os
1130 pass
1130 pass
1131
1131
1132 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1132 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1133 os.write(ld, info)
1133 os.write(ld, info)
1134 os.close(ld)
1134 os.close(ld)
1135
1135
def readlock(pathname):
    """Return the info string stored in the lock at ``pathname``.

    Reads the symlink target when the lock is a symlink; otherwise
    (regular-file lock, or no symlink support) returns the file's
    contents.
    """
    try:
        return os.readlink(pathname)
    except OSError as err:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported here
        if err.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # this os module has no symlink
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1148
1148
def fstat(fp):
    '''stat a file object, tolerating objects without a fileno method
    by falling back to stat-by-name via fp.name.'''
    fileno = getattr(fp, 'fileno', None)
    if fileno is not None:
        return os.fstat(fileno())
    return os.stat(fp.name)
1155
1155
1156 # File system features
1156 # File system features
1157
1157
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    stat1 = os.lstat(path)
    parent, leaf = os.path.split(path)
    folded = leaf.upper()
    if folded == leaf:
        folded = leaf.lower()
    if folded == leaf:
        # the leaf has no case to fold, so there is no evidence
        # against case sensitivity
        return True
    try:
        stat2 = os.lstat(os.path.join(parent, folded))
    except OSError:
        # the case-folded spelling does not exist: case matters
        return True
    # equal stat results mean both spellings name the same file,
    # i.e. the filesystem is case-insensitive
    return stat2 != stat1
1180
1180
# Probe for the optional re2 module (a faster regex engine).  The module
# global _re2 is tri-state: None means "importable but not yet verified
# against a real pattern" (resolved later by _re._checkre2()), False
# means unavailable.
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1186
1186
class _re(object):
    """Regex facade that compiles with re2 when available and the
    pattern/flags allow it, falling back to the stdlib re module
    (imported as remod)."""

    def _checkre2(self):
        # Verify that re2 actually works (a successful import is not
        # sufficient; see issue3964) and cache the verdict in the
        # module-global _re2.
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2.compile takes no flags argument: translate the two
            # supported flags into inline (?i)/(?m) pattern prefixes
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern not supported by re2: fall back to stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
1229
1229
# Module-level singleton used in place of the stdlib re module.
re = _re()

# Cache used by fspath(): {directory: {normcased name: on-disk name}}.
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry name -> on-disk entry name for dir
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes for the regex character classes below.
    # str.replace returns a new string, so the result must be kept;
    # previously it was discarded, leaving '\\' unescaped and therefore
    # consumed as an escape character inside [%s] on Windows.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # runs of separators are passed through verbatim
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the caller-supplied spelling if the entry is gone
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1274
1274
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so probe with two scratch files instead;
    # this works around issue2543 (and testfile may get lost on Samba
    # shares)
    probe = testfile + ".hgtmp1"
    if os.path.lexists(probe):
        return False
    try:
        posixfile(probe, 'w').close()
    except IOError:
        return False

    clone = testfile + ".hgtmp2"
    fh = None
    try:
        oslink(probe, clone)
        # keep the link open while counting: nlinks() may behave
        # differently for open files on Windows shares
        fh = posixfile(clone)
        return nlinks(clone) > 1
    except OSError:
        return False
    finally:
        # close before unlinking, then best-effort cleanup of both names
        if fh is not None:
            fh.close()
        for scratch in (probe, clone):
            try:
                os.unlink(scratch)
            except OSError:
                pass
1306
1306
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.

    Always returns a bool.  The previous expression could evaluate to
    None (falsy, but surprising for a predicate) when os.altsep is
    unset and path does not end with os.sep.
    '''
    return path.endswith(os.sep) or bool(os.altsep and
                                         path.endswith(os.altsep))
1310
1310
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    sep = os.sep
    return path.split(sep)
1318
1318
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # non-Mac: Windows is always graphical, otherwise require an
        # X display
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1333
1333
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file next to the original so that a later rename
    # stays within the same directory (and filesystem)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        # caller will truncate/overwrite anyway: skip the content copy
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # no original file: the (empty) temp file is the copy
                return temp
            if not getattr(inst, 'filename', None):
                # make the propagated error name the file that failed
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # never leave a stale temp file behind on failure
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1372
1372
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        # name-mangled to avoid clashes with subclass attributes
        self.__name = name # permanent name
        # temp copy lives in the same directory so the rename in
        # close() stays on one filesystem
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # commit: replace the target with the finished temp copy
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abort: drop the temp copy, leave the target untouched
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        # guard against a partially-run __init__ (e.g. mktempcopy failed)
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1410
1410
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # already present: nothing to create and no chmod
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: create ancestors first, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1427
1427
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(name):
            # a concurrent creator won the race: their dir, their mode
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1449
1449
def readfile(path):
    """Return the entire contents of the file at path (binary mode)."""
    with open(path, 'rb') as fp:
        return fp.read()
1456
1456
def writefile(path, text):
    """Replace the contents of the file at path with text (binary mode)."""
    with open(path, 'wb') as fp:
        fp.write(text)
1463
1463
def appendfile(path, text):
    """Append text to the file at path (binary mode; creates the file
    if it does not exist)."""
    with open(path, 'ab') as fp:
        fp.write(text)
1470
1470
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # split chunks larger than 1M into 256k pieces so read()
            # never slices an over-large string
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        # buffered chunks not yet (fully) returned by read()
        self._queue = collections.deque()
        # number of bytes of _queue[0] already consumed by read()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source exhausted: return a short read
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1551
1551
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # never request more than the remaining limit allows
        nbytes = size if limit is None else min(limit, size)
        chunk = nbytes and f.read(nbytes)
        if not chunk:
            break
        if limit:
            limit -= len(chunk)
        yield chunk
1572
1572
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # offset = UTC wall clock minus local wall clock, in seconds
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    local = datetime.datetime.fromtimestamp(timestamp)
    delta = utc - local
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1585
1585
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    # %1 and %2 are Mercurial-private format tokens: the signed hour
    # part and the minute part of the UTC offset; %z expands to both
    if "%1" in format or "%2" in format or "%z" in format:
        # tz counts the other way from the displayed offset, so a
        # positive tz renders with a '-' sign
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    try:
        # shift to local wall-clock time, then format as if UTC
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range; clamp (sys.maxint is Python 2 only)
        t = time.gmtime(sys.maxint)
    s = time.strftime(format, t)
    return s
1609
1609
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into an ISO 8601 date (YYYY-MM-DD)."""
    isofmt = '%Y-%m-%d'
    return datestr(date, format=isofmt)
1613
1613
def parsetimezone(tz):
    """parse a timezone string and return an offset integer"""
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        # numeric form +HHMM / -HHMM; internal offsets count the
        # opposite way from the display sign
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -sign * (hours * 60 + minutes) * 60
    if tz in ("GMT", "UTC"):
        return 0
    return None
1624
1624
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    # NOTE(review): defaults is indexed like a mapping of part -> pair
    # below; the [] default only works when every directive is present
    # in format — presumably callers always pass a dict. Verify.
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # the trailing token was a recognized timezone: strip it
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # append "@<value>" to the date and a matching "@%<directive>"
            # to the format so strptime consumes the synthesized default
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1654
1654
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        # empty input: epoch, UTC
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # relative keywords (and their translations) are rewritten to
    # concrete dates before format matching
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    # a plain "unixtime offset" pair needs no format guessing
    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses cleanly
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1733
1733
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # bias unspecified fields toward the earliest matching moment
        # (January 1st)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # bias unspecified fields toward the latest matching moment;
        # try month lengths from longest to shortest since parsedate
        # aborts on impossible dates like Feb 31
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        # on-or-before: compare against the latest reading of the date
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        # on-or-after: compare against the earliest reading of the date
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match the whole span it could denote
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1809
1809
def stringmatcher(pattern):
    """Split a pattern with an optional 're:' or 'literal:' prefix.

    Returns a (kind, pattern, matcher) triple: kind is 're' or
    'literal', pattern has the recognized prefix stripped, and matcher
    is a callable testing a string against the pattern.  A missing or
    unrecognized prefix means literal matching.

    >>> kind, pat, m = stringmatcher('re:a.+b')
    >>> (kind, pat, bool(m('fooadefbar')), bool(m('nomatch')))
    ('re', 'a.+b', True, False)
    >>> kind, pat, m = stringmatcher('literal:re:foobar')
    >>> (kind, pat, m('re:foobar'), m('foobar'))
    ('literal', 're:foobar', True, False)
    >>> stringmatcher('foo:bar')[:2]
    ('literal', 'foo:bar')
    """
    if pattern.startswith('re:'):
        regexsource = pattern[3:]
        try:
            compiled = remod.compile(regexsource)
        except remod.error as inst:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % inst)
        return 're', regexsource, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # unknown prefixes fall through here and match literally
    return 'literal', pattern, pattern.__eq__
1848
1848
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop any domain part
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    # keep only what follows a '<' (start of an address in "Name <addr>")
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    # truncate at the first space, then at the first dot
    for sep in (' ', '.'):
        pos = user.find(sep)
        if pos >= 0:
            user = user[:pos]
    return user
1864
1864
def emailuser(user):
    """Return the user portion of an email address."""
    at = user.find('@')
    if at != -1:
        # strip the domain
        user = user[:at]
    lt = user.find('<')
    if lt != -1:
        # strip a leading real name ("Name <user")
        user = user[lt + 1:]
    return user
1874
1874
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; with neither present this returns the
    # whole string (find() yields -1, so the slice starts at 0)
    close = author.find('>')
    end = close if close != -1 else None
    return author[author.find('<') + 1:end]
1881
1881
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegate to encoding.trim, which marks truncation with '...'
    return encoding.trim(text, maxlength, ellipsis='...')
1885
1885
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def format_count(quantity):
        # walk the table in order and use the first row whose threshold
        # (multiplier * unit size) the quantity reaches
        for threshold_mult, unit_size, fmt in unittable:
            if quantity >= unit_size * threshold_mult:
                return fmt % (quantity / float(unit_size))
        # quantity is below every threshold: use the last (smallest) unit
        return unittable[-1][2] % quantity

    return format_count
1896
1896
# render a byte count with a human-readable unit; rows are ordered from
# the largest threshold down, so the first matching row (showing at most
# three significant digits) wins
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1909
1909
def uirepr(s):
    """repr() a value for ui display.

    Doubled backslashes are collapsed so Windows paths stay readable.
    """
    rendered = repr(s)
    # Avoid double backslash in Windows path repr()
    return rendered.replace('\\\\', '\\')
1913
1913
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr so the head occupies at most space_left display
            # columns (measured with encoding.ucolwidth)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class: rebind the module-level name so later calls skip
    # re-creating the subclass and just instantiate it
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2017
2017
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a local-encoding string to at most width display columns.

    initindent is prepended to the first output line, hangindent to
    every subsequent one.  The text is decoded to unicode so the
    width-aware MBTextWrapper measures wide characters correctly, then
    re-encoded on return.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2030
2030
def iterlines(iterator):
    """Yield the individual lines of every chunk produced by iterator."""
    for text_block in iterator:
        for text_line in text_block.splitlines():
            yield text_line
2035
2035
def expandpath(path):
    """Expand environment variables, then a leading '~', in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2038
2038
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # frozen builds (py2exe/py2app/...) bundle their own launcher
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [os.environ['EXECUTABLEPATH']]
        else:
            return [sys.executable]
    return gethgcmd()
2049
2053
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the dead child; os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD is absent on platforms without it (e.g. Windows)
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): terminated holds (pid, status) tuples, so
            # 'pid in terminated' never matches a bare pid; child death
            # appears to be detected via testpid(pid) instead -- confirm
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2084
2088
2085 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2089 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2086 """Return the result of interpolating items in the mapping into string s.
2090 """Return the result of interpolating items in the mapping into string s.
2087
2091
2088 prefix is a single character string, or a two character string with
2092 prefix is a single character string, or a two character string with
2089 a backslash as the first character if the prefix needs to be escaped in
2093 a backslash as the first character if the prefix needs to be escaped in
2090 a regular expression.
2094 a regular expression.
2091
2095
2092 fn is an optional function that will be applied to the replacement text
2096 fn is an optional function that will be applied to the replacement text
2093 just before replacement.
2097 just before replacement.
2094
2098
2095 escape_prefix is an optional flag that allows using doubled prefix for
2099 escape_prefix is an optional flag that allows using doubled prefix for
2096 its escaping.
2100 its escaping.
2097 """
2101 """
2098 fn = fn or (lambda s: s)
2102 fn = fn or (lambda s: s)
2099 patterns = '|'.join(mapping.keys())
2103 patterns = '|'.join(mapping.keys())
2100 if escape_prefix:
2104 if escape_prefix:
2101 patterns += '|' + prefix
2105 patterns += '|' + prefix
2102 if len(prefix) > 1:
2106 if len(prefix) > 1:
2103 prefix_char = prefix[1:]
2107 prefix_char = prefix[1:]
2104 else:
2108 else:
2105 prefix_char = prefix
2109 prefix_char = prefix
2106 mapping[prefix_char] = prefix_char
2110 mapping[prefix_char] = prefix_char
2107 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2111 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2108 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2112 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2109
2113
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        number = int(port)
    except ValueError:
        # not numeric: resolve it as a symbolic service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
    return number
2126
2130
# recognized spellings of config booleans; lookups are done lowercased
_booleans = dict.fromkeys(['1', 'yes', 'true', 'on', 'always'], True)
_booleans.update(dict.fromkeys(['0', 'no', 'false', 'off', 'never'], False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2137
2141
2138 _hexdig = '0123456789ABCDEFabcdef'
2142 _hexdig = '0123456789ABCDEFabcdef'
2139 _hextochr = dict((a + b, chr(int(a + b, 16)))
2143 _hextochr = dict((a + b, chr(int(a + b, 16)))
2140 for a in _hexdig for b in _hexdig)
2144 for a in _hexdig for b in _hexdig)
2141
2145
2142 def _urlunquote(s):
2146 def _urlunquote(s):
2143 """Decode HTTP/HTML % encoding.
2147 """Decode HTTP/HTML % encoding.
2144
2148
2145 >>> _urlunquote('abc%20def')
2149 >>> _urlunquote('abc%20def')
2146 'abc def'
2150 'abc def'
2147 """
2151 """
2148 res = s.split('%')
2152 res = s.split('%')
2149 # fastpath
2153 # fastpath
2150 if len(res) == 1:
2154 if len(res) == 1:
2151 return s
2155 return s
2152 s = res[0]
2156 s = res[0]
2153 for item in res[1:]:
2157 for item in res[1:]:
2154 try:
2158 try:
2155 s += _hextochr[item[:2]] + item[2:]
2159 s += _hextochr[item[:2]] + item[2:]
2156 except KeyError:
2160 except KeyError:
2157 s += '%' + item
2161 s += '%' + item
2158 except UnicodeDecodeError:
2162 except UnicodeDecodeError:
2159 s += unichr(int(item[:2], 16)) + item[2:]
2163 s += unichr(int(item[:2], 16)) + item[2:]
2160 return s
2164 return s
2161
2165
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        # Build the credential-free URL string while temporarily hiding
        # user/passwd, restoring them even if str() raises.
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2448
2452
def hasscheme(path):
    """Return True if path parses as a URL with a scheme component."""
    return bool(url(path).scheme)
2451
2455
def hasdriveletter(path):
    """Return truthy if path begins with a Windows drive letter ('x:').

    Note: returns the falsy path itself (e.g. '' or None) rather than
    False when path is empty, matching boolean-context use by callers.
    """
    return path and path[1:2] == ':' and path[0:1].isalpha()
2454
2458
def urllocalpath(path):
    """Return the local filesystem path for path, parsed as a URL with
    query and fragment left embedded in the path component."""
    return url(path, parsequery=False, parsefragment=False).localpath()
2457
2461
def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        # replace, rather than drop, so the output still shows that a
        # password was present
        u.passwd = '***'
    return str(u)
2464
2468
def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)
2470
2474
def isatty(fp):
    """Return fp.isatty(), or False for objects with no isatty method."""
    try:
        return fp.isatty()
    except AttributeError:
        return False
2476
2480
# Format an elapsed time in seconds with an appropriate unit
# (s/ms/us/ns) and precision; the (threshold, divisor, format) triples
# are interpreted by unitcountfn.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2492
2496
2493 _timenesting = [0]
2497 _timenesting = [0]
2494
2498
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            # report even when func raises; indent by nesting depth so
            # nested timed calls line up
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2519
2523
2520 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2524 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2521 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2525 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2522
2526
def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2541
2545
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        """Register hook under the given source name."""
        self._hooks.append((source, hook))

    def __call__(self, *args):
        """Call every registered hook with args, ordered by source name;
        return the list of their results."""
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results
2559
2563
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so interleaved output stays ordered
        otherf.flush()
    f.write('%s at:\n' % msg)
    entries = [('%s:%s' % (fn, ln), func)
        for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        # align the "in <func>" column on the longest file:line entry
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (fnmax, fnln, func))
    f.flush()
2576
2580
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # map may be a dirstate-like mapping (has iteritems; entries
        # whose state equals skip are excluded) or a plain iterable of
        # paths
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            # finddirs yields deepest-first: once a base is present,
            # all of its ancestors are too, so bump it and stop
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            # mirror addpath: decrement the deepest shared base and
            # stop; a count reaching zero removes the entry and
            # continues up to the ancestors
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2612
2616
# prefer the C implementation of dirs when the parsers module provides one
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2615
2619
def finddirs(path):
    """Yield every ancestor directory of a '/'-separated path,
    deepest first (the path itself is not yielded)."""
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)
2621
2625
2622 # compression utility
2626 # compression utility
2623
2627
class nocompress(object):
    """Pass-through compressor exposing the compress/flush interface of
    the zlib/bz2 compressor objects."""
    def compress(self, x):
        return x
    def flush(self):
        return ""
2629
2633
# compression-type header -> factory returning a compressor object
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2638
2642
2639 def _makedecompressor(decompcls):
2643 def _makedecompressor(decompcls):
2640 def generator(f):
2644 def generator(f):
2641 d = decompcls()
2645 d = decompcls()
2642 for chunk in filechunkiter(f):
2646 for chunk in filechunkiter(f):
2643 yield d.decompress(chunk)
2647 yield d.decompress(chunk)
2644 def func(fh):
2648 def func(fh):
2645 return chunkbuffer(generator(fh))
2649 return chunkbuffer(generator(fh))
2646 return func
2650 return func
2647
2651
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def __call__(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this manager suppressed the exception; don't pass
                    # it to the remaining exit functions
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # an exit function itself raised; remember it and keep
                # unwinding the remaining managers with the new exception
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2707
2711
2708 def _bz2():
2712 def _bz2():
2709 d = bz2.BZ2Decompressor()
2713 d = bz2.BZ2Decompressor()
2710 # Bzip2 stream start with BZ, but we stripped it.
2714 # Bzip2 stream start with BZ, but we stripped it.
2711 # we put it back for good measure.
2715 # we put it back for good measure.
2712 d.decompress('BZ')
2716 d.decompress('BZ')
2713 return d
2717 return d
2714
2718
# compression-type header -> function wrapping a file handle in a
# streaming decompressor; '_truncatedBZ' handles bz2 streams whose
# leading 'BZ' magic was stripped
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2722
2726
# convenient shortcut
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now