##// END OF EJS Templates
util: rename argument of isatty()...
Yuya Nishihara -
r27363:c7ab2087 default
parent child Browse files
Show More
@@ -1,2504 +1,2504
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import urllib
37 import urllib
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 )
46 )
47
47
# Select the platform-specific backend module.  Every name re-exported from
# 'platform' below must be provided by both implementations.
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform
52
52
# Short aliases for the hashlib constructors used throughout this module.
md5 = hashlib.md5
sha1 = hashlib.sha1
sha512 = hashlib.sha512
# Shorthand for the gettext translation function.
_ = i18n._

# Re-export the platform-specific implementations under stable names so the
# rest of the codebase never imports 'windows' or 'posix' directly.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# Prefer the C implementation from osutil when it is available.
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
111
111
# Python compatibility

# Unique sentinel for "no value supplied"; None cannot be used because it is
# a legitimate attribute value (see safehasattr below).
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
120
120
def safehasattr(thing, attr):
    """Return True if 'thing' has an attribute named 'attr'.

    Implemented with getattr() and a sentinel default.
    NOTE(review): presumably preferred over the Python 2 hasattr() builtin,
    which swallows all exceptions raised by property getters -- confirm.
    """
    return getattr(thing, attr, _notset) is not _notset
123
123
# Supported digest algorithms, keyed by the name used on the wire/in config.
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check: every entry in the strength ordering must be a known digest.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
134
134
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create one hasher per name in 'digests', optionally seeded with 's'.

        Raises Abort for an unknown digest name.
        """
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed 'data' to every underlying hasher."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for 'key'; Abort for unknown digest types."""
        if key not in DIGESTS:
            # fix: format the message with 'key', not the unrelated
            # module-level loop variable 'k' used previously
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
181
181
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # Count and hash everything that passes through this handle.
        data = self._fh.read(length)
        self._got += len(data)
        self._digester.update(data)
        return data

    def validate(self):
        """Abort unless the observed size and all digests match."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, want in self._digests.items():
            have = self._digester[name]
            if want != have:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, want, have))
213
213
# Compatibility shim: provide a 'buffer' callable for zero-copy-ish slicing
# on interpreters that lack the Python 2 builtin.
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            # Python 2 without the builtin: fall back to a plain copy.
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # Python 3: memoryview slicing avoids copying the data.
            return memoryview(sliceable)[offset:]
223
223
# Whether child processes should close inherited file descriptors
# (enabled on POSIX only).
closefds = os.name == 'posix'

# Read granularity, in bytes, used by bufferedinputpipe._fillbuffer().
_chunksize = 4096
227
227
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        # 'input' must be a file-like object exposing fileno(), closed and
        # close(); raw reads go through os.read() on its descriptor.
        self._input = input
        self._buffer = []    # pending chunks, oldest first
        self._eof = False    # set once os.read() returns no data
        self._lenbuf = 0     # total bytes across all chunks in _buffer

    @property
    def hasbuffer(self):
        """True if any data is currently buffered

        This will be used externally as a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        # Delegates to the wrapped object; buffered data may still remain
        # readable after the underlying pipe is closed.
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        """Return up to 'size' bytes, reading more only when needed."""
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        """Return one line (including '\\n'), or the remainder at EOF."""
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first newline within the newest chunk (-1 if none)
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # Collapse all chunks so a single slice below suffices.
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        # Keep any leftover as the single remaining chunk.
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # Read through os.read() directly so no data ever hides inside
        # Python's own (unobservable) IO buffering.
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
321
321
def popen2(cmd, env=None, newlines=False):
    """Run 'cmd' through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
332
332
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but return only the (stdin, stdout, stderr) pipes."""
    return popen4(cmd, env, newlines)[:3]
336
336
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run 'cmd' through the shell with all three standard streams piped.

    Returns (stdin, stdout, stderr, process).
    """
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
345
345
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # No generated version module (e.g. running from a source checkout).
        return 'unknown'
    return __version__.version
353
353
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()

    # Split off the "+extra" suffix, if any; extra stays None when absent.
    vparts, plus, extra = v.partition('+')
    if not plus:
        extra = None

    # Collect leading integer components, stopping at the first non-integer.
    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    vints += [None] * (3 - len(vints))

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
406
406
# used by parsedate
# Formats are tried in order; most specific (full date + time) first.
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)
434
434
# Superset of defaultdateformats that also accepts coarser-grained
# (year- or month-only) dates.
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
441
441
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # func.__code__ is available on Python 2.6+ as well as Python 3, unlike
    # the legacy func_code attribute used previously (Python 2 only).
    if func.__code__.co_argcount == 0:
        # Zero-argument function: a one-slot list is enough.
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
467
467
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-assigning an existing key moves
    it to the end of that order.
    '''
    def __init__(self, data=None):
        # _list holds the keys in insertion order.
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # Re-inserting a key moves it to the end of the ordering.
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        # 'src' may be a dict (Python 2 iteritems) or an iterable of pairs.
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: return the popped value (it was previously discarded, so
        # pop() always returned None, unlike dict.pop)
        val = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned above
            pass
        return val
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # Place 'key' at an explicit position in the ordering.
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
512
512
class lrucachedict(object):
    '''cache most recent gets from or sets to this dictionary'''
    def __init__(self, maxsize):
        self._cache = {}
        self._maxsize = maxsize
        # Keys ordered least- to most-recently used.
        self._order = collections.deque()

    def __getitem__(self, key):
        # A hit refreshes the key's position at the most-recent end.
        val = self._cache[key]
        self._order.remove(key)
        self._order.append(key)
        return val

    def __setitem__(self, key, value):
        if key in self._cache:
            # Re-assignment: only the recency position changes.
            self._order.remove(key)
        elif len(self._cache) >= self._maxsize:
            # New key at capacity: drop the least recently used entry.
            del self._cache[self._order.popleft()]
        self._cache[key] = value
        self._order.append(key)

    def __contains__(self, key):
        return key in self._cache

    def clear(self):
        self._cache.clear()
        self._order = collections.deque()
541
541
def lrucachefunc(func):
    '''cache most recent results of function calls

    At most ~20 entries are retained; the least recently used entry is
    evicted when the cache grows past that bound.
    '''
    cache = {}
    order = collections.deque()
    # func.__code__ is available on Python 2.6+ as well as Python 3, unlike
    # the legacy func_code attribute used previously (Python 2 only).
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
568
568
class propertycache(object):
    """Descriptor: compute an attribute once, then store it on the instance.

    The first access calls the wrapped function and writes the result into
    the instance's __dict__ under the same name, so subsequent accesses
    bypass this (non-data) descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
581
581
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd,
                            shell=True,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
588
588
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.

    Raises Abort when the command exits with a non-zero status.
    '''
    inname, outname = None, None
    try:
        # write the input to a temp file the command can read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # pre-create the output file so OUTFILE names an existing path
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status value means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort removal of both temporary files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
625
625
# maps a filter-spec prefix to the function implementing that filter
# strategy; consulted by filter() below
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
630
630
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # NOTE: intentionally shadows the builtin 'filter'; callers use it as
    # util.filter. Dispatch on a known prefix ('tempfile:' or 'pipe:'),
    # defaulting to a pipe when no prefix matches.
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
637
637
def binary(s):
    """return true if a string is binary data"""
    # empty or None input is never considered binary
    if not s:
        return False
    return '\0' in s
641
641
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # position of the highest set bit (0 for x == 0)
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    size = 0
    for piece in source:
        pending.append(piece)
        size += len(piece)
        if size < min:
            continue
        if min < max:
            # double the threshold, but jump straight to the largest
            # power of two not exceeding what was actually buffered,
            # capped at max
            min = min << 1
            nmin = 1 << log2(size)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        size = 0
        pending = []
    if pending:
        # flush whatever is left, even if below the threshold
        yield ''.join(pending)
672
672
# re-export error.Abort so callers can simply use util.Abort
Abort = error.Abort
674
674
def always(fn):
    '''predicate that is true for any input'''
    return True
677
677
def never(fn):
    '''predicate that is false for any input'''
    return False
680
680
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # restore only if the caller had GC on; never force-enable it
            if wasenabled:
                gc.enable()
    return wrapper
702
702
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path exists, so
            # fall back to an absolute path rooted at root
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # pop the shared leading components off both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join((['..'] * len(a)) + b) or '.'
728
728
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    # py2exe sets sys.frozen (new) or sys.importers (old);
    # tools/freeze registers __main__ as a frozen module
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze
738
738
# the location of data files matching the source code
if mainfrozen():
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# point the i18n machinery at the directory holding translations
i18n.setdatapath(datapath)
747
747
# cached path of the 'hg' executable; populated lazily by hgexecutable()
_hgexecutable = None
749
749
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is computed once and cached in _hgexecutable.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # explicit override via the HG environment variable
            _sethgexecutable(hg)
        elif mainfrozen():
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # running directly from the 'hg' script
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
768
768
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # overwrites the module-level cache consulted by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
773
773
def _isstdout(f):
    # duck-typed check whether f refers to the real stdout; returns a
    # falsy non-bool (None) when f has no fileno attribute
    fileno = getattr(f, 'fileno', None)
    return fileno and fileno() == sys.__stdout__.fileno()
777
777
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # flush our own stdout so child output doesn't interleave oddly
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # run via subprocess with a shell-friendly environment copy
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # caller wants output captured: merge stderr into stdout and
            # copy it line by line into the supplied file-like object
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # on OpenVMS an odd status value means success
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
836
836
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback depth of 1 means the TypeError came from the
            # call itself (mismatched signature), not from inside func's
            # body, so translate it; otherwise re-raise untouched
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
848
848
def copyfile(src, dest, hardlink=False):
    "copy a file, preserving mode and atime/mtime"
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    # NOTE: 'if False and' deliberately disables this branch; keep it
    # so the intended behavior is easy to restore
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink rather than copying its target
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
869
869
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns (hardlink, num): whether hardlinking was still in effect at
    the end, and how many files were processed.
    """
    num = 0

    if hardlink is None:
        # default to hardlinking when src and dst are on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by the files handled so far
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # hardlinking failed; fall back to copying from here on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    # None signals completion to the progress callback
    progress(topic, None)

    return hardlink, num
906
906
# names and characters that are invalid in Windows filenames,
# consulted by checkwinfilename() below
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component, accepting both separators
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # control characters are never valid in Windows names
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # reserved device names apply to the part before the first dot
        # (eg 'con.xml' is still reserved)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # trailing dot or space is disallowed; the substring test
        # 'n not in ".."' intentionally exempts '.' and '..' themselves
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
957
957
# pick the filename validator matching the current OS: the Windows
# rules above on nt, otherwise whatever the platform layer provides
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
962
962
def makelock(info, pathname):
    # prefer a symlink whose target carries the lock info (created
    # atomically); EEXIST means the lock is already held, so re-raise.
    # Any other OSError falls through to the regular-file fallback.
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # fallback: exclusively create a regular file containing info
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
975
975
def readlock(pathname):
    # a lock is either a symlink (target holds the info) or, on
    # platforms without symlinks, a regular file holding the info
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported --
        # both mean "read the file instead"
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
988
988
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    # fall back to stat-by-name for file-like objects without a
    # real descriptor
    try:
        fd = fp.fileno()
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fd)
995
995
996 # File system features
996 # File system features
997
997
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    # build a case-flipped variant of the final component
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
    if b == b2:
        return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            # flipped-case name resolves to the very same file:
            # the filesystem folds case
            return False
        return True
    except OSError:
        # flipped-case name doesn't exist: case-sensitive
        return True
1020
1020
# optional re2 support: _re2 is tri-state -- None means "module present
# but not yet validated" (resolved by _re._checkre2), False means
# unavailable, True means usable
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1026
1026
class _re(object):
    '''Facade over the regexp engine: prefers re2 when it is installed
    and working, transparently falling back to the stdlib re module.'''
    def _checkre2(self):
        # resolve the tri-state _re2 flag by actually trying a match
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline in the pattern, not as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 doesn't support; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton; use util.re.compile / util.re.escape
re = _re()
1071
1071
# per-directory cache mapping normcase-ed names to on-disk names
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes for use inside a regex character class. The
    # previous code called seps.replace() without keeping the result (a
    # no-op, strings being immutable), which left '\' unescaped in the
    # pattern on Windows; keep the escaped string so the character class
    # really contains the backslash separator.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the caller's spelling when the name is not on disk
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1114
1114
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        # a working implementation must report both links
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always close and remove both scratch files
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1146
1146
def endswithsep(path):
    '''Check whether path ends with os.sep or os.altsep.

    Always returns a bool; the previous short-circuit expression leaked
    None (instead of False) on platforms where os.altsep is unset.'''
    if path.endswith(os.sep):
        return True
    return bool(os.altsep) and path.endswith(os.altsep)
1150
1150
def splitpath(path):
    '''Split path into its components using os.sep.

    os.altsep is deliberately not honored: this is just a named
    alternative to ``path.split(os.sep)``.  Run the path through
    os.path.normpath() first if normalization is needed.'''
    return path.split(os.sep)
1158
1158
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # on Windows a GUI is always assumed; elsewhere key off $DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1173
1173
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirname, basename = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % basename, dir=dirname)
    os.close(fd)
    # mkstemp creates files as 0600, which is usually not what we want;
    # prefer the original file's mode, falling back to createmode/umask
    # when the original does not exist
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy: the fresh empty temp file is fine
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1212
1212
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # mktempcopy duplicates the original's contents and permission
        # bits; emptyok skips the copy when we are truncating anyway
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # publish: close the temp file, then rename it over the
        # permanent name in one step
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # drop all writes: unlink the temp file (best-effort) and close
        # the handle; the original file is left untouched
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1250
1250
def makedirs(name, mode=None, notindexed=False):
    """Recursively create directory ``name``, parents first.

    ``mode`` (when given) is chmod'ed onto directories created here;
    ``notindexed`` is forwarded to makedir().
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # already present: nothing to create or chmod
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: create it, then retry this directory
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1267
1267
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1289
1289
def readfile(path):
    """Return the entire contents of the file at ``path`` as bytes.

    Uses a context manager instead of the manual try/finally so the
    file handle is closed even if read() raises.
    """
    with open(path, 'rb') as fp:
        return fp.read()
1296
1296
def writefile(path, text):
    """Create or overwrite the file at ``path`` with ``text`` (bytes).

    Uses a context manager instead of the manual try/finally so the
    file handle is closed even if write() raises.
    """
    with open(path, 'wb') as fp:
        fp.write(text)
1303
1303
def appendfile(path, text):
    """Append ``text`` (bytes) to the file at ``path``, creating it if
    missing.

    Uses a context manager instead of the manual try/finally so the
    file handle is closed even if write() raises.
    """
    with open(path, 'ab') as fp:
        fp.write(text)
1310
1310
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        # chunks larger than 1 MB (2**20) are re-yielded in 256 kB
        # (2**18) slices so a single huge chunk can't dominate memory
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # read position inside the chunk at the head of self._queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue: pull roughly 2**18 bytes ahead
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # iterator exhausted; return what we have so far
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # left < chunkremaining here, so this drives left
                # negative and terminates the while loop
                left -= chunkremaining

        return ''.join(buf)
1391
1391
def filechunkiter(f, size=65536, limit=None):
    """Yield the contents of file-like object ``f`` in chunks.

    Each chunk is at most ``size`` bytes (default 65536); at most
    ``limit`` bytes are read in total when a limit is given (default is
    to read everything).  A chunk may be shorter than ``size`` at the
    end of the file, or when ``f`` is a socket or similar object that
    returns short reads."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        data = nbytes and f.read(nbytes)
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
1412
1412
def makedate(timestamp=None):
    '''Build a (unixtime, offset) pair for ``timestamp`` (default: the
    current time), where offset is the local timezone's distance from
    UTC in seconds.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        raise Abort(_("negative timestamp: %d") % timestamp,
                    hint=_("check your clock"))
    # timezone offset = UTC wall clock minus local wall clock
    utcwall = datetime.datetime.utcfromtimestamp(timestamp)
    localwall = datetime.datetime.fromtimestamp(timestamp)
    delta = utcwall - localwall
    return timestamp, delta.days * 86400 + delta.seconds
1425
1425
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string.

    ``%1``/``%2`` (or ``%z``) in ``format`` are replaced by the
    signed hour and minute parts of the offset before strftime runs.
    """
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range; clamp to the largest representable
        # value (sys.maxsize, not the Python-2-only sys.maxint)
        t = time.gmtime(sys.maxsize)
    s = time.strftime(format, t)
    return s
1449
1449
def shortdate(date=None):
    '''Render a (timestamp, tzoff) tuple as an ISO 8601 date
    (YYYY-MM-DD).'''
    return datestr(date, format='%Y-%m-%d')
1453
1453
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts "+HHMM"/"-HHMM" forms plus the literal names "GMT" and
    "UTC"; anything else yields None.  The returned offset follows the
    internal convention: seconds *behind* UTC are positive."""
    if tz in ("GMT", "UTC"):
        return 0
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        direction = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -direction * (hours * 3600 + minutes * 60)
    return None
1464
1464
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    # if the last whitespace-separated token parses as a timezone,
    # strip it and parse the remainder of the string without it
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    # NOTE(review): despite the ``[]`` default, ``defaults`` is indexed
    # below like a mapping of part -> (biased, now) string pairs; the
    # bare list default is unusable — presumably callers always pass a
    # populated mapping (parsedate does); confirm before relying on it
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # field absent from format: append a default value (with
            # "@" as separator) so strptime still gets a full string;
            # usenow (a bool) picks index 0 (biased) or 1 (today) of
            # the (biased, now) pair
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1494
1494
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    # already-parsed (unixtime, offset) tuples pass straight through
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates, including their translations
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: a "unixtime offset" pair of integers
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            # strdate indexes this pair by a bool: (biased, today)
            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1573
1573
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def floor(spec):
        # resolve unspecified fields toward the earliest possible moment
        bias = {'mb': "1", 'd': "1"}
        return parsedate(spec, extendeddateformats, bias)[0]

    def ceiling(spec):
        # resolve unspecified fields toward the latest possible moment,
        # trying month lengths from longest to shortest until one parses
        bias = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for monthend in ("31", "30", "29"):
            bias["d"] = monthend
            try:
                return parsedate(spec, extendeddateformats, bias)[0]
            except Abort:
                pass
        bias["d"] = "28"
        return parsedate(spec, extendeddateformats, bias)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    if date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = ceiling(date[1:])
        return lambda x: x <= when
    if date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = floor(date[1:])
        return lambda x: x >= when
    if date[0] == "-":
        # "-N": everything in the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    if " to " in date:
        # inclusive range between two specs
        a, b = date.split(" to ")
        start, stop = floor(a), ceiling(b)
        return lambda x: start <= x <= stop
    # plain date: match anywhere within its span of accuracy
    start, stop = floor(date), ceiling(date)
    return lambda x: start <= x <= stop
1649
1649
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    # regex patterns get a compiled search function
    if pattern.startswith('re:'):
        regexpat = pattern[3:]
        try:
            compiled = remod.compile(regexpat)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexpat, compiled.search
    # an explicit 'literal:' prefix is stripped; anything else (including
    # unknown prefixes) falls through and is matched verbatim
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
1688
1688
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the domain of an email address
    user = user.partition('@')[0]
    # keep only the part after a '<' (e.g. "Real Name <login")
    if '<' in user:
        user = user.split('<', 1)[1]
    # stop at the first space or dot
    user = user.partition(' ')[0]
    user = user.partition('.')[0]
    return user
1704
1704
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the domain, then anything up to and including a '<'
    user = user.partition('@')[0]
    if '<' in user:
        user = user.split('<', 1)[1]
    return user
1714
1714
def email(author):
    '''get email of author.'''
    # take the span between '<' and '>'; with no brackets, both ends
    # fall back to the whole string (find() returns -1)
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        return author[start:]
    return author[start:end]
1721
1721
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # encoding.trim is display-column aware (East Asian wide characters
    # count as two columns), so this trims by rendered width, not by
    # byte or character count.
    return encoding.trim(text, maxlength, ellipsis='...')
1725
1725
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # use the first row whose threshold (multiplier * divisor) the
        # count reaches; rows are expected largest-unit first
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # nothing matched: fall back to the last (smallest-unit) row
        return unittable[-1][2] % count

    return go
1736
1736
# Render a byte count with a human-readable binary unit.  unitcountfn()
# picks the first row whose threshold (multiplier * divisor) the value
# reaches, so within each unit the coarsest precision comes first and
# the row order below is significant.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1749
1749
def uirepr(s):
    """repr() a string for UI display, collapsing doubled backslashes.

    Avoids the double backslash that repr() produces for Windows paths.
    """
    quoted = repr(s)
    return quoted.replace('\\\\', '\\')
1753
1753
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # Factory that builds (once) a width-aware TextWrapper subclass and
    # returns an instance; see the 'global' trick at the bottom.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr so the first part fits into space_left display
            # columns (per encoding.ucolwidth), returning (head, rest).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chunks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # Replace this factory with the class itself so the class body is
    # only executed on the first call; later calls go straight to tw().
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1857
1857
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a local-encoding string to *width* display columns.

    initindent prefixes the first output line, hangindent every
    subsequent one.  The text is decoded to unicode for column-aware
    wrapping and re-encoded into the local encoding on return.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
1870
1870
def iterlines(iterator):
    """Yield each line of every chunk produced by *iterator*."""
    for chunk in iterator:
        # splitlines() discards the line endings themselves
        for ln in chunk.splitlines():
            yield ln
1875
1875
def expandpath(path):
    """Expand environment variables, then ~user constructs, in *path*."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
1878
1878
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    # frozen binary: the executable itself is the whole command
    return [sys.executable]
1889
1889
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the exited child; os.wait() returns a (pid, status) tuple.
        # NOTE(review): the loop below tests 'pid in terminated' with a
        # bare pid, which never matches a (pid, status) tuple — early
        # exit seems to be detected via testpid() instead; confirm.
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)  # absent on Windows
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() after the liveness test to avoid a race
            # where the child fulfilled the condition and then exited
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
1924
1924
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    The caller's mapping is never modified.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # prefix is regex-escaped ('\\$'); the raw character follows
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # map the doubled prefix to a literal prefix character in a
        # private copy instead of clobbering the caller's mapping
        mapping = dict(mapping)
        mapping[prefix_char] = prefix_char
    # group captures the key (or escaped prefix) after the prefix char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1949
1949
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        # plain number (int or numeric string)
        return int(port)
    except ValueError:
        # not numeric: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
1966
1966
# canonical boolean spellings accepted by config parsing
_booleans = dict.fromkeys(('1', 'yes', 'true', 'on', 'always'), True)
_booleans.update(dict.fromkeys(('0', 'no', 'false', 'off', 'never'), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # case-insensitive lookup; unknown spellings yield None
    return _booleans.get(s.lower(), None)
1977
1977
_hexdig = '0123456789ABCDEFabcdef'
# map every two-hex-digit string (any case combination) to its character
_hextochr = dict((hi + lo, chr(int(hi + lo, 16)))
                 for hi in _hexdig for lo in _hexdig)

def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    pieces = s.split('%')
    # fast path: no '%' at all, nothing to decode
    if len(pieces) == 1:
        return s
    out = pieces[0]
    for piece in pieces[1:]:
        try:
            out += _hextochr[piece[:2]] + piece[2:]
        except KeyError:
            # not a valid two-digit escape: keep the '%' literally
            out += '%' + piece
        except UnicodeDecodeError:
            # py2 str/unicode mix: decode the escape as a code point
            out += unichr(int(piece[:2], 16)) + piece[2:]
    return out
2001
2001
2002 class url(object):
2002 class url(object):
2003 r"""Reliable URL parser.
2003 r"""Reliable URL parser.
2004
2004
2005 This parses URLs and provides attributes for the following
2005 This parses URLs and provides attributes for the following
2006 components:
2006 components:
2007
2007
2008 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2008 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2009
2009
2010 Missing components are set to None. The only exception is
2010 Missing components are set to None. The only exception is
2011 fragment, which is set to '' if present but empty.
2011 fragment, which is set to '' if present but empty.
2012
2012
2013 If parsefragment is False, fragment is included in query. If
2013 If parsefragment is False, fragment is included in query. If
2014 parsequery is False, query is included in path. If both are
2014 parsequery is False, query is included in path. If both are
2015 False, both fragment and query are included in path.
2015 False, both fragment and query are included in path.
2016
2016
2017 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2017 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2018
2018
2019 Note that for backward compatibility reasons, bundle URLs do not
2019 Note that for backward compatibility reasons, bundle URLs do not
2020 take host names. That means 'bundle://../' has a path of '../'.
2020 take host names. That means 'bundle://../' has a path of '../'.
2021
2021
2022 Examples:
2022 Examples:
2023
2023
2024 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2024 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2025 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2025 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2026 >>> url('ssh://[::1]:2200//home/joe/repo')
2026 >>> url('ssh://[::1]:2200//home/joe/repo')
2027 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2027 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2028 >>> url('file:///home/joe/repo')
2028 >>> url('file:///home/joe/repo')
2029 <url scheme: 'file', path: '/home/joe/repo'>
2029 <url scheme: 'file', path: '/home/joe/repo'>
2030 >>> url('file:///c:/temp/foo/')
2030 >>> url('file:///c:/temp/foo/')
2031 <url scheme: 'file', path: 'c:/temp/foo/'>
2031 <url scheme: 'file', path: 'c:/temp/foo/'>
2032 >>> url('bundle:foo')
2032 >>> url('bundle:foo')
2033 <url scheme: 'bundle', path: 'foo'>
2033 <url scheme: 'bundle', path: 'foo'>
2034 >>> url('bundle://../foo')
2034 >>> url('bundle://../foo')
2035 <url scheme: 'bundle', path: '../foo'>
2035 <url scheme: 'bundle', path: '../foo'>
2036 >>> url(r'c:\foo\bar')
2036 >>> url(r'c:\foo\bar')
2037 <url path: 'c:\\foo\\bar'>
2037 <url path: 'c:\\foo\\bar'>
2038 >>> url(r'\\blah\blah\blah')
2038 >>> url(r'\\blah\blah\blah')
2039 <url path: '\\\\blah\\blah\\blah'>
2039 <url path: '\\\\blah\\blah\\blah'>
2040 >>> url(r'\\blah\blah\blah#baz')
2040 >>> url(r'\\blah\blah\blah#baz')
2041 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2041 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2042 >>> url(r'file:///C:\users\me')
2042 >>> url(r'file:///C:\users\me')
2043 <url scheme: 'file', path: 'C:\\users\\me'>
2043 <url scheme: 'file', path: 'C:\\users\\me'>
2044
2044
2045 Authentication credentials:
2045 Authentication credentials:
2046
2046
2047 >>> url('ssh://joe:xyz@x/repo')
2047 >>> url('ssh://joe:xyz@x/repo')
2048 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2048 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2049 >>> url('ssh://joe@x/repo')
2049 >>> url('ssh://joe@x/repo')
2050 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2050 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2051
2051
2052 Query strings and fragments:
2052 Query strings and fragments:
2053
2053
2054 >>> url('http://host/a?b#c')
2054 >>> url('http://host/a?b#c')
2055 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2055 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2056 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2056 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2057 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2057 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2058 """
2058 """
2059
2059
2060 _safechars = "!~*'()+"
2060 _safechars = "!~*'()+"
2061 _safepchars = "/!~*'()+:\\"
2061 _safepchars = "/!~*'()+:\\"
2062 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2062 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2063
2063
    def __init__(self, path, parsequery=True, parsefragment=True):
        """Parse *path* into URL components (see the class docstring)."""
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                # no scheme: plain filesystem path, nothing more to parse
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                # rsplit: user names may themselves contain '@'
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))
2157
2157
2158 def __repr__(self):
2158 def __repr__(self):
2159 attrs = []
2159 attrs = []
2160 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2160 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2161 'query', 'fragment'):
2161 'query', 'fragment'):
2162 v = getattr(self, a)
2162 v = getattr(self, a)
2163 if v is not None:
2163 if v is not None:
2164 attrs.append('%s: %r' % (a, v))
2164 attrs.append('%s: %r' % (a, v))
2165 return '<url %s>' % ', '.join(attrs)
2165 return '<url %s>' % ', '.join(attrs)
2166
2166
    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            # local paths were stored verbatim; only re-attach the bundle
            # scheme prefix and any fragment
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                # file:///c:/... - keep a slash before the drive letter
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                # bracketed IPv6 literals must not be percent-quoted
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s
2243
2243
    def authinfo(self):
        '''Return (prunedurl, authinfo): the URL string with credentials
        stripped, plus a tuple usable by urllib2 password managers (or
        None when no user is set).'''
        user, passwd = self.user, self.passwd
        try:
            # temporarily drop the credentials so str(self) omits them
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            # always restore them, even if str(self) raises
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))
2259
2259
2260 def isabs(self):
2260 def isabs(self):
2261 if self.scheme and self.scheme != 'file':
2261 if self.scheme and self.scheme != 'file':
2262 return True # remote URL
2262 return True # remote URL
2263 if hasdriveletter(self.path):
2263 if hasdriveletter(self.path):
2264 return True # absolute for our purposes - can't be joined()
2264 return True # absolute for our purposes - can't be joined()
2265 if self.path.startswith(r'\\'):
2265 if self.path.startswith(r'\\'):
2266 return True # Windows UNC path
2266 return True # Windows UNC path
2267 if self.path.startswith('/'):
2267 if self.path.startswith('/'):
2268 return True # POSIX-style
2268 return True # POSIX-style
2269 return False
2269 return False
2270
2270
    def localpath(self):
        '''Return a filesystem path for file:// and bundle: URLs;
        any other URL is returned as the original string.'''
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                # re-attach the leading slash consumed during parsing
                path = '/' + path
            return path
        return self._origpath
2283
2283
2284 def islocal(self):
2284 def islocal(self):
2285 '''whether localpath will return something that posixfile can open'''
2285 '''whether localpath will return something that posixfile can open'''
2286 return (not self.scheme or self.scheme == 'file'
2286 return (not self.scheme or self.scheme == 'file'
2287 or self.scheme == 'bundle')
2287 or self.scheme == 'bundle')
2288
2288
def hasscheme(path):
    '''report whether path carries a URL scheme prefix'''
    parsed = url(path)
    return bool(parsed.scheme)
2291
2291
def hasdriveletter(path):
    '''report whether path begins with a Windows drive letter ("c:...")'''
    # preserve the original short-circuit: a falsy path is returned as-is
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2294
2294
def urllocalpath(path):
    '''return the local filesystem path for a raw URL string'''
    # query/fragment parsing is disabled so '?' and '#' stay in the path
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2297
2297
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2304
2304
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2310
2310
def isatty(fp):
    '''Check whether the file-like object fp is attached to a terminal.

    Objects without a usable isatty() method are reported as not a tty.
    '''
    if not hasattr(fp, 'isatty'):
        return False
    try:
        return fp.isatty()
    except AttributeError:
        return False
2316
2316
# human-readable elapsed-time formatter built from unitcountfn; entries run
# from whole seconds down to nanoseconds (presumably (threshold, divisor,
# format) triples -- see unitcountfn for the exact semantics)
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2332
2332
# current report indentation level for nested @timed calls (a one-element
# list so the closure below can mutate it in place)
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        # deepen the indentation for any @timed calls made by func
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            # report even when func raises; restore the nesting level first
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2359
2359
# suffix/multiplier table for sizetoint(); suffixes are tried in order, so
# the bare 'b' entry must stay last or it would shadow 'kb'/'mb'/'gb'
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2362
2362
def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        # no recognized suffix: a plain byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2381
2381
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, hook) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        '''register hook under the given source name'''
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort by source name so invocation order is deterministic
        self._hooks.sort(key=lambda entry: entry[0])
        return [hook(*args) for _source, hook in self._hooks]
2399
2399
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so interleaved output stays ordered
        otherf.flush()
    f.write('%s at:\n' % msg)
    frames = traceback.extract_stack()[:-skip - 1]
    entries = []
    for fn, ln, func, _text in frames:
        entries.append(('%s:%s' % (fn, ln), func))
    if entries:
        # pad the location column so the function names line up
        width = max(len(loc) for loc, _func in entries)
        for loc, func in entries:
            f.write(' %-*s in %s\n' % (width, loc, func))
    f.flush()
2416
2416
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # maps directory name -> number of tracked entries beneath it
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-like input: ignore entries whose state char is 'skip'
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            # manifest-like input: count every path
            for f in map:
                addpath(f)

    def addpath(self, path):
        '''account for path, incrementing the count of each ancestor dir'''
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                # all shallower ancestors are necessarily counted already,
                # so we can stop after the first hit
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        '''drop path, decrementing the count of each ancestor dir'''
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                # ancestor still has other entries; shallower ancestors
                # keep their counts too, so stop here
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2452
2452
# prefer the C implementation of dirs when the parsers module provides one
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2455
2455
def finddirs(path):
    '''yield the ancestor directories of a '/'-separated path, deepest
    first (e.g. 'a/b/c' -> 'a/b', 'a')'''
    end = path.rfind('/')
    while end != -1:
        yield path[:end]
        end = path.rfind('/', 0, end)
2461
2461
2462 # compression utility
2462 # compression utility
2463
2463
class nocompress(object):
    '''identity compressor: passes data through untouched'''
    def compress(self, x):
        # no transformation applied
        return x
    def flush(self):
        # nothing is buffered, so there is nothing left to emit
        return ""
2469
2469
# compressor factories, keyed by the two-letter stream type (None for no
# compression); each value is called with no arguments to get a fresh
# compressor object
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2478
2478
def _makedecompressor(decompcls):
    '''given a decompressor factory, build a function mapping a compressed
    file object to a chunkbuffer of decompressed data'''
    def func(fh):
        def generator():
            # instantiate the decompressor lazily, on first iteration
            engine = decompcls()
            for chunk in filechunkiter(fh):
                yield engine.decompress(chunk)
        return chunkbuffer(generator())
    return func
2487
2487
def _bz2():
    '''create a bz2 decompressor primed for streams missing the 'BZ' magic'''
    d = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    d.decompress('BZ')
    return d
2494
2494
# decompressor factories, keyed by stream type: each maps a compressed file
# object to a readable object yielding decompressed data (None passes the
# file object through unchanged)
decompressors = {None: lambda fh: fh,
                 # '_truncatedBZ' handles bz2 data whose leading 'BZ' magic
                 # was stripped; see _bz2 above
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2502
2502
# convenient shortcut for debugstacktrace while developing
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now