##// END OF EJS Templates
util: make hashlib import unconditional...
Gregory Szorc -
r27357:7f5a0bd4 default
parent child Browse files
Show More
@@ -1,2489 +1,2484
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding, parsers
18 import error, osutil, encoding, parsers
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib
22 import imp, socket, urllib
23 import gc
23 import gc
24 import bz2
24 import bz2
25 import zlib
25 import zlib
26 import hashlib
26
27
27 if os.name == 'nt':
28 if os.name == 'nt':
28 import windows as platform
29 import windows as platform
29 else:
30 else:
30 import posix as platform
31 import posix as platform
31
32
# Convenience aliases so callers can write util.sha1() etc. without
# importing hashlib themselves.
md5 = hashlib.md5
sha1 = hashlib.sha1
sha512 = hashlib.sha512
# Re-export the platform-specific implementations (windows.py or posix.py,
# chosen by the os.name check above) under stable names, so the rest of the
# codebase can simply use util.<name>.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation from osutil when it is available
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
90
# Python compatibility

# private sentinel: distinguishes "attribute absent" from any real value,
# including falsy ones
_notset = object()

def safehasattr(thing, attr):
    """Return True if `thing` has the attribute `attr`.

    Implemented with getattr() and a private sentinel, so only a genuine
    missing attribute counts as absent.
    """
    return getattr(thing, attr, _notset) is not _notset

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
# Guarded because os.stat_float_times was removed in Python 3.7; calling
# it unconditionally would crash at import time there.
if safehasattr(os, 'stat_float_times'):
    os.stat_float_times(False)
97
102
# hashlib has been part of the standard library since Python 2.5, so the
# old conditional try/import dance for sha512 is dead code; register all
# three digests unconditionally.
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every advertised strength must have an implementation
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
118
113
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # feed the chunk to every requested digest engine
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # bug fix: this used to interpolate the undefined name 'k',
            # raising NameError instead of the intended Abort message
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
165
160
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # every byte handed to the caller is also fed to the digester,
        # and counted toward the size check
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        # check the size first: a truncated or padded stream is reported
        # as such instead of as a digest mismatch
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
197
192
# Provide a 'buffer' callable everywhere: the builtin exists on Python 2,
# otherwise fall back to plain slicing (py2 without the builtin) or a
# zero-copy memoryview slice (py3).
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]
207
202
208 import subprocess
203 import subprocess
209 closefds = os.name == 'posix'
204 closefds = os.name == 'posix'
210
205
211 _chunksize = 4096
206 _chunksize = 4096
212
207
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []       # list of pending chunks, newest last
        self._eof = False       # set once os.read returns nothing
        self._lenbuf = 0        # total bytes currently buffered

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep pulling chunks until the request can be satisfied or EOF
        while not self._eof and self._lenbuf < size:
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        newline = -1
        if self._buffer:
            newline = self._buffer[-1].find('\n')
        while not self._eof and newline < 0:
            self._fillbuffer()
            if self._buffer:
                newline = self._buffer[-1].find('\n')
        size = newline + 1
        if newline < 0:  # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        # collapse multiple chunks into one before slicing
        buf = self._buffer[0] if len(self._buffer) == 1 else ''.join(self._buffer)

        data = buf[:size]
        rest = buf[len(data):]
        if rest:
            self._buffer = [rest]
            self._lenbuf = len(rest)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if data:
            self._lenbuf += len(data)
            self._buffer.append(data)
        else:
            self._eof = True
301
def popen2(cmd, env=None, newlines=False):
    """Run cmd through the shell; return the child's (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
317
312
def popen3(cmd, env=None, newlines=False):
    """Like popen4, but drop the Popen object and return only the pipes."""
    stdin, stdout, stderr, proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
321
316
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run cmd through the shell with all three stdio streams piped.

    Returns (stdin, stdout, stderr, Popen object)."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
330
325
def version():
    """Return version information if available."""
    try:
        # generated at build time; absent when running from a source tree
        import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
338
333
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # split off the '+extra' tag, if any: '3.6.1+190-abc' -> ('3.6.1', '190-abc')
    vparts, sep, extra = v.partition('+')
    if not sep:
        extra = None

    vints = []
    for part in vparts.split('.'):
        try:
            vints.append(int(part))
        except ValueError:
            # stop at the first non-numeric component
            break
    # pad with None so indexes 0..2 always exist: (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
391
386
# used by parsedate; the formats are tried in this order, so the more
# specific patterns must come first
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# additional, more ambiguous formats accepted on explicit request only
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
426
421
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # func.__code__ is the Python 2.6+/3.x spelling of the py2-only
    # func.func_code alias, so introspection keeps working on Python 3
    if func.__code__.co_argcount == 0:
        # zero-arg function: a one-slot list is enough
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
452
447
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration yields keys in the order they were last inserted;
    reassigning an existing key moves it to the end.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # move an existing key to the end of the order
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # bug fix: dict.pop's result used to be discarded, so sortdict.pop
        # always returned None instead of the popped value
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was supplied
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
497
492
class lrucachedict(object):
    '''cache most recent gets from or sets to this dictionary'''
    def __init__(self, maxsize):
        self._cache = {}
        self._maxsize = maxsize
        # keys ordered least- to most-recently used
        self._order = collections.deque()

    def __getitem__(self, key):
        value = self._cache[key]
        # promote the key to the most-recently-used position
        self._order.remove(key)
        self._order.append(key)
        return value

    def __setitem__(self, key, value):
        if key in self._cache:
            self._order.remove(key)
        elif len(self._cache) >= self._maxsize:
            # evict the least recently used entry
            del self._cache[self._order.popleft()]
        self._cache[key] = value
        self._order.append(key)

    def __contains__(self, key):
        # note: membership tests do not affect recency
        return key in self._cache

    def clear(self):
        self._cache.clear()
        self._order = collections.deque()
526
521
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    # func.__code__ is the Python 2.6+/3.x spelling of the py2-only
    # func.func_code alias, so introspection keeps working on Python 3
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
553
548
class propertycache(object):
    """Non-data descriptor caching the decorated method's result per instance.

    The first attribute access calls the function and stores the result in
    the instance __dict__, which shadows the descriptor on later accesses.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
566
561
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # stderr is not piped, so only stdout comes back; perr is always None
    pout, perr = proc.communicate(s)
    return pout
573
568
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write the input to a temp file for the command to read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # reserve an output file name; the command will (re)create it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort cleanup of both temporary files
        for name in (inname, outname):
            try:
                if name:
                    os.unlink(name)
            except OSError:
                pass
610
605
# maps a filter specification prefix to the function implementing it;
# specifications without a known prefix fall back to pipefilter
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
615
610
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on the command's scheme prefix; unprefixed commands are
    # treated as plain shell pipes
    for prefix, fltr in filtertable.iteritems():
        if not cmd.startswith(prefix):
            continue
        return fltr(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
622
617
def binary(s):
    """return true if a string is binary data"""
    # an empty (or None) value never counts as binary
    if not s:
        return False
    # a NUL byte is taken as the marker of binary content
    return '\0' in s
626
621
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def _floorlog2(x):
        # floor(log2(x)) for positive x; 0 maps to 0 rather than raising
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    size = 0
    for piece in source:
        pending.append(piece)
        size += len(piece)
        if size < min:
            continue
        if min < max:
            # grow the target: at least double it, jump straight past the
            # size of the chunk just emitted, and cap at max
            min = min << 1
            candidate = 1 << _floorlog2(size)
            if candidate > min:
                min = candidate
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        size = 0
    if pending:
        # flush whatever is left, even if smaller than min
        yield ''.join(pending)
657
652
# re-export error.Abort so util users need not import error directly
Abort = error.Abort
659
654
def always(fn):
    """Matcher predicate that accepts every input."""
    return True
662
657
def never(fn):
    """Matcher predicate that rejects every input."""
    return False
665
660
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def inner(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable if the caller had GC on to begin with
            if wasenabled:
                gc.enable()
    return inner
687
682
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # n1 is absolute: anchor n2 at root so both sides are comparable
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists, go via root
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    fromparts = splitpath(n1)
    toparts = n2.split('/')
    # count the shared leading components
    common = 0
    while (common < len(fromparts) and common < len(toparts)
           and fromparts[common] == toparts[common]):
        common += 1
    # climb out of what remains of n1, then descend into n2
    ups = ['..'] * (len(fromparts) - common)
    return os.sep.join(ups + toparts[common:]) or '.'
713
708
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    # the or-chain returns as soon as any freeze marker is detected
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze
723
718
# the location of data files matching the source code
if mainfrozen():
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# point the translation machinery at the data directory
i18n.setdatapath(datapath)
732
727
# cached path of the 'hg' executable; filled in lazily by hgexecutable()
_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            # a frozen binary is itself the hg executable
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            # search PATH, falling back to whatever name we were run as
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
753
748
754 def _sethgexecutable(path):
749 def _sethgexecutable(path):
755 """set location of the 'hg' executable"""
750 """set location of the 'hg' executable"""
756 global _hgexecutable
751 global _hgexecutable
757 _hgexecutable = path
752 _hgexecutable = path
758
753
759 def _isstdout(f):
754 def _isstdout(f):
760 fileno = getattr(f, 'fileno', None)
755 fileno = getattr(f, 'fileno', None)
761 return fileno and fileno() == sys.__stdout__.fileno()
756 return fileno and fileno() == sys.__stdout__.fileno()
762
757
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    # flush our stdout so child output interleaves sensibly with ours
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # run with our environment plus the caller's overrides,
        # stringified for the shell
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # caller wants output captured: forward it line by line
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    # OpenVMS encodes success in the low bit of the exit status
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
821
816
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback exactly one frame deep means the TypeError came
            # from the call itself (bad arguments), not from inside func
            tb = sys.exc_info()[2]
            if len(traceback.extract_tb(tb)) == 1:
                raise error.SignatureError
            raise
    return check
833
828
def copyfile(src, dest, hardlink=False):
    "copy a file, preserving mode and atime/mtime"
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            # fall through to a normal copy below
            pass
    if os.path.islink(src):
        # recreate the symlink rather than copying its target
        os.symlink(os.readlink(src), dest)
        return
    try:
        shutil.copyfile(src, dest)
        shutil.copymode(src, dest)
    except shutil.Error as err:
        raise Abort(str(err))
854
849
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # only attempt hardlinks when src and dst share a device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset nested progress by the files already processed
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # hardlinking failed: copy, and stop trying to link
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    # signal completion of this (sub)tree
    progress(topic, None)

    return hardlink, num
891
886
# names and characters that may not appear in filenames on Windows
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine every path component, accepting both separators
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters are never valid in Windows filenames
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # reserved device names match regardless of any extension
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        # trailing dot or space is rejected, but '.' and '..' themselves
        # are allowed path components
        t = n[-1]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
942
937
# pick the filename validator matching the host OS
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
947
942
def makelock(info, pathname):
    # prefer a symlink: creation is atomic and the lock data rides in
    # the link target itself
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # fallback: exclusively create a regular file holding the info
    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
960
955
def readlock(pathname):
    # a lock is normally a symlink whose target carries the info
    try:
        return os.readlink(pathname)
    except OSError as err:
        if err.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    # otherwise it is a plain file; return its contents
    lockfile = posixfile(pathname)
    data = lockfile.read()
    lockfile.close()
    return data
973
968
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        # pseudo file object: fall back to stat-by-name
        return os.stat(fp.name)
980
975
981 # File system features
976 # File system features
982
977
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    origstat = os.lstat(path)
    dirpath, base = os.path.split(path)
    folded = base.upper()
    if base == folded:
        folded = base.lower()
        if base == folded:
            # the name has no case to flip; no evidence either way
            return True
    swapped = os.path.join(dirpath, folded)
    try:
        if os.lstat(swapped) == origstat:
            # the case-flipped name reaches the very same file
            return False
        return True
    except OSError:
        # flipped name does not exist: case is significant here
        return True
1005
1000
# _re2 is tri-state: None = re2 importable but untested,
# False = unavailable/unusable, True = usable (set by _re._checkre2)
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1011
1006
class _re(object):
    def _checkre2(self):
        # probe re2 once and record the verdict in the module-level flag
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline rather than as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks; fall back to re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton; use util.re.compile / util.re.escape
re = _re()
1056
1051
# per-directory cache of {normcased name: on-disk name} mappings
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # Fix: str.replace returns a new string; the result used to be
    # discarded, leaving a bare '\' in the character class below, where
    # it would escape the following separator instead of matching itself.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separators pass through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the caller's spelling for names not on disk
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1099
1094
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always clean up the probe files, best effort
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1131
1126
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # note: os.altsep may be None, so the result of this expression may
    # be None as well -- callers only rely on its truthiness
    return os.altsep and path.endswith(os.altsep)
1135
1130
def splitpath(path):
    '''Split path into its components, separated by os.sep.

    os.altsep is deliberately ignored: this is meant to be a thin
    alias for "path.split(os.sep)".  Run os.path.normpath() on the
    input first if normalization is needed.'''
    components = path.split(os.sep)
    return components
1143
1138
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # elsewhere: Windows always has a GUI; X11 needs $DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1158
1153
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file in the same directory as the original --
    # presumably so a later rename stays on one filesystem (TODO confirm)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            # a missing original just means the copy starts empty
            if inst.errno == errno.ENOENT:
                return temp
            # annotate the exception with the file involved so callers
            # can produce a useful error message
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # best-effort removal of the partial copy before propagating
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1197
1192
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # 'w' in mode means the caller will truncate anyway, so skip
        # copying the original contents into the temp file
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # commit: only rename if we have not already closed/discarded,
        # so calling close() twice is harmless
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abort: drop the temp file (best effort) and close the handle;
        # the original file is left untouched
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1235
1230
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as inst:
        if inst.errno == errno.EEXIST:
            # already present: nothing to do (and no chmod either)
            return
        if not name or inst.errno != errno.ENOENT:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # hit the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1252
1247
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    # create ancestors first, bottom of the recursion is the root
    # (where dirname(abspath(p)) == p)
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as inst:
        if inst.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1274
1269
def readfile(path):
    """Return the entire content of the file at ``path`` (binary mode)."""
    with open(path, 'rb') as fp:
        return fp.read()
1281
1276
def writefile(path, text):
    """Overwrite the file at ``path`` with ``text`` (binary mode)."""
    with open(path, 'wb') as fp:
        fp.write(text)
1288
1283
def appendfile(path, text):
    """Append ``text`` to the file at ``path`` (binary mode),
    creating it if necessary."""
    with open(path, 'ab') as fp:
        fp.write(text)
1295
1290
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        # re-slice oversized (>1MB) incoming chunks into 256KB pieces so
        # a single huge chunk does not defeat the buffering below
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        # buffered, not-yet-consumed chunks, in arrival order
        self._queue = collections.deque()
        # number of bytes of queue[0] already handed out by read()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            # drain: note this ignores anything still sitting in
            # self._queue; callers apparently only use it fresh --
            # TODO confirm against callers
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                # pull roughly 256KB ahead of demand
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1376
1371
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits without touching the file
        piece = nbytes and f.read(nbytes)
        if not piece:
            break
        if limit:
            limit -= len(piece)
        yield piece
1397
1392
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # local zone offset = UTC wall-clock minus local wall-clock,
    # in seconds (positive means west of UTC)
    utcwall = datetime.datetime.utcfromtimestamp(timestamp)
    localwall = datetime.datetime.fromtimestamp(timestamp)
    diff = utcwall - localwall
    tz = diff.days * 86400 + diff.seconds
    return timestamp, tz
1410
1405
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.

    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    when, zone = date or makedate()
    if when < 0:
        # time.gmtime(lt) fails on Windows for lt < -43200
        when = 0
        zone = 0
    if "%1" in format or "%2" in format or "%z" in format:
        sign = (zone > 0) and "-" or "+"
        hours, mins = divmod(abs(zone) // 60, 60)
        # expand %z first so it picks up the %1/%2 substitutions below
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, hours))
        format = format.replace("%2", "%02d" % mins)
    try:
        expanded = time.gmtime(float(when) - zone)
    except ValueError:
        # time was out of range
        expanded = time.gmtime(sys.maxint)
    return time.strftime(format, expanded)
1434
1429
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into an ISO 8601 date (YYYY-MM-DD).

    Delegates to datestr() with a fixed '%Y-%m-%d' format; when ``date``
    is None/falsy, datestr() substitutes the current time via makedate().
    """
    return datestr(date, format='%Y-%m-%d')
1438
1433
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts "+HHMM"/"-HHMM" numeric offsets and the literal names
    "GMT" and "UTC".  Returns the offset in seconds west of UTC
    (matching the (unixtime, offset) convention used elsewhere in this
    module), or None if the string is not recognized.
    """
    # check the length before indexing so an empty or short string
    # returns None instead of raising IndexError on tz[0]
    if len(tz) == 5 and tz[0] in "+-" and tz[1:].isdigit():
        sign = (tz[0] == "+") and 1 or -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # negate: "+0100" means one hour east, i.e. offset -3600
        return -sign * (hours * 60 + minutes) * 60
    if tz == "GMT" or tz == "UTC":
        return 0
    return None
1449
1444
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    ``defaults`` maps format-part keys ("d", "mb", "yY", "HI", "M", "S")
    to (bias, today) string pairs used to fill in elements missing from
    ``format``.  The [] default is never actually indexed in practice:
    callers in this module (parsedate/matchdate) always supply a dict.
    """
    # NOTE: unixtime = localunixtime + offset
    # trailing token may be an explicit timezone; if so, strip it
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # append "@<value>" / "@%<directive>" pairs so strptime can
            # consume the synthesized elements
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    # timegm() treats the tuple as UTC, giving local wall-clock seconds
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1479
1474
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    # empty/None date means "epoch, UTC"
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates, in English or the current locale
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses; the for/else runs
        # only when no format matched
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1558
1553
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified parts down (Jan 1st, 00:00:00)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unspecified parts up (Dec 31st, 23:59:59); try
        # month lengths from longest down until one parses
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range "A to B"
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anywhere within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1634
1629
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        body = pattern[3:]
        try:
            compiled = remod.compile(body)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', body, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # no (or unrecognized) prefix: exact string equality
    return 'literal', pattern, pattern.__eq__
1673
1668
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # "name@host" -> "name"
    user = user.split('@', 1)[0]
    # "Real Name <name" -> "name" (keep what follows the first '<')
    user = user.split('<', 1)[-1]
    # keep only the first space-separated word
    user = user.split(' ', 1)[0]
    # and only the first dotted component of it
    user = user.split('.', 1)[0]
    return user
1689
1684
def emailuser(user):
    """Return the user portion of an email address."""
    # drop the domain, then drop any "Real Name <" prefix
    user = user.split('@', 1)[0]
    return user.split('<', 1)[-1]
1699
1694
def email(author):
    '''get email of author.'''
    # take the text between '<' and '>'; with no '<' the slice starts
    # at 0, with no '>' it runs to the end of the string
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        end = None
    return author[start:end]
1706
1701
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # NOTE(review): delegates entirely to encoding.trim; per the docstring
    # this trims by display columns rather than bytes -- confirm in the
    # encoding module
    return encoding.trim(text, maxlength, ellipsis='...')
1710
1705
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        # pick the first (multiplier, divisor, format) row whose
        # threshold the count reaches; rows are ordered largest-first
        for mult, div, fmt in unittable:
            if count >= div * mult:
                return fmt % (count / float(div))
        # below every threshold: fall back to the last row's format
        lastfmt = unittable[-1][2]
        return lastfmt % count

    return render
1721
1716
# Render a byte quantity with a human-friendly unit.  Each row is
# (multiplier, divisor, format); the first row whose threshold
# (divisor * multiplier) the count reaches determines the format, so
# larger counts get fewer decimals.  The final row is the fallback.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1734
1729
def uirepr(s):
    """repr() for user-facing output that keeps Windows paths readable."""
    r = repr(s)
    # repr() doubles backslashes; collapse them back to single ones
    return r.replace('\\\\', '\\')
1738
1733
1739 # delay import of textwrap
1734 # delay import of textwrap
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr into (head, remainder) so that head occupies at
            # most space_left display columns per encoding.ucolwidth.
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            # Called when the next chunk cannot fit on any line; either
            # break it at the column boundary or emit it whole on a line
            # of its own, mirroring the base-class contract.
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # Replace this factory with the class itself so the class body is
    # only executed on the first call; later calls go straight to tw().
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1842
1837
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap a byte string to 'width' display columns.

    initindent prefixes the first output line, hangindent the following
    ones.  The result is re-encoded in the local encoding.
    """
    widest = max(len(hangindent), len(initindent))
    if width <= widest:
        # adjust for weird terminal size
        width = max(78, widest + 1)

    def todisplay(s):
        return s.decode(encoding.encoding, encoding.encodingmode)

    wrapper = MBTextWrapper(width=width,
                            initial_indent=todisplay(initindent),
                            subsequent_indent=todisplay(hangindent))
    return wrapper.fill(todisplay(line)).encode(encoding.encoding)
1855
1850
def iterlines(iterator):
    """Yield every text line contained in an iterable of string chunks."""
    for chunk in iterator:
        # a chunk may hold zero or more complete lines
        for oneline in chunk.splitlines():
            yield oneline
1860
1855
def expandpath(path):
    """Expand environment variables, then '~' constructs, in path."""
    path = os.path.expandvars(path)
    return os.path.expanduser(path)
1863
1858
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    # frozen (standalone) builds must re-invoke the executable itself
    return [sys.executable] if mainfrozen() else gethgcmd()
1874
1869
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # record the (pid, status) of any reaped child so the polling
        # loop below can notice that the spawned process died
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on all platforms (e.g. Windows)
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-test condfn() after detecting the child's death to avoid
            # the race where it succeeded and exited between our checks
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # prevhandler is only ever set when SIGCHLD exists, so the
        # direct signal.SIGCHLD reference here is safe
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
1909
1904
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    The caller's mapping is never modified.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # prefix is regex-escaped ('\\$'); the literal char follows
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # fix: operate on a copy so the doubled-prefix entry does not
        # leak into the dict the caller passed in
        mapping = dict(mapping)
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    # x.group() is the whole match including the one-char prefix; strip it
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1934
1929
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall back to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
1951
1946
# every accepted spelling of a boolean config value, lowercased
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # comparison is case-insensitive; unknown spellings map to None
    return _booleans.get(s.lower())
1962
1957
1963 _hexdig = '0123456789ABCDEFabcdef'
1958 _hexdig = '0123456789ABCDEFabcdef'
1964 _hextochr = dict((a + b, chr(int(a + b, 16)))
1959 _hextochr = dict((a + b, chr(int(a + b, 16)))
1965 for a in _hexdig for b in _hexdig)
1960 for a in _hexdig for b in _hexdig)
1966
1961
1967 def _urlunquote(s):
1962 def _urlunquote(s):
1968 """Decode HTTP/HTML % encoding.
1963 """Decode HTTP/HTML % encoding.
1969
1964
1970 >>> _urlunquote('abc%20def')
1965 >>> _urlunquote('abc%20def')
1971 'abc def'
1966 'abc def'
1972 """
1967 """
1973 res = s.split('%')
1968 res = s.split('%')
1974 # fastpath
1969 # fastpath
1975 if len(res) == 1:
1970 if len(res) == 1:
1976 return s
1971 return s
1977 s = res[0]
1972 s = res[0]
1978 for item in res[1:]:
1973 for item in res[1:]:
1979 try:
1974 try:
1980 s += _hextochr[item[:2]] + item[2:]
1975 s += _hextochr[item[:2]] + item[2:]
1981 except KeyError:
1976 except KeyError:
1982 s += '%' + item
1977 s += '%' + item
1983 except UnicodeDecodeError:
1978 except UnicodeDecodeError:
1984 s += unichr(int(item[:2], 16)) + item[2:]
1979 s += unichr(int(item[:2], 16)) + item[2:]
1985 return s
1980 return s
1986
1981
1987 class url(object):
1982 class url(object):
1988 r"""Reliable URL parser.
1983 r"""Reliable URL parser.
1989
1984
1990 This parses URLs and provides attributes for the following
1985 This parses URLs and provides attributes for the following
1991 components:
1986 components:
1992
1987
1993 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1988 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1994
1989
1995 Missing components are set to None. The only exception is
1990 Missing components are set to None. The only exception is
1996 fragment, which is set to '' if present but empty.
1991 fragment, which is set to '' if present but empty.
1997
1992
1998 If parsefragment is False, fragment is included in query. If
1993 If parsefragment is False, fragment is included in query. If
1999 parsequery is False, query is included in path. If both are
1994 parsequery is False, query is included in path. If both are
2000 False, both fragment and query are included in path.
1995 False, both fragment and query are included in path.
2001
1996
2002 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1997 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2003
1998
2004 Note that for backward compatibility reasons, bundle URLs do not
1999 Note that for backward compatibility reasons, bundle URLs do not
2005 take host names. That means 'bundle://../' has a path of '../'.
2000 take host names. That means 'bundle://../' has a path of '../'.
2006
2001
2007 Examples:
2002 Examples:
2008
2003
2009 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2004 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2010 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2005 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2011 >>> url('ssh://[::1]:2200//home/joe/repo')
2006 >>> url('ssh://[::1]:2200//home/joe/repo')
2012 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2007 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2013 >>> url('file:///home/joe/repo')
2008 >>> url('file:///home/joe/repo')
2014 <url scheme: 'file', path: '/home/joe/repo'>
2009 <url scheme: 'file', path: '/home/joe/repo'>
2015 >>> url('file:///c:/temp/foo/')
2010 >>> url('file:///c:/temp/foo/')
2016 <url scheme: 'file', path: 'c:/temp/foo/'>
2011 <url scheme: 'file', path: 'c:/temp/foo/'>
2017 >>> url('bundle:foo')
2012 >>> url('bundle:foo')
2018 <url scheme: 'bundle', path: 'foo'>
2013 <url scheme: 'bundle', path: 'foo'>
2019 >>> url('bundle://../foo')
2014 >>> url('bundle://../foo')
2020 <url scheme: 'bundle', path: '../foo'>
2015 <url scheme: 'bundle', path: '../foo'>
2021 >>> url(r'c:\foo\bar')
2016 >>> url(r'c:\foo\bar')
2022 <url path: 'c:\\foo\\bar'>
2017 <url path: 'c:\\foo\\bar'>
2023 >>> url(r'\\blah\blah\blah')
2018 >>> url(r'\\blah\blah\blah')
2024 <url path: '\\\\blah\\blah\\blah'>
2019 <url path: '\\\\blah\\blah\\blah'>
2025 >>> url(r'\\blah\blah\blah#baz')
2020 >>> url(r'\\blah\blah\blah#baz')
2026 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2021 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2027 >>> url(r'file:///C:\users\me')
2022 >>> url(r'file:///C:\users\me')
2028 <url scheme: 'file', path: 'C:\\users\\me'>
2023 <url scheme: 'file', path: 'C:\\users\\me'>
2029
2024
2030 Authentication credentials:
2025 Authentication credentials:
2031
2026
2032 >>> url('ssh://joe:xyz@x/repo')
2027 >>> url('ssh://joe:xyz@x/repo')
2033 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2028 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2034 >>> url('ssh://joe@x/repo')
2029 >>> url('ssh://joe@x/repo')
2035 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2030 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2036
2031
2037 Query strings and fragments:
2032 Query strings and fragments:
2038
2033
2039 >>> url('http://host/a?b#c')
2034 >>> url('http://host/a?b#c')
2040 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2035 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2041 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2036 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2042 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2037 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2043 """
2038 """
2044
2039
2045 _safechars = "!~*'()+"
2040 _safechars = "!~*'()+"
2046 _safepchars = "/!~*'()+:\\"
2041 _safepchars = "/!~*'()+:\\"
2047 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2042 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2048
2043
2049 def __init__(self, path, parsequery=True, parsefragment=True):
2044 def __init__(self, path, parsequery=True, parsefragment=True):
2050 # We slowly chomp away at path until we have only the path left
2045 # We slowly chomp away at path until we have only the path left
2051 self.scheme = self.user = self.passwd = self.host = None
2046 self.scheme = self.user = self.passwd = self.host = None
2052 self.port = self.path = self.query = self.fragment = None
2047 self.port = self.path = self.query = self.fragment = None
2053 self._localpath = True
2048 self._localpath = True
2054 self._hostport = ''
2049 self._hostport = ''
2055 self._origpath = path
2050 self._origpath = path
2056
2051
2057 if parsefragment and '#' in path:
2052 if parsefragment and '#' in path:
2058 path, self.fragment = path.split('#', 1)
2053 path, self.fragment = path.split('#', 1)
2059 if not path:
2054 if not path:
2060 path = None
2055 path = None
2061
2056
2062 # special case for Windows drive letters and UNC paths
2057 # special case for Windows drive letters and UNC paths
2063 if hasdriveletter(path) or path.startswith(r'\\'):
2058 if hasdriveletter(path) or path.startswith(r'\\'):
2064 self.path = path
2059 self.path = path
2065 return
2060 return
2066
2061
2067 # For compatibility reasons, we can't handle bundle paths as
2062 # For compatibility reasons, we can't handle bundle paths as
2068 # normal URLS
2063 # normal URLS
2069 if path.startswith('bundle:'):
2064 if path.startswith('bundle:'):
2070 self.scheme = 'bundle'
2065 self.scheme = 'bundle'
2071 path = path[7:]
2066 path = path[7:]
2072 if path.startswith('//'):
2067 if path.startswith('//'):
2073 path = path[2:]
2068 path = path[2:]
2074 self.path = path
2069 self.path = path
2075 return
2070 return
2076
2071
2077 if self._matchscheme(path):
2072 if self._matchscheme(path):
2078 parts = path.split(':', 1)
2073 parts = path.split(':', 1)
2079 if parts[0]:
2074 if parts[0]:
2080 self.scheme, path = parts
2075 self.scheme, path = parts
2081 self._localpath = False
2076 self._localpath = False
2082
2077
2083 if not path:
2078 if not path:
2084 path = None
2079 path = None
2085 if self._localpath:
2080 if self._localpath:
2086 self.path = ''
2081 self.path = ''
2087 return
2082 return
2088 else:
2083 else:
2089 if self._localpath:
2084 if self._localpath:
2090 self.path = path
2085 self.path = path
2091 return
2086 return
2092
2087
2093 if parsequery and '?' in path:
2088 if parsequery and '?' in path:
2094 path, self.query = path.split('?', 1)
2089 path, self.query = path.split('?', 1)
2095 if not path:
2090 if not path:
2096 path = None
2091 path = None
2097 if not self.query:
2092 if not self.query:
2098 self.query = None
2093 self.query = None
2099
2094
2100 # // is required to specify a host/authority
2095 # // is required to specify a host/authority
2101 if path and path.startswith('//'):
2096 if path and path.startswith('//'):
2102 parts = path[2:].split('/', 1)
2097 parts = path[2:].split('/', 1)
2103 if len(parts) > 1:
2098 if len(parts) > 1:
2104 self.host, path = parts
2099 self.host, path = parts
2105 else:
2100 else:
2106 self.host = parts[0]
2101 self.host = parts[0]
2107 path = None
2102 path = None
2108 if not self.host:
2103 if not self.host:
2109 self.host = None
2104 self.host = None
2110 # path of file:///d is /d
2105 # path of file:///d is /d
2111 # path of file:///d:/ is d:/, not /d:/
2106 # path of file:///d:/ is d:/, not /d:/
2112 if path and not hasdriveletter(path):
2107 if path and not hasdriveletter(path):
2113 path = '/' + path
2108 path = '/' + path
2114
2109
2115 if self.host and '@' in self.host:
2110 if self.host and '@' in self.host:
2116 self.user, self.host = self.host.rsplit('@', 1)
2111 self.user, self.host = self.host.rsplit('@', 1)
2117 if ':' in self.user:
2112 if ':' in self.user:
2118 self.user, self.passwd = self.user.split(':', 1)
2113 self.user, self.passwd = self.user.split(':', 1)
2119 if not self.host:
2114 if not self.host:
2120 self.host = None
2115 self.host = None
2121
2116
2122 # Don't split on colons in IPv6 addresses without ports
2117 # Don't split on colons in IPv6 addresses without ports
2123 if (self.host and ':' in self.host and
2118 if (self.host and ':' in self.host and
2124 not (self.host.startswith('[') and self.host.endswith(']'))):
2119 not (self.host.startswith('[') and self.host.endswith(']'))):
2125 self._hostport = self.host
2120 self._hostport = self.host
2126 self.host, self.port = self.host.rsplit(':', 1)
2121 self.host, self.port = self.host.rsplit(':', 1)
2127 if not self.host:
2122 if not self.host:
2128 self.host = None
2123 self.host = None
2129
2124
2130 if (self.host and self.scheme == 'file' and
2125 if (self.host and self.scheme == 'file' and
2131 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2126 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2132 raise Abort(_('file:// URLs can only refer to localhost'))
2127 raise Abort(_('file:// URLs can only refer to localhost'))
2133
2128
2134 self.path = path
2129 self.path = path
2135
2130
2136 # leave the query string escaped
2131 # leave the query string escaped
2137 for a in ('user', 'passwd', 'host', 'port',
2132 for a in ('user', 'passwd', 'host', 'port',
2138 'path', 'fragment'):
2133 'path', 'fragment'):
2139 v = getattr(self, a)
2134 v = getattr(self, a)
2140 if v is not None:
2135 if v is not None:
2141 setattr(self, a, _urlunquote(v))
2136 setattr(self, a, _urlunquote(v))
2142
2137
    def __repr__(self):
        # Debug representation: list only the components that were
        # actually parsed; attributes left at None are omitted.
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)
2151
2146
    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            # plain local paths are stored verbatim; only 'bundle:' and a
            # fragment need to be re-attached
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                # drive-letter paths need the extra slash: file:///c:/...
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                # bracketed IPv6 literals must not be percent-quoted
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s
2228
2223
    def authinfo(self):
        '''Return (url-string-without-credentials, auth-tuple-or-None).'''
        user, passwd = self.user, self.passwd
        try:
            # temporarily strip the credentials so str(self) omits them
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            # always restore, even if str() raised
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))
2244
2239
    def isabs(self):
        '''Return True if this URL/path is absolute, i.e. cannot be
        joined onto a base path.'''
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False
2255
2250
    def localpath(self):
        # Return the filesystem path for 'file:' and 'bundle:' URLs;
        # any other scheme falls back to the original, unparsed string.
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath
2268
2263
    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        # no scheme (a bare path), file: and bundle: all refer to the
        # local filesystem
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2273
2268
def hasscheme(path):
    '''Return True if path parses with an explicit URL scheme.'''
    parsed = url(path)
    if parsed.scheme:
        return True
    return False
2276
2271
def hasdriveletter(path):
    '''Report whether path begins with a Windows drive letter ("c:...").

    Falsy inputs ('' or None) are returned unchanged, matching the
    truthiness contract callers rely on; otherwise a bool is returned.
    '''
    return path and path[0:1].isalpha() and path[1:2] == ':'
2279
2274
def urllocalpath(path):
    '''Return the local filesystem path for path, without parsing any
    query string or fragment.'''
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2282
2277
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2289
2284
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2295
2290
def isatty(fd):
    '''Return fd.isatty(); objects without an isatty method count as
    non-ttys.'''
    try:
        result = fd.isatty()
    except AttributeError:
        result = False
    return result
2301
2296
# Duration formatter built on unitcountfn (defined earlier in this file):
# a table of (threshold, divisor, format string) entries spanning seconds
# down to nanoseconds, in decreasing unit order.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2317
2312
# single-element list used as a mutable counter of the current nesting
# depth of @timed-decorated calls (shared by all wrappers in this module)
_timenesting = [0]
2319
2314
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = time.time()
        step = 2
        # deepen the shared indentation level for nested @timed calls
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            duration = time.time() - begin
            _timenesting[0] -= step
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(duration)))
    return wrapper
2344
2339
# Suffix table for sizetoint(); scanned in order with str.endswith, so the
# two-letter suffixes ('kb', 'mb', 'gb') are tried before the bare 'b'.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2347
2342
def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        # first matching suffix wins; _sizeunits is ordered accordingly
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2366
2361
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # keep the registry sorted by source name (in place, as before)
        self._hooks.sort(key=lambda entry: entry[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
2384
2379
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so interleaved output stays ordered
        otherf.flush()
    f.write('%s at:\n' % msg)
    # drop this function's own frame plus the requested number of callers
    frames = traceback.extract_stack()[:-skip - 1]
    locations = [('%s:%s' % (filename, lineno), funcname)
                 for filename, lineno, funcname, _text in frames]
    if locations:
        # pad file:line so the 'in <function>' column lines up
        width = max(len(loc) for loc, _func in locations)
        for loc, funcname in locations:
            f.write(' %-*s in %s\n' % (width, loc, funcname))
    f.flush()
2401
2396
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # ``map`` is either a dirstate-like mapping (has iteritems();
        # entries whose first state byte equals ``skip`` are ignored)
        # or a plain iterable of file names.
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        # Bump the refcount of every ancestor directory of path.
        # Invariant: if a directory is already present, all of its
        # ancestors are too, so we can stop at the first hit.
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        # Drop one reference per ancestor directory; a directory whose
        # count reaches zero is removed, and (by the same invariant as
        # addpath) its ancestors are handled on subsequent iterations.
        # Raises KeyError if path was never added.
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2437
2432
# Prefer the C implementation from parsers when available; it replaces
# the pure-Python dirs class defined above.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2440
2435
def finddirs(path):
    '''Yield each ancestor directory of path, deepest first.

    The path itself is never yielded; a path with no '/' yields nothing.
    '''
    head = path
    while '/' in head:
        head = head.rsplit('/', 1)[0]
        yield head
2446
2441
2447 # compression utility
2442 # compression utility
2448
2443
class nocompress(object):
    '''Compressor with the standard compress/flush interface that passes
    data through untouched.'''
    def compress(self, x):
        # identity transform
        return x
    def flush(self):
        # nothing is ever buffered, so there is no trailing data
        return ""
2454
2449
# compression-type marker -> compressor factory
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2463
2458
def _makedecompressor(decompcls):
    '''Return a function mapping a file handle to a chunkbuffer of data
    decompressed by a fresh instance of decompcls.'''
    def generate(fh):
        # the decompressor is created lazily, on first iteration
        engine = decompcls()
        for piece in filechunkiter(fh):
            yield engine.decompress(piece)
    def func(fh):
        return chunkbuffer(generate(fh))
    return func
2472
2467
def _bz2():
    # Build a decompressor for bzip2 data whose two-byte "BZ" magic was
    # consumed when reading the compression-type marker (see the
    # '_truncatedBZ' entry in decompressors below).
    d = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    d.decompress('BZ')
    return d
2479
2474
# compression-type marker -> function(fh) producing decompressed chunks;
# '_truncatedBZ' handles bzip2 streams whose leading "BZ" magic was
# already consumed (see _bz2 above)
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2487
2482
# convenient shortcut for interactive debugging sessions
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now