##// END OF EJS Templates
util: also catch IndexError...
Sean Farley -
r26665:6331a0c3 default
parent child Browse files
Show More
@@ -1,2477 +1,2477 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding, parsers
18 import error, osutil, encoding, parsers
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import stat
22 import stat
23 import imp, socket, urllib
23 import imp, socket, urllib
24 import gc
24 import gc
25 import bz2
25 import bz2
26 import zlib
26 import zlib
27
27
28 if os.name == 'nt':
28 if os.name == 'nt':
29 import windows as platform
29 import windows as platform
30 else:
30 else:
31 import posix as platform
31 import posix as platform
32
32
33 cachestat = platform.cachestat
33 cachestat = platform.cachestat
34 checkexec = platform.checkexec
34 checkexec = platform.checkexec
35 checklink = platform.checklink
35 checklink = platform.checklink
36 copymode = platform.copymode
36 copymode = platform.copymode
37 executablepath = platform.executablepath
37 executablepath = platform.executablepath
38 expandglobs = platform.expandglobs
38 expandglobs = platform.expandglobs
39 explainexit = platform.explainexit
39 explainexit = platform.explainexit
40 findexe = platform.findexe
40 findexe = platform.findexe
41 gethgcmd = platform.gethgcmd
41 gethgcmd = platform.gethgcmd
42 getuser = platform.getuser
42 getuser = platform.getuser
43 groupmembers = platform.groupmembers
43 groupmembers = platform.groupmembers
44 groupname = platform.groupname
44 groupname = platform.groupname
45 hidewindow = platform.hidewindow
45 hidewindow = platform.hidewindow
46 isexec = platform.isexec
46 isexec = platform.isexec
47 isowner = platform.isowner
47 isowner = platform.isowner
48 localpath = platform.localpath
48 localpath = platform.localpath
49 lookupreg = platform.lookupreg
49 lookupreg = platform.lookupreg
50 makedir = platform.makedir
50 makedir = platform.makedir
51 nlinks = platform.nlinks
51 nlinks = platform.nlinks
52 normpath = platform.normpath
52 normpath = platform.normpath
53 normcase = platform.normcase
53 normcase = platform.normcase
54 normcasespec = platform.normcasespec
54 normcasespec = platform.normcasespec
55 normcasefallback = platform.normcasefallback
55 normcasefallback = platform.normcasefallback
56 openhardlinks = platform.openhardlinks
56 openhardlinks = platform.openhardlinks
57 oslink = platform.oslink
57 oslink = platform.oslink
58 parsepatchoutput = platform.parsepatchoutput
58 parsepatchoutput = platform.parsepatchoutput
59 pconvert = platform.pconvert
59 pconvert = platform.pconvert
60 poll = platform.poll
60 poll = platform.poll
61 popen = platform.popen
61 popen = platform.popen
62 posixfile = platform.posixfile
62 posixfile = platform.posixfile
63 quotecommand = platform.quotecommand
63 quotecommand = platform.quotecommand
64 readpipe = platform.readpipe
64 readpipe = platform.readpipe
65 rename = platform.rename
65 rename = platform.rename
66 removedirs = platform.removedirs
66 removedirs = platform.removedirs
67 samedevice = platform.samedevice
67 samedevice = platform.samedevice
68 samefile = platform.samefile
68 samefile = platform.samefile
69 samestat = platform.samestat
69 samestat = platform.samestat
70 setbinary = platform.setbinary
70 setbinary = platform.setbinary
71 setflags = platform.setflags
71 setflags = platform.setflags
72 setsignalhandler = platform.setsignalhandler
72 setsignalhandler = platform.setsignalhandler
73 shellquote = platform.shellquote
73 shellquote = platform.shellquote
74 spawndetached = platform.spawndetached
74 spawndetached = platform.spawndetached
75 split = platform.split
75 split = platform.split
76 sshargs = platform.sshargs
76 sshargs = platform.sshargs
77 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
77 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
78 statisexec = platform.statisexec
78 statisexec = platform.statisexec
79 statislink = platform.statislink
79 statislink = platform.statislink
80 termwidth = platform.termwidth
80 termwidth = platform.termwidth
81 testpid = platform.testpid
81 testpid = platform.testpid
82 umask = platform.umask
82 umask = platform.umask
83 unlink = platform.unlink
83 unlink = platform.unlink
84 unlinkpath = platform.unlinkpath
84 unlinkpath = platform.unlinkpath
85 username = platform.username
85 username = platform.username
86
86
87 # Python compatibility
87 # Python compatibility
88
88
89 _notset = object()
89 _notset = object()
90
90
91 def safehasattr(thing, attr):
91 def safehasattr(thing, attr):
92 return getattr(thing, attr, _notset) is not _notset
92 return getattr(thing, attr, _notset) is not _notset
93
93
94 def sha1(s=''):
94 def sha1(s=''):
95 '''
95 '''
96 Low-overhead wrapper around Python's SHA support
96 Low-overhead wrapper around Python's SHA support
97
97
98 >>> f = _fastsha1
98 >>> f = _fastsha1
99 >>> a = sha1()
99 >>> a = sha1()
100 >>> a = f()
100 >>> a = f()
101 >>> a.hexdigest()
101 >>> a.hexdigest()
102 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
102 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
103 '''
103 '''
104
104
105 return _fastsha1(s)
105 return _fastsha1(s)
106
106
107 def _fastsha1(s=''):
107 def _fastsha1(s=''):
108 # This function will import sha1 from hashlib or sha (whichever is
108 # This function will import sha1 from hashlib or sha (whichever is
109 # available) and overwrite itself with it on the first call.
109 # available) and overwrite itself with it on the first call.
110 # Subsequent calls will go directly to the imported function.
110 # Subsequent calls will go directly to the imported function.
111 if sys.version_info >= (2, 5):
111 if sys.version_info >= (2, 5):
112 from hashlib import sha1 as _sha1
112 from hashlib import sha1 as _sha1
113 else:
113 else:
114 from sha import sha as _sha1
114 from sha import sha as _sha1
115 global _fastsha1, sha1
115 global _fastsha1, sha1
116 _fastsha1 = sha1 = _sha1
116 _fastsha1 = sha1 = _sha1
117 return _sha1(s)
117 return _sha1(s)
118
118
119 def md5(s=''):
119 def md5(s=''):
120 try:
120 try:
121 from hashlib import md5 as _md5
121 from hashlib import md5 as _md5
122 except ImportError:
122 except ImportError:
123 from md5 import md5 as _md5
123 from md5 import md5 as _md5
124 global md5
124 global md5
125 md5 = _md5
125 md5 = _md5
126 return _md5(s)
126 return _md5(s)
127
127
128 DIGESTS = {
128 DIGESTS = {
129 'md5': md5,
129 'md5': md5,
130 'sha1': sha1,
130 'sha1': sha1,
131 }
131 }
132 # List of digest types from strongest to weakest
132 # List of digest types from strongest to weakest
133 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
133 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
134
134
135 try:
135 try:
136 import hashlib
136 import hashlib
137 DIGESTS.update({
137 DIGESTS.update({
138 'sha512': hashlib.sha512,
138 'sha512': hashlib.sha512,
139 })
139 })
140 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
140 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
141 except ImportError:
141 except ImportError:
142 pass
142 pass
143
143
144 for k in DIGESTS_BY_STRENGTH:
144 for k in DIGESTS_BY_STRENGTH:
145 assert k in DIGESTS
145 assert k in DIGESTS
146
146
147 class digester(object):
147 class digester(object):
148 """helper to compute digests.
148 """helper to compute digests.
149
149
150 This helper can be used to compute one or more digests given their name.
150 This helper can be used to compute one or more digests given their name.
151
151
152 >>> d = digester(['md5', 'sha1'])
152 >>> d = digester(['md5', 'sha1'])
153 >>> d.update('foo')
153 >>> d.update('foo')
154 >>> [k for k in sorted(d)]
154 >>> [k for k in sorted(d)]
155 ['md5', 'sha1']
155 ['md5', 'sha1']
156 >>> d['md5']
156 >>> d['md5']
157 'acbd18db4cc2f85cedef654fccc4a4d8'
157 'acbd18db4cc2f85cedef654fccc4a4d8'
158 >>> d['sha1']
158 >>> d['sha1']
159 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
159 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
160 >>> digester.preferred(['md5', 'sha1'])
160 >>> digester.preferred(['md5', 'sha1'])
161 'sha1'
161 'sha1'
162 """
162 """
163
163
164 def __init__(self, digests, s=''):
164 def __init__(self, digests, s=''):
165 self._hashes = {}
165 self._hashes = {}
166 for k in digests:
166 for k in digests:
167 if k not in DIGESTS:
167 if k not in DIGESTS:
168 raise Abort(_('unknown digest type: %s') % k)
168 raise Abort(_('unknown digest type: %s') % k)
169 self._hashes[k] = DIGESTS[k]()
169 self._hashes[k] = DIGESTS[k]()
170 if s:
170 if s:
171 self.update(s)
171 self.update(s)
172
172
173 def update(self, data):
173 def update(self, data):
174 for h in self._hashes.values():
174 for h in self._hashes.values():
175 h.update(data)
175 h.update(data)
176
176
177 def __getitem__(self, key):
177 def __getitem__(self, key):
178 if key not in DIGESTS:
178 if key not in DIGESTS:
179 raise Abort(_('unknown digest type: %s') % k)
179 raise Abort(_('unknown digest type: %s') % k)
180 return self._hashes[key].hexdigest()
180 return self._hashes[key].hexdigest()
181
181
182 def __iter__(self):
182 def __iter__(self):
183 return iter(self._hashes)
183 return iter(self._hashes)
184
184
185 @staticmethod
185 @staticmethod
186 def preferred(supported):
186 def preferred(supported):
187 """returns the strongest digest type in both supported and DIGESTS."""
187 """returns the strongest digest type in both supported and DIGESTS."""
188
188
189 for k in DIGESTS_BY_STRENGTH:
189 for k in DIGESTS_BY_STRENGTH:
190 if k in supported:
190 if k in supported:
191 return k
191 return k
192 return None
192 return None
193
193
194 class digestchecker(object):
194 class digestchecker(object):
195 """file handle wrapper that additionally checks content against a given
195 """file handle wrapper that additionally checks content against a given
196 size and digests.
196 size and digests.
197
197
198 d = digestchecker(fh, size, {'md5': '...'})
198 d = digestchecker(fh, size, {'md5': '...'})
199
199
200 When multiple digests are given, all of them are validated.
200 When multiple digests are given, all of them are validated.
201 """
201 """
202
202
203 def __init__(self, fh, size, digests):
203 def __init__(self, fh, size, digests):
204 self._fh = fh
204 self._fh = fh
205 self._size = size
205 self._size = size
206 self._got = 0
206 self._got = 0
207 self._digests = dict(digests)
207 self._digests = dict(digests)
208 self._digester = digester(self._digests.keys())
208 self._digester = digester(self._digests.keys())
209
209
210 def read(self, length=-1):
210 def read(self, length=-1):
211 content = self._fh.read(length)
211 content = self._fh.read(length)
212 self._digester.update(content)
212 self._digester.update(content)
213 self._got += len(content)
213 self._got += len(content)
214 return content
214 return content
215
215
216 def validate(self):
216 def validate(self):
217 if self._size != self._got:
217 if self._size != self._got:
218 raise Abort(_('size mismatch: expected %d, got %d') %
218 raise Abort(_('size mismatch: expected %d, got %d') %
219 (self._size, self._got))
219 (self._size, self._got))
220 for k, v in self._digests.items():
220 for k, v in self._digests.items():
221 if v != self._digester[k]:
221 if v != self._digester[k]:
222 # i18n: first parameter is a digest name
222 # i18n: first parameter is a digest name
223 raise Abort(_('%s mismatch: expected %s, got %s') %
223 raise Abort(_('%s mismatch: expected %s, got %s') %
224 (k, v, self._digester[k]))
224 (k, v, self._digester[k]))
225
225
226 try:
226 try:
227 buffer = buffer
227 buffer = buffer
228 except NameError:
228 except NameError:
229 if sys.version_info[0] < 3:
229 if sys.version_info[0] < 3:
230 def buffer(sliceable, offset=0):
230 def buffer(sliceable, offset=0):
231 return sliceable[offset:]
231 return sliceable[offset:]
232 else:
232 else:
233 def buffer(sliceable, offset=0):
233 def buffer(sliceable, offset=0):
234 return memoryview(sliceable)[offset:]
234 return memoryview(sliceable)[offset:]
235
235
236 import subprocess
236 import subprocess
237 closefds = os.name == 'posix'
237 closefds = os.name == 'posix'
238
238
239 _chunksize = 4096
239 _chunksize = 4096
240
240
241 class bufferedinputpipe(object):
241 class bufferedinputpipe(object):
242 """a manually buffered input pipe
242 """a manually buffered input pipe
243
243
244 Python will not let us use buffered IO and lazy reading with 'polling' at
244 Python will not let us use buffered IO and lazy reading with 'polling' at
245 the same time. We cannot probe the buffer state and select will not detect
245 the same time. We cannot probe the buffer state and select will not detect
246 that data are ready to read if they are already buffered.
246 that data are ready to read if they are already buffered.
247
247
248 This class let us work around that by implementing its own buffering
248 This class let us work around that by implementing its own buffering
249 (allowing efficient readline) while offering a way to know if the buffer is
249 (allowing efficient readline) while offering a way to know if the buffer is
250 empty from the output (allowing collaboration of the buffer with polling).
250 empty from the output (allowing collaboration of the buffer with polling).
251
251
252 This class lives in the 'util' module because it makes use of the 'os'
252 This class lives in the 'util' module because it makes use of the 'os'
253 module from the python stdlib.
253 module from the python stdlib.
254 """
254 """
255
255
256 def __init__(self, input):
256 def __init__(self, input):
257 self._input = input
257 self._input = input
258 self._buffer = []
258 self._buffer = []
259 self._eof = False
259 self._eof = False
260 self._lenbuf = 0
260 self._lenbuf = 0
261
261
262 @property
262 @property
263 def hasbuffer(self):
263 def hasbuffer(self):
264 """True is any data is currently buffered
264 """True is any data is currently buffered
265
265
266 This will be used externally a pre-step for polling IO. If there is
266 This will be used externally a pre-step for polling IO. If there is
267 already data then no polling should be set in place."""
267 already data then no polling should be set in place."""
268 return bool(self._buffer)
268 return bool(self._buffer)
269
269
270 @property
270 @property
271 def closed(self):
271 def closed(self):
272 return self._input.closed
272 return self._input.closed
273
273
274 def fileno(self):
274 def fileno(self):
275 return self._input.fileno()
275 return self._input.fileno()
276
276
277 def close(self):
277 def close(self):
278 return self._input.close()
278 return self._input.close()
279
279
280 def read(self, size):
280 def read(self, size):
281 while (not self._eof) and (self._lenbuf < size):
281 while (not self._eof) and (self._lenbuf < size):
282 self._fillbuffer()
282 self._fillbuffer()
283 return self._frombuffer(size)
283 return self._frombuffer(size)
284
284
285 def readline(self, *args, **kwargs):
285 def readline(self, *args, **kwargs):
286 if 1 < len(self._buffer):
286 if 1 < len(self._buffer):
287 # this should not happen because both read and readline end with a
287 # this should not happen because both read and readline end with a
288 # _frombuffer call that collapse it.
288 # _frombuffer call that collapse it.
289 self._buffer = [''.join(self._buffer)]
289 self._buffer = [''.join(self._buffer)]
290 self._lenbuf = len(self._buffer[0])
290 self._lenbuf = len(self._buffer[0])
291 lfi = -1
291 lfi = -1
292 if self._buffer:
292 if self._buffer:
293 lfi = self._buffer[-1].find('\n')
293 lfi = self._buffer[-1].find('\n')
294 while (not self._eof) and lfi < 0:
294 while (not self._eof) and lfi < 0:
295 self._fillbuffer()
295 self._fillbuffer()
296 if self._buffer:
296 if self._buffer:
297 lfi = self._buffer[-1].find('\n')
297 lfi = self._buffer[-1].find('\n')
298 size = lfi + 1
298 size = lfi + 1
299 if lfi < 0: # end of file
299 if lfi < 0: # end of file
300 size = self._lenbuf
300 size = self._lenbuf
301 elif 1 < len(self._buffer):
301 elif 1 < len(self._buffer):
302 # we need to take previous chunks into account
302 # we need to take previous chunks into account
303 size += self._lenbuf - len(self._buffer[-1])
303 size += self._lenbuf - len(self._buffer[-1])
304 return self._frombuffer(size)
304 return self._frombuffer(size)
305
305
306 def _frombuffer(self, size):
306 def _frombuffer(self, size):
307 """return at most 'size' data from the buffer
307 """return at most 'size' data from the buffer
308
308
309 The data are removed from the buffer."""
309 The data are removed from the buffer."""
310 if size == 0 or not self._buffer:
310 if size == 0 or not self._buffer:
311 return ''
311 return ''
312 buf = self._buffer[0]
312 buf = self._buffer[0]
313 if 1 < len(self._buffer):
313 if 1 < len(self._buffer):
314 buf = ''.join(self._buffer)
314 buf = ''.join(self._buffer)
315
315
316 data = buf[:size]
316 data = buf[:size]
317 buf = buf[len(data):]
317 buf = buf[len(data):]
318 if buf:
318 if buf:
319 self._buffer = [buf]
319 self._buffer = [buf]
320 self._lenbuf = len(buf)
320 self._lenbuf = len(buf)
321 else:
321 else:
322 self._buffer = []
322 self._buffer = []
323 self._lenbuf = 0
323 self._lenbuf = 0
324 return data
324 return data
325
325
326 def _fillbuffer(self):
326 def _fillbuffer(self):
327 """read data to the buffer"""
327 """read data to the buffer"""
328 data = os.read(self._input.fileno(), _chunksize)
328 data = os.read(self._input.fileno(), _chunksize)
329 if not data:
329 if not data:
330 self._eof = True
330 self._eof = True
331 else:
331 else:
332 self._lenbuf += len(data)
332 self._lenbuf += len(data)
333 self._buffer.append(data)
333 self._buffer.append(data)
334
334
335 def popen2(cmd, env=None, newlines=False):
335 def popen2(cmd, env=None, newlines=False):
336 # Setting bufsize to -1 lets the system decide the buffer size.
336 # Setting bufsize to -1 lets the system decide the buffer size.
337 # The default for bufsize is 0, meaning unbuffered. This leads to
337 # The default for bufsize is 0, meaning unbuffered. This leads to
338 # poor performance on Mac OS X: http://bugs.python.org/issue4194
338 # poor performance on Mac OS X: http://bugs.python.org/issue4194
339 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
339 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
340 close_fds=closefds,
340 close_fds=closefds,
341 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
341 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
342 universal_newlines=newlines,
342 universal_newlines=newlines,
343 env=env)
343 env=env)
344 return p.stdin, p.stdout
344 return p.stdin, p.stdout
345
345
346 def popen3(cmd, env=None, newlines=False):
346 def popen3(cmd, env=None, newlines=False):
347 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
347 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
348 return stdin, stdout, stderr
348 return stdin, stdout, stderr
349
349
350 def popen4(cmd, env=None, newlines=False, bufsize=-1):
350 def popen4(cmd, env=None, newlines=False, bufsize=-1):
351 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
351 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
352 close_fds=closefds,
352 close_fds=closefds,
353 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
353 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
354 stderr=subprocess.PIPE,
354 stderr=subprocess.PIPE,
355 universal_newlines=newlines,
355 universal_newlines=newlines,
356 env=env)
356 env=env)
357 return p.stdin, p.stdout, p.stderr, p
357 return p.stdin, p.stdout, p.stderr, p
358
358
359 def version():
359 def version():
360 """Return version information if available."""
360 """Return version information if available."""
361 try:
361 try:
362 import __version__
362 import __version__
363 return __version__.version
363 return __version__.version
364 except ImportError:
364 except ImportError:
365 return 'unknown'
365 return 'unknown'
366
366
367 # used by parsedate
367 # used by parsedate
368 defaultdateformats = (
368 defaultdateformats = (
369 '%Y-%m-%d %H:%M:%S',
369 '%Y-%m-%d %H:%M:%S',
370 '%Y-%m-%d %I:%M:%S%p',
370 '%Y-%m-%d %I:%M:%S%p',
371 '%Y-%m-%d %H:%M',
371 '%Y-%m-%d %H:%M',
372 '%Y-%m-%d %I:%M%p',
372 '%Y-%m-%d %I:%M%p',
373 '%Y-%m-%d',
373 '%Y-%m-%d',
374 '%m-%d',
374 '%m-%d',
375 '%m/%d',
375 '%m/%d',
376 '%m/%d/%y',
376 '%m/%d/%y',
377 '%m/%d/%Y',
377 '%m/%d/%Y',
378 '%a %b %d %H:%M:%S %Y',
378 '%a %b %d %H:%M:%S %Y',
379 '%a %b %d %I:%M:%S%p %Y',
379 '%a %b %d %I:%M:%S%p %Y',
380 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
380 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
381 '%b %d %H:%M:%S %Y',
381 '%b %d %H:%M:%S %Y',
382 '%b %d %I:%M:%S%p %Y',
382 '%b %d %I:%M:%S%p %Y',
383 '%b %d %H:%M:%S',
383 '%b %d %H:%M:%S',
384 '%b %d %I:%M:%S%p',
384 '%b %d %I:%M:%S%p',
385 '%b %d %H:%M',
385 '%b %d %H:%M',
386 '%b %d %I:%M%p',
386 '%b %d %I:%M%p',
387 '%b %d %Y',
387 '%b %d %Y',
388 '%b %d',
388 '%b %d',
389 '%H:%M:%S',
389 '%H:%M:%S',
390 '%I:%M:%S%p',
390 '%I:%M:%S%p',
391 '%H:%M',
391 '%H:%M',
392 '%I:%M%p',
392 '%I:%M%p',
393 )
393 )
394
394
395 extendeddateformats = defaultdateformats + (
395 extendeddateformats = defaultdateformats + (
396 "%Y",
396 "%Y",
397 "%Y-%m",
397 "%Y-%m",
398 "%b",
398 "%b",
399 "%b %Y",
399 "%b %Y",
400 )
400 )
401
401
402 def cachefunc(func):
402 def cachefunc(func):
403 '''cache the result of function calls'''
403 '''cache the result of function calls'''
404 # XXX doesn't handle keywords args
404 # XXX doesn't handle keywords args
405 if func.func_code.co_argcount == 0:
405 if func.func_code.co_argcount == 0:
406 cache = []
406 cache = []
407 def f():
407 def f():
408 if len(cache) == 0:
408 if len(cache) == 0:
409 cache.append(func())
409 cache.append(func())
410 return cache[0]
410 return cache[0]
411 return f
411 return f
412 cache = {}
412 cache = {}
413 if func.func_code.co_argcount == 1:
413 if func.func_code.co_argcount == 1:
414 # we gain a small amount of time because
414 # we gain a small amount of time because
415 # we don't need to pack/unpack the list
415 # we don't need to pack/unpack the list
416 def f(arg):
416 def f(arg):
417 if arg not in cache:
417 if arg not in cache:
418 cache[arg] = func(arg)
418 cache[arg] = func(arg)
419 return cache[arg]
419 return cache[arg]
420 else:
420 else:
421 def f(*args):
421 def f(*args):
422 if args not in cache:
422 if args not in cache:
423 cache[args] = func(*args)
423 cache[args] = func(*args)
424 return cache[args]
424 return cache[args]
425
425
426 return f
426 return f
427
427
428 class sortdict(dict):
428 class sortdict(dict):
429 '''a simple sorted dictionary'''
429 '''a simple sorted dictionary'''
430 def __init__(self, data=None):
430 def __init__(self, data=None):
431 self._list = []
431 self._list = []
432 if data:
432 if data:
433 self.update(data)
433 self.update(data)
434 def copy(self):
434 def copy(self):
435 return sortdict(self)
435 return sortdict(self)
436 def __setitem__(self, key, val):
436 def __setitem__(self, key, val):
437 if key in self:
437 if key in self:
438 self._list.remove(key)
438 self._list.remove(key)
439 self._list.append(key)
439 self._list.append(key)
440 dict.__setitem__(self, key, val)
440 dict.__setitem__(self, key, val)
441 def __iter__(self):
441 def __iter__(self):
442 return self._list.__iter__()
442 return self._list.__iter__()
443 def update(self, src):
443 def update(self, src):
444 if isinstance(src, dict):
444 if isinstance(src, dict):
445 src = src.iteritems()
445 src = src.iteritems()
446 for k, v in src:
446 for k, v in src:
447 self[k] = v
447 self[k] = v
448 def clear(self):
448 def clear(self):
449 dict.clear(self)
449 dict.clear(self)
450 self._list = []
450 self._list = []
451 def items(self):
451 def items(self):
452 return [(k, self[k]) for k in self._list]
452 return [(k, self[k]) for k in self._list]
453 def __delitem__(self, key):
453 def __delitem__(self, key):
454 dict.__delitem__(self, key)
454 dict.__delitem__(self, key)
455 self._list.remove(key)
455 self._list.remove(key)
456 def pop(self, key, *args, **kwargs):
456 def pop(self, key, *args, **kwargs):
457 dict.pop(self, key, *args, **kwargs)
457 dict.pop(self, key, *args, **kwargs)
458 try:
458 try:
459 self._list.remove(key)
459 self._list.remove(key)
460 except ValueError:
460 except ValueError:
461 pass
461 pass
462 def keys(self):
462 def keys(self):
463 return self._list
463 return self._list
464 def iterkeys(self):
464 def iterkeys(self):
465 return self._list.__iter__()
465 return self._list.__iter__()
466 def iteritems(self):
466 def iteritems(self):
467 for k in self._list:
467 for k in self._list:
468 yield k, self[k]
468 yield k, self[k]
469 def insert(self, index, key, val):
469 def insert(self, index, key, val):
470 self._list.insert(index, key)
470 self._list.insert(index, key)
471 dict.__setitem__(self, key, val)
471 dict.__setitem__(self, key, val)
472
472
473 class lrucachedict(object):
473 class lrucachedict(object):
474 '''cache most recent gets from or sets to this dictionary'''
474 '''cache most recent gets from or sets to this dictionary'''
475 def __init__(self, maxsize):
475 def __init__(self, maxsize):
476 self._cache = {}
476 self._cache = {}
477 self._maxsize = maxsize
477 self._maxsize = maxsize
478 self._order = collections.deque()
478 self._order = collections.deque()
479
479
480 def __getitem__(self, key):
480 def __getitem__(self, key):
481 value = self._cache[key]
481 value = self._cache[key]
482 self._order.remove(key)
482 self._order.remove(key)
483 self._order.append(key)
483 self._order.append(key)
484 return value
484 return value
485
485
486 def __setitem__(self, key, value):
486 def __setitem__(self, key, value):
487 if key not in self._cache:
487 if key not in self._cache:
488 if len(self._cache) >= self._maxsize:
488 if len(self._cache) >= self._maxsize:
489 del self._cache[self._order.popleft()]
489 del self._cache[self._order.popleft()]
490 else:
490 else:
491 self._order.remove(key)
491 self._order.remove(key)
492 self._cache[key] = value
492 self._cache[key] = value
493 self._order.append(key)
493 self._order.append(key)
494
494
495 def __contains__(self, key):
495 def __contains__(self, key):
496 return key in self._cache
496 return key in self._cache
497
497
498 def clear(self):
498 def clear(self):
499 self._cache.clear()
499 self._cache.clear()
500 self._order = collections.deque()
500 self._order = collections.deque()
501
501
502 def lrucachefunc(func):
502 def lrucachefunc(func):
503 '''cache most recent results of function calls'''
503 '''cache most recent results of function calls'''
504 cache = {}
504 cache = {}
505 order = collections.deque()
505 order = collections.deque()
506 if func.func_code.co_argcount == 1:
506 if func.func_code.co_argcount == 1:
507 def f(arg):
507 def f(arg):
508 if arg not in cache:
508 if arg not in cache:
509 if len(cache) > 20:
509 if len(cache) > 20:
510 del cache[order.popleft()]
510 del cache[order.popleft()]
511 cache[arg] = func(arg)
511 cache[arg] = func(arg)
512 else:
512 else:
513 order.remove(arg)
513 order.remove(arg)
514 order.append(arg)
514 order.append(arg)
515 return cache[arg]
515 return cache[arg]
516 else:
516 else:
517 def f(*args):
517 def f(*args):
518 if args not in cache:
518 if args not in cache:
519 if len(cache) > 20:
519 if len(cache) > 20:
520 del cache[order.popleft()]
520 del cache[order.popleft()]
521 cache[args] = func(*args)
521 cache[args] = func(*args)
522 else:
522 else:
523 order.remove(args)
523 order.remove(args)
524 order.append(args)
524 order.append(args)
525 return cache[args]
525 return cache[args]
526
526
527 return f
527 return f
528
528
529 class propertycache(object):
529 class propertycache(object):
530 def __init__(self, func):
530 def __init__(self, func):
531 self.func = func
531 self.func = func
532 self.name = func.__name__
532 self.name = func.__name__
533 def __get__(self, obj, type=None):
533 def __get__(self, obj, type=None):
534 result = self.func(obj)
534 result = self.func(obj)
535 self.cachevalue(obj, result)
535 self.cachevalue(obj, result)
536 return result
536 return result
537
537
538 def cachevalue(self, obj, value):
538 def cachevalue(self, obj, value):
539 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
539 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
540 obj.__dict__[self.name] = value
540 obj.__dict__[self.name] = value
541
541
542 def pipefilter(s, cmd):
542 def pipefilter(s, cmd):
543 '''filter string S through command CMD, returning its output'''
543 '''filter string S through command CMD, returning its output'''
544 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
544 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
545 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
545 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
546 pout, perr = p.communicate(s)
546 pout, perr = p.communicate(s)
547 return pout
547 return pout
548
548
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname = outname = None
    try:
        # stage the input where the external command can read it
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # reserve a second temp file for the command's output
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname).replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # odd exit statuses signal success on OpenVMS
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort removal of both temp files
        for name in (inname, outname):
            try:
                if name:
                    os.unlink(name)
            except OSError:
                pass
585
585
# maps a filter-command prefix to the function implementing that scheme
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
590
590
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            # dispatch on the recognized scheme, stripping the prefix
            return fn(s, cmd[len(prefix):].lstrip())
    # no scheme prefix: treat cmd as a plain pipe command
    return pipefilter(s, cmd)
597
597
def binary(s):
    """return true if a string is binary data"""
    # an embedded NUL byte is taken as evidence of binary content
    if not s:
        return False
    return '\0' in s
601
601
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)) for positive x; 0 when x is falsy
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen >= min:
            if min < max:
                # double the threshold, or jump straight to the size
                # we actually observed, capped at max
                min = min << 1
                nmin = 1 << log2(pendinglen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            pending = []
            pendinglen = 0
    if pending:
        # flush whatever is left, even if smaller than min
        yield ''.join(pending)
632
632
# convenience alias so callers can raise util.Abort directly
Abort = error.Abort
634
634
def always(fn):
    """matcher predicate that accepts every input"""
    return True
637
637
def never(fn):
    """matcher predicate that rejects every input"""
    return False
640
640
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        # remember prior state so we only re-enable when it was on
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()
    return wrapper
662
662
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists, go absolute
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    comps1, comps2 = splitpath(n1), n2.split('/')
    comps1.reverse()
    comps2.reverse()
    # drop the shared leading components of both paths
    while comps1 and comps2 and comps1[-1] == comps2[-1]:
        comps1.pop()
        comps2.pop()
    comps2.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join((['..'] * len(comps1)) + comps2) or '.'
688
688
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):      # new py2exe
        return True
    if safehasattr(sys, "importers"):   # old py2exe
        return True
    return imp.is_frozen("__main__")    # tools/freeze
698
698
# the location of data files matching the source code
# (frozen executables such as py2exe don't provide __file__,
# so fall back to the directory holding the executable itself)
if mainfrozen():
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)
707
707
# cached path of the 'hg' executable; filled in lazily by hgexecutable()
_hgexecutable = None
709
709
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        envhg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if envhg:
            # explicit override wins
            _sethgexecutable(envhg)
        elif mainfrozen():
            # a frozen binary is itself the hg executable
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            _sethgexecutable(findexe('hg') or os.path.basename(sys.argv[0]))
    return _hgexecutable
728
728
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # updates the module-level cache read by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
733
733
734 def _isstdout(f):
734 def _isstdout(f):
735 fileno = getattr(f, 'fileno', None)
735 fileno = getattr(f, 'fileno', None)
736 return fileno and fileno() == sys.__stdout__.fileno()
736 return fileno and fileno() == sys.__stdout__.fileno()
737
737
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # keep our buffered output ordered before the child's
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if cwd is not None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # child inherits our stdout/stderr directly
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # relay the child's combined stdout/stderr to the caller
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # odd exit statuses signal success on OpenVMS
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
796
796
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a one-frame traceback means the call itself failed, i.e.
            # the arguments did not match func's signature; deeper
            # tracebacks are genuine TypeErrors from func's body
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
808
808
def copyfile(src, dest, hardlink=False):
    "copy a file, preserving mode and atime/mtime"
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate symlinks instead of copying their targets
        os.symlink(os.readlink(src), dest)
        return
    try:
        shutil.copyfile(src, dest)
        shutil.copymode(src, dest)
    except shutil.Error as inst:
        raise Abort(str(inst))
829
829
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # only attempt hardlinks when src and dst share a device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    topic = _('linking') if hardlink else _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset nested progress by the files handled so far
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # link failed; disable hardlinking for the rest of the copy
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
866
866
# file names and characters Windows refuses in any path component
_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for component in path.replace('\\', '/').split('/'):
        if not component:
            continue
        for c in component:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # the reserved names apply to the part before the first dot
        base = component.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        last = component[-1]
        # '.' and '..' are fine; other names may not end in dot or space
        if last in '. ' and component not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
917
917
# on Windows every filename must survive the reserved-name checks;
# elsewhere defer to the platform module's (usually no-op) validator
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
922
922
def makelock(info, pathname):
    """Write a lock file at pathname whose content is info.

    A symlink is preferred (creation is atomic and the info is readable
    without opening the file); platforms lacking os.symlink fall back to
    an exclusively-created regular file.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
935
935
def readlock(pathname):
    """Return the info stored in the lock file at pathname."""
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported.
        # Both mean the lock is a plain file - read it below.
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
948
948
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        # file-likes without fileno still carry the path in .name
        return os.stat(fp.name)
955
955
def statmtimesec(st):
    """Get mtime as integer of seconds

    'int(st.st_mtime)' cannot be used because st.st_mtime is computed as
    'sec + 1e-9 * nsec' and double-precision floating-point type is too narrow
    to represent nanoseconds. If 'nsec' is close to 1 sec, 'int(st.st_mtime)'
    can be 'sec + 1'. (issue4836)
    """
    try:
        # os.stat results support tuple indexing; ST_MTIME is whole seconds
        return st[stat.ST_MTIME]
    except (TypeError, IndexError):
        # osutil.stat doesn't allow index access and its st_mtime is int;
        # some stat-likes raise IndexError rather than TypeError, so
        # catch both before falling back to the attribute
        return st.st_mtime
969
969
970 # File system features
970 # File system features
971
971
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
    except OSError:
        # the case-flipped sibling does not exist: case-sensitive
        return True
    # identical stat under both spellings means case-insensitive
    return s2 != s1
994
994
try:
    import re2
    # None means "available but not yet probed"; _re._checkre2 resolves it
    _re2 = None
except ImportError:
    _re2 = False
1000
1000
class _re(object):
    """Facade that prefers the re2 engine, falling back to stdlib re."""

    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline rather than as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        return remod.escape

re = _re()
1045
1045
# directory -> {normcased name: on-disk name}; filled lazily by fspath()
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased names to their on-disk spellings
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE: str.replace returns a new string; the result used to be
    # discarded, leaving '\' unescaped inside the character classes
    # below, so os.sep itself was never matched as a separator.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1088
1088
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fd is not None:
            fd.close()
        # clean up both probe files regardless of the outcome
        for tmp in (f1, f2):
            try:
                os.unlink(tmp)
            except OSError:
                pass
1120
1120
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # os.altsep is None on platforms with a single separator
    return os.altsep and path.endswith(os.altsep)
1124
1124
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(os.sep)
1132
1132
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # Windows always has a GUI; elsewhere require an X display
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1147
1147
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirname, basename = os.path.split(name)
    handle, temp = tempfile.mkstemp(prefix='.%s-' % basename, dir=dirname)
    os.close(handle)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy: the fresh empty temp file is the copy
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a stray temp file behind on failure
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1186
1186
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegate the file API we need straight to the temp file
        for attr in ('write', 'seek', 'tell', 'fileno'):
            setattr(self, attr, getattr(self._fp, attr))

    def close(self):
        # renaming only on a clean close is what makes the update atomic
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1224
1224
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # dirname() reached a fixed point; nothing left to create
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1241
1241
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        # create ancestors first; dirname() reaching a fixed point ends
        # the recursion
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1263
1263
def readfile(path):
    """Return the entire contents of the file at path (binary mode)."""
    # a context manager guarantees the descriptor is released even if
    # read() raises, replacing the manual try/finally
    with open(path, 'rb') as fp:
        return fp.read()
1270
1270
def writefile(path, text):
    """Write text to the file at path (binary mode), truncating it."""
    # context manager replaces the manual try/finally and closes the
    # file even when write() raises
    with open(path, 'wb') as fp:
        fp.write(text)
1277
1277
def appendfile(path, text):
    """Append text to the file at path (binary mode), creating it if needed."""
    # context manager replaces the manual try/finally and closes the
    # file even when write() raises
    with open(path, 'ab') as fp:
        fp.write(text)
1284
1284
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-yield any chunk over 1MB in 256kB slices; very large
            # strings are expensive to slice repeatedly later on
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        needed = l
        pieces = []
        queue = self._queue
        while needed > 0:
            # top the queue up to ~256kB from the underlying iterator
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # Rather than popleft() / appendleft() round trips for a
            # partially consumed head chunk (two deque mutations plus a
            # fresh str each time), track the consumed prefix with
            # self._chunkoffset and pop only once fully used.
            chunk = queue[0]
            clen = len(chunk)
            offset = self._chunkoffset

            if offset == 0 and needed >= clen:
                # the whole head chunk fits in the request
                needed -= clen
                queue.popleft()
                pieces.append(chunk)
                # self._chunkoffset stays at 0
                continue

            avail = clen - offset

            if needed >= avail:
                # consume the rest of a partially-read chunk
                needed -= avail
                queue.popleft()
                # offset == 0 was handled by the branch above, so this
                # slice never degenerates into a full copy via chunk[0:]
                pieces.append(chunk[offset:])
                self._chunkoffset = 0
            else:
                # take only part of the head chunk
                pieces.append(chunk[offset:offset + needed])
                self._chunkoffset += needed
                needed -= avail

        return ''.join(pieces)
1365
1365
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 (exhausted limit) short-circuits without reading
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1386
1386
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # local offset in seconds = UTC wall clock minus local wall clock
    delta = (datetime.datetime.utcfromtimestamp(timestamp) -
             datetime.datetime.fromtimestamp(timestamp))
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1399
1399
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # %1/%2 are extensions: sign+hours and minutes of the UTC
        # offset; positive offsets lie west of UTC, hence the "-"
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        timetuple = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        timetuple = time.gmtime(sys.maxint)
    return time.strftime(format, timetuple)
1422
1422
def shortdate(date=None):
    """turn a (timestamp, tzoff) tuple into an ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1426
1426
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts numeric offsets of the form +HHMM/-HHMM and the literal
    names "GMT" and "UTC". Returns the offset in seconds west of UTC
    (matching makedate()'s convention), or None if the string is not
    a recognized timezone.
    """
    # check the length before indexing tz[0]: the original order raised
    # IndexError on an empty string instead of returning None
    if len(tz) == 5 and tz[0] in "+-" and tz[1:].isdigit():
        sign = (tz[0] == "+") and 1 or -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # east-of-UTC offsets are negative in this convention
        return -sign * (hours * 60 + minutes) * 60
    if tz == "GMT" or tz == "UTC":
        return 0
    return None
1437
1437
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    offset = parsetimezone(string.split()[-1])
    date = string
    if offset is not None:
        # the trailing token was a timezone; strip it before strptime
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        if not any(("%" + p) in format for p in part):
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # no explicit zone given: derive the local timezone's offset
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1467
1467
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate('yesterday ') == parsedate(
    ...     (datetime.date.today() - datetime.timedelta(days=1)).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: "unixtime offset" as two integers
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                b = "00" if part[0] in "HMS" else "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1546
1546
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # bias unspecified fields toward the start of the range
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # bias unspecified fields toward the end of the range; try the
        # longest month lengths first, falling back toward February
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    if date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    if date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    if date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    if " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    # a bare date matches anywhere within its precision
    start, stop = lower(date), upper(date)
    return lambda x: x >= start and x <= stop
1622
1622
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        regexp = pattern[3:]
        try:
            regex = remod.compile(regexp)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexp, regex.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # no (or unknown) prefix: exact string equality
    return 'literal', pattern, pattern.__eq__
1661
1661
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # strip an email domain, if any
    user = user.partition('@')[0]
    # prefer whatever follows a '<' (e.g. "Real Name <login...")
    head, sep, tail = user.partition('<')
    if sep:
        user = tail
    # keep only the first word
    user = user.partition(' ')[0]
    # and only the portion before the first dot
    user = user.partition('.')[0]
    return user
1677
1677
def emailuser(user):
    """Return the user portion of an email address."""
    # drop the domain, then prefer what follows a '<' if one is present
    user = user.partition('@')[0]
    head, sep, tail = user.partition('<')
    if sep:
        user = tail
    return user
1687
1687
def email(author):
    '''get email of author.'''
    # slice between the first '<' and the first '>'; when '<' is absent,
    # find() returns -1 so start becomes 0 (the whole string), and a
    # missing '>' leaves the slice open-ended
    start = author.find('<') + 1
    end = author.find('>')
    return author[start:end if end != -1 else None]
1694
1694
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    trimmed = encoding.trim(text, maxlength, ellipsis='...')
    return trimmed
1698
1698
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # first (multiplier, divisor, format) row large enough to apply;
        # fall back to the last (smallest-unit) row's format
        rendered = (fmt % (count / float(div))
                    for mult, div, fmt in unittable
                    if count >= div * mult)
        return next(rendered, unittable[-1][2] % count)

    return go
1709
1709
# human-readable formatter for byte quantities: the first row whose
# threshold (multiplier * divisor) the value reaches wins, giving more
# decimal places for smaller magnitudes (e.g. '1.23 GB', '12.3 MB',
# '123 KB', '12 bytes'); the literal strings must stay inside _() calls
# for gettext extraction
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1722
1722
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed.

    repr() doubles every backslash, which makes Windows paths
    unreadable in user-facing output; undo that doubling.
    """
    return '\\'.join(repr(s).split('\\\\'))
1726
1726
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Return a display-width-aware TextWrapper instance.

    The wrapper class is defined lazily on first call; the module-level
    name MBTextWrapper is then rebound to the class itself (see the
    ``global`` statement below), so later calls construct instances
    directly without re-creating the class.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr so the first piece occupies at most space_left
            # display columns (per encoding.ucolwidth)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            # at least one column must remain available
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1830
1830
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap ``line`` to ``width`` display columns.

    ``initindent`` prefixes the first output line, ``hangindent`` all
    subsequent ones.  Arguments are byte strings in the local encoding;
    the wrapped result is re-encoded before returning.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    decode = lambda s: s.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=decode(initindent),
                            subsequent_indent=decode(hangindent))
    return wrapper.fill(decode(line)).encode(encoding.encoding)
1843
1843
def iterlines(iterator):
    """Flatten an iterable of multi-line string chunks into single lines."""
    # splitlines() drops the line terminators, matching the original loop
    return (line
            for chunk in iterator
            for line in chunk.splitlines())
1848
1848
def expandpath(path):
    """Expand environment variables, then ``~`` constructs, in ``path``."""
    path = os.path.expandvars(path)
    return os.path.expanduser(path)
1851
1851
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    # frozen binary: re-invoke the executable itself
    return [sys.executable]
1862
1862
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the dead child; os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): 'terminated' holds (pid, status) tuples, so
            # 'pid in terminated' can seemingly never be true; liveness
            # detection appears to rely on testpid(pid) — confirm intent.
            # condfn() is re-checked after observing death to close the
            # race between the condition becoming true and termination.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
1897
1897
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    The caller's ``mapping`` is never modified.
    """
    fn = fn or (lambda s: s)
    # NOTE: keys are joined into the pattern unescaped; callers are
    # expected to use keys that are safe inside a regex alternation
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # drop the leading backslash of a regex-escaped prefix
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # map the doubled prefix to a literal prefix character; work on
        # a copy so the caller's dict is not mutated (the original code
        # wrote into ``mapping`` directly)
        mapping = mapping.copy()
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1922
1922
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not a number: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
1939
1939
# recognized spellings of boolean configuration values
_booleans = dict.fromkeys(('1', 'yes', 'true', 'on', 'always'), True)
_booleans.update(dict.fromkeys(('0', 'no', 'false', 'off', 'never'), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
1950
1950
# all hexadecimal digits, both upper and lower case
_hexdig = '0123456789ABCDEFabcdef'
# map every two-hex-digit string (any case combination) to the character
# with that code point, e.g. '20' -> ' '; used by _urlunquote below
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)
1954
1954
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            # decode a two-hex-digit escape; the remainder of the piece
            # is copied through verbatim
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            # not a valid escape -- keep the '%' literally
            s += '%' + item
        except UnicodeDecodeError:
            # NOTE(review): this branch presumably fires on Python 2 when
            # the accumulated s is unicode and the decoded byte is
            # non-ASCII; the exact raise point depends on the += above,
            # which is why accumulation must stay inside this try
            s += unichr(int(item[:2], 16)) + item[2:]
    return s
1974
1974
1975 class url(object):
1975 class url(object):
1976 r"""Reliable URL parser.
1976 r"""Reliable URL parser.
1977
1977
1978 This parses URLs and provides attributes for the following
1978 This parses URLs and provides attributes for the following
1979 components:
1979 components:
1980
1980
1981 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1981 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1982
1982
1983 Missing components are set to None. The only exception is
1983 Missing components are set to None. The only exception is
1984 fragment, which is set to '' if present but empty.
1984 fragment, which is set to '' if present but empty.
1985
1985
1986 If parsefragment is False, fragment is included in query. If
1986 If parsefragment is False, fragment is included in query. If
1987 parsequery is False, query is included in path. If both are
1987 parsequery is False, query is included in path. If both are
1988 False, both fragment and query are included in path.
1988 False, both fragment and query are included in path.
1989
1989
1990 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1990 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1991
1991
1992 Note that for backward compatibility reasons, bundle URLs do not
1992 Note that for backward compatibility reasons, bundle URLs do not
1993 take host names. That means 'bundle://../' has a path of '../'.
1993 take host names. That means 'bundle://../' has a path of '../'.
1994
1994
1995 Examples:
1995 Examples:
1996
1996
1997 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1997 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1998 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1998 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1999 >>> url('ssh://[::1]:2200//home/joe/repo')
1999 >>> url('ssh://[::1]:2200//home/joe/repo')
2000 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2000 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2001 >>> url('file:///home/joe/repo')
2001 >>> url('file:///home/joe/repo')
2002 <url scheme: 'file', path: '/home/joe/repo'>
2002 <url scheme: 'file', path: '/home/joe/repo'>
2003 >>> url('file:///c:/temp/foo/')
2003 >>> url('file:///c:/temp/foo/')
2004 <url scheme: 'file', path: 'c:/temp/foo/'>
2004 <url scheme: 'file', path: 'c:/temp/foo/'>
2005 >>> url('bundle:foo')
2005 >>> url('bundle:foo')
2006 <url scheme: 'bundle', path: 'foo'>
2006 <url scheme: 'bundle', path: 'foo'>
2007 >>> url('bundle://../foo')
2007 >>> url('bundle://../foo')
2008 <url scheme: 'bundle', path: '../foo'>
2008 <url scheme: 'bundle', path: '../foo'>
2009 >>> url(r'c:\foo\bar')
2009 >>> url(r'c:\foo\bar')
2010 <url path: 'c:\\foo\\bar'>
2010 <url path: 'c:\\foo\\bar'>
2011 >>> url(r'\\blah\blah\blah')
2011 >>> url(r'\\blah\blah\blah')
2012 <url path: '\\\\blah\\blah\\blah'>
2012 <url path: '\\\\blah\\blah\\blah'>
2013 >>> url(r'\\blah\blah\blah#baz')
2013 >>> url(r'\\blah\blah\blah#baz')
2014 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2014 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2015 >>> url(r'file:///C:\users\me')
2015 >>> url(r'file:///C:\users\me')
2016 <url scheme: 'file', path: 'C:\\users\\me'>
2016 <url scheme: 'file', path: 'C:\\users\\me'>
2017
2017
2018 Authentication credentials:
2018 Authentication credentials:
2019
2019
2020 >>> url('ssh://joe:xyz@x/repo')
2020 >>> url('ssh://joe:xyz@x/repo')
2021 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2021 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2022 >>> url('ssh://joe@x/repo')
2022 >>> url('ssh://joe@x/repo')
2023 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2023 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2024
2024
2025 Query strings and fragments:
2025 Query strings and fragments:
2026
2026
2027 >>> url('http://host/a?b#c')
2027 >>> url('http://host/a?b#c')
2028 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2028 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2029 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2029 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2030 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2030 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2031 """
2031 """
2032
2032
2033 _safechars = "!~*'()+"
2033 _safechars = "!~*'()+"
2034 _safepchars = "/!~*'()+:\\"
2034 _safepchars = "/!~*'()+:\\"
2035 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2035 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2036
2036
    def __init__(self, path, parsequery=True, parsefragment=True):
        """Parse ``path`` into URL components (see the class docstring).

        The parse proceeds by repeatedly chopping recognized pieces off
        ``path`` (fragment, scheme, query, authority) until only the
        path component remains; statement order is significant.
        """
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                # a scheme was present: everything after it is non-local
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                # no scheme: the whole remainder is a local path
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
            # path of file:///d is /d
            # path of file:///d:/ is d:/, not /d:/
            if path and not hasdriveletter(path):
                path = '/' + path

        if self.host and '@' in self.host:
            # rsplit so a '@' inside the userinfo stays with the user
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))
2130
2130
2131 def __repr__(self):
2131 def __repr__(self):
2132 attrs = []
2132 attrs = []
2133 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2133 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2134 'query', 'fragment'):
2134 'query', 'fragment'):
2135 v = getattr(self, a)
2135 v = getattr(self, a)
2136 if v is not None:
2136 if v is not None:
2137 attrs.append('%s: %r' % (a, v))
2137 attrs.append('%s: %r' % (a, v))
2138 return '<url %s>' % ', '.join(attrs)
2138 return '<url %s>' % ', '.join(attrs)
2139
2139
    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        # _localpath: presumably set by __init__ for plain local paths —
        # those round-trip with no scheme/host reassembly
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        # general form: scheme://user:passwd@host:port/path?query#fragment
        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            # rooted or drive-letter path keeps '//' even without a host
            # (e.g. 'file:///tmp/foo', 'file:///c:/tmp' — see doctests)
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals like '[::1]' must not be quoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s
2216
2216
2217 def authinfo(self):
2217 def authinfo(self):
2218 user, passwd = self.user, self.passwd
2218 user, passwd = self.user, self.passwd
2219 try:
2219 try:
2220 self.user, self.passwd = None, None
2220 self.user, self.passwd = None, None
2221 s = str(self)
2221 s = str(self)
2222 finally:
2222 finally:
2223 self.user, self.passwd = user, passwd
2223 self.user, self.passwd = user, passwd
2224 if not self.user:
2224 if not self.user:
2225 return (s, None)
2225 return (s, None)
2226 # authinfo[1] is passed to urllib2 password manager, and its
2226 # authinfo[1] is passed to urllib2 password manager, and its
2227 # URIs must not contain credentials. The host is passed in the
2227 # URIs must not contain credentials. The host is passed in the
2228 # URIs list because Python < 2.4.3 uses only that to search for
2228 # URIs list because Python < 2.4.3 uses only that to search for
2229 # a password.
2229 # a password.
2230 return (s, (None, (s, self.host),
2230 return (s, (None, (s, self.host),
2231 self.user, self.passwd or ''))
2231 self.user, self.passwd or ''))
2232
2232
2233 def isabs(self):
2233 def isabs(self):
2234 if self.scheme and self.scheme != 'file':
2234 if self.scheme and self.scheme != 'file':
2235 return True # remote URL
2235 return True # remote URL
2236 if hasdriveletter(self.path):
2236 if hasdriveletter(self.path):
2237 return True # absolute for our purposes - can't be joined()
2237 return True # absolute for our purposes - can't be joined()
2238 if self.path.startswith(r'\\'):
2238 if self.path.startswith(r'\\'):
2239 return True # Windows UNC path
2239 return True # Windows UNC path
2240 if self.path.startswith('/'):
2240 if self.path.startswith('/'):
2241 return True # POSIX-style
2241 return True # POSIX-style
2242 return False
2242 return False
2243
2243
2244 def localpath(self):
2244 def localpath(self):
2245 if self.scheme == 'file' or self.scheme == 'bundle':
2245 if self.scheme == 'file' or self.scheme == 'bundle':
2246 path = self.path or '/'
2246 path = self.path or '/'
2247 # For Windows, we need to promote hosts containing drive
2247 # For Windows, we need to promote hosts containing drive
2248 # letters to paths with drive letters.
2248 # letters to paths with drive letters.
2249 if hasdriveletter(self._hostport):
2249 if hasdriveletter(self._hostport):
2250 path = self._hostport + '/' + self.path
2250 path = self._hostport + '/' + self.path
2251 elif (self.host is not None and self.path
2251 elif (self.host is not None and self.path
2252 and not hasdriveletter(path)):
2252 and not hasdriveletter(path)):
2253 path = '/' + path
2253 path = '/' + path
2254 return path
2254 return path
2255 return self._origpath
2255 return self._origpath
2256
2256
2257 def islocal(self):
2257 def islocal(self):
2258 '''whether localpath will return something that posixfile can open'''
2258 '''whether localpath will return something that posixfile can open'''
2259 return (not self.scheme or self.scheme == 'file'
2259 return (not self.scheme or self.scheme == 'file'
2260 or self.scheme == 'bundle')
2260 or self.scheme == 'bundle')
2261
2261
def hasscheme(path):
    """Return True if *path* carries a URL scheme prefix."""
    parsed = url(path)
    return bool(parsed.scheme)
2264
2264
def hasdriveletter(path):
    """Report whether *path* begins with a Windows drive letter ('c:', 'D:')."""
    if not path:
        # preserve the original falsy return value ('' or None) for
        # empty input; callers only test truthiness
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2267
2267
def urllocalpath(path):
    """Parse *path* as a URL (without query/fragment splitting) and return
    its local filesystem form."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2270
2270
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # mask rather than drop, so the URL shape stays recognizable
        parsed.passwd = '***'
    return str(parsed)
2277
2277
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2283
2283
def isatty(fd):
    """Best-effort check whether *fd* is attached to a terminal.

    Objects without a working isatty() method are treated as non-ttys.
    """
    result = False
    try:
        result = fd.isatty()
    except AttributeError:
        pass
    return result
2289
2289
# Pretty-printer for elapsed time, used by timed() below.  Built with
# unitcountfn (defined earlier in this file); the entries appear to map
# magnitude thresholds to (unit, format) so the largest readable unit
# (s, ms, us, ns) is chosen — see unitcountfn for the exact semantics.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2305
2305
# current indentation depth of nested @timed reports; a one-element list
# so the wrapper closure in timed() can mutate it in place
_timenesting = [0]
2307
2307
def timed(func):
    '''Report the execution time of a function call to stderr.

    A development aid: decorate any function whose cost you want to
    measure, e.g.:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        step = 2
        begin = time.time()
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            duration = time.time() - begin
            _timenesting[0] -= step
            # indent by nesting depth so nested timed calls read as a tree
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(duration)))
    return wrapper
2332
2332
# byte-count suffixes accepted by sizetoint(); matching uses endswith, so
# the one-letter forms cannot shadow the two-letter ones
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        # no unit suffix: a plain integer byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2354
2354
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, callable) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        """Register *hook* under the ordering key *source*."""
        self._hooks.append((source, hook))

    def __call__(self, *args):
        """Invoke every registered hook with *args* and return the list
        of their results, in source order."""
        self._hooks.sort(key=lambda pair: pair[0])
        return [fn(*args) for _src, fn in self._hooks]
2372
2372
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so interleaved output stays ordered
        otherf.flush()
    f.write('%s at:\n' % msg)
    # drop this frame plus the 'skip' innermost caller frames
    frames = traceback.extract_stack()[:-skip - 1]
    formatted = [('%s:%s' % (filename, lineno), funcname)
                 for filename, lineno, funcname, _line in frames]
    if formatted:
        width = max(len(loc) for loc, _fn in formatted)
        for loc, funcname in formatted:
            f.write(' %-*s in %s\n' % (width, loc, funcname))
    f.flush()
2389
2389
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style map: entries whose state byte equals 'skip'
            # are left out of the count
            for f, s in map.iteritems():
                if s[0] != skip:
                    self.addpath(f)
        else:
            for f in map:
                self.addpath(f)

    def addpath(self, path):
        """Count every ancestor directory of *path*."""
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # finddirs yields longest ancestor first; once one is
                # already known, all its shorter prefixes were counted
                # when it was first added, so a single bump suffices
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        """Undo one addpath(path)."""
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2425
2425
# prefer the C implementation of 'dirs' from the parsers extension
# module when it is available
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2428
2428
def finddirs(path):
    """Yield each ancestor directory of *path*, longest first.

    'a/b/c' yields 'a/b' then 'a'; a path containing no '/' yields
    nothing.
    """
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
2434
2434
# compression utility

class nocompress(object):
    """Pass-through 'compressor' used when no compression is requested."""
    def compress(self, x):
        # identity: hand each chunk back unchanged
        return x
    def flush(self):
        # nothing is ever buffered, so there is nothing to emit
        return ""

# map compression-type markers to compressor factories
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
}
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2451
2451
def _makedecompressor(decompcls):
    """Wrap a decompressor class into a function mapping a file-like
    object to a chunkbuffer of decompressed data."""
    def generator(f):
        engine = decompcls()
        for chunk in filechunkiter(f):
            yield engine.decompress(chunk)
    def func(fh):
        return chunkbuffer(generator(fh))
    return func

def _bz2():
    """Build a BZ2 decompressor for streams whose 'BZ' header was
    stripped by the caller."""
    engine = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    engine.decompress('BZ')
    return engine

# map compression-type markers to decompressing stream adapters; the
# None entry is the identity (already-uncompressed data)
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2475
2475
# convenient shortcut: util.dst(...) is util.debugstacktrace(...)
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now