##// END OF EJS Templates
util: extract function that parses timezone string...
Yuya Nishihara -
r26126:7b625bae default
parent child Browse files
Show More
@@ -1,2342 +1,2342
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding, parsers
18 import error, osutil, encoding, parsers
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib
22 import imp, socket, urllib
23 import gc
23 import gc
24
24
25 if os.name == 'nt':
25 if os.name == 'nt':
26 import windows as platform
26 import windows as platform
27 else:
27 else:
28 import posix as platform
28 import posix as platform
29
29
# Re-export the platform-specific implementations (from windows.py or
# posix.py, selected above) at module level so callers can simply use
# util.<name> without caring which backend is active.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation from osutil when it provides one
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
83
83
# Python compatibility

_notset = object()

def safehasattr(thing, attr):
    """Check whether `thing` has an attribute named `attr`.

    Implemented with getattr() and a private sentinel default rather than
    the hasattr() builtin.
    """
    return getattr(thing, attr, _notset) is not _notset
90
90
def sha1(s=''):
    '''
    Low-overhead wrapper around Python's SHA support

    >>> f = _fastsha1
    >>> a = sha1()
    >>> a = f()
    >>> a.hexdigest()
    'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    '''

    return _fastsha1(s)

def _fastsha1(s=''):
    # Locate the real sha1 constructor on first use and then rebind both
    # this function and sha1() directly to it, so every later call skips
    # the import machinery entirely.
    if sys.version_info < (2, 5):
        # ancient interpreters predate hashlib
        from sha import sha as _sha1
    else:
        from hashlib import sha1 as _sha1
    global _fastsha1, sha1
    _fastsha1 = sha1 = _sha1
    return _sha1(s)
115
115
def md5(s=''):
    # Lazily locate an md5 constructor (hashlib when available, the legacy
    # md5 module otherwise) and replace this wrapper with it so subsequent
    # calls pay no import cost.
    try:
        from hashlib import md5 as _md5
    except ImportError:
        from md5 import md5 as _md5
    global md5
    md5 = _md5
    return _md5(s)
124
124
# digest name -> constructor, populated with what this interpreter offers
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha1', 'md5']

try:
    import hashlib
except ImportError:
    pass
else:
    DIGESTS['sha512'] = hashlib.sha512
    DIGESTS_BY_STRENGTH.insert(0, 'sha512')

# sanity check: every listed digest must have a registered constructor
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
143
143
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """feed data into every configured digest"""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """return the hex digest computed so far for digest type 'key'"""
        if key not in DIGESTS:
            # fix: report the key that was actually looked up; the original
            # used 'k', a stale variable from an unrelated loop
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
190
190
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # feed everything that passes through into the digest computation
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        # check the byte count first, then each expected digest
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
222
222
# Keep the name 'buffer' usable everywhere: reuse the builtin when the
# interpreter provides one, otherwise supply an equivalent.
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] >= 3:
        # memoryview provides a zero-copy slice on Python 3
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]
    else:
        # plain slicing (which copies) as a last resort on Python 2
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
232
232
import subprocess

# On POSIX, spawned children should not inherit our file descriptors.
closefds = os.name == 'posix'

# read size used by bufferedinputpipe when refilling its buffer
_chunksize = 4096
237
237
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []    # buffered chunks, oldest first
        self._eof = False    # set once os.read returns no data
        self._lenbuf = 0     # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True if any data is currently buffered

        This will be used externally as a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        # delegate to the wrapped pipe
        return self._input.closed

    def fileno(self):
        # expose the underlying fd so this object can be select()ed on
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep pulling from the fd until 'size' bytes are buffered or EOF
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first newline in the most recent chunk (-1: none)
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        # refill until a newline shows up or the stream ends
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks into one string before slicing
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        # whatever was not consumed becomes the (single-chunk) buffer
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
331
331
def popen2(cmd, env=None, newlines=False):
    """Run cmd through a shell and return its (stdin, stdout) handles.

    Setting bufsize to -1 lets the system decide the buffer size; the
    default of 0 (unbuffered) performs poorly on Mac OS X:
    http://bugs.python.org/issue4194
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
342
342
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but return only (stdin, stdout, stderr),
    dropping the Popen object."""
    pipes = popen4(cmd, env, newlines)
    return pipes[:3]
346
346
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run cmd through a shell; return (stdin, stdout, stderr, proc)."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
355
355
def version():
    """Return version information if available."""
    try:
        # generated at build time; absent when running from a source tree
        import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
363
363
# used by parsedate
# Date/time format strings tried in order when parsing user-supplied dates.
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# Additional, more ambiguous formats (year/month only) accepted in contexts
# that allow extended date matching.
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
398
398
def cachefunc(func):
    '''cache the result of function calls

    Results are memoized indefinitely, keyed on positional arguments.
    XXX doesn't handle keywords args
    '''
    # func.__code__ is the Python 2.6+/3.x spelling of the Python-2-only
    # func.func_code attribute; using it keeps this helper portable.
    argcount = func.__code__.co_argcount
    if argcount == 0:
        cache = []
        def f():
            # compute once and serve the single cached value forever
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
424
424
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-assigning an existing key moves
    it to the end, and insert() places a key at an arbitrary position.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # setting an existing key moves it to the end of the order
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            # items() instead of the Python-2-only iteritems() so this
            # works under both Python 2 and 3
            src = src.items()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # honor dict.pop()'s contract by returning the popped value (or the
        # supplied default); the previous version silently discarded it
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place the key at a specific position in the iteration order
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
469
469
class lrucachedict(object):
    '''cache most recent gets from or sets to this dictionary'''

    def __init__(self, maxsize):
        self._cache = {}                   # key -> value
        self._maxsize = maxsize            # eviction threshold
        self._order = collections.deque()  # keys, least recently used first

    def _touch(self, key):
        # move an existing key to the most-recently-used end
        self._order.remove(key)
        self._order.append(key)

    def __getitem__(self, key):
        result = self._cache[key]
        self._touch(key)
        return result

    def __setitem__(self, key, value):
        if key in self._cache:
            self._touch(key)
        else:
            if len(self._cache) >= self._maxsize:
                # make room by evicting the least recently used entry
                del self._cache[self._order.popleft()]
            self._order.append(key)
        self._cache[key] = value

    def __contains__(self, key):
        return key in self._cache

    def clear(self):
        self._cache.clear()
        self._order = collections.deque()
498
498
def lrucachefunc(func):
    '''cache most recent results of function calls

    At most ~20 distinct argument tuples are retained; the least recently
    used entry is evicted when the cache grows past that.
    '''
    cache = {}
    order = collections.deque()
    # func.__code__ works on Python 2.6+ and 3.x; func_code is 2-only
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    # evict the least recently used result
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
525
525
class propertycache(object):
    """Descriptor that computes an attribute once and caches it on the
    instance.

    The first access runs the wrapped function and stores the result in
    the instance __dict__ (via cachevalue), so later lookups never reach
    this descriptor again.
    """

    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
538
538
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
545
545
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname = None
    outname = None
    try:
        # stage the input in a temporary file for the command to read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        infile = os.fdopen(infd, 'wb')
        infile.write(s)
        infile.close()
        # pre-create the output file the command will write into
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # OpenVMS reports success through an odd status value
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        outfile = open(outname, 'rb')
        try:
            return outfile.read()
        finally:
            outfile.close()
    finally:
        # best-effort removal of both temporary files
        for name in (inname, outname):
            try:
                if name:
                    os.unlink(name)
            except OSError:
                pass
582
582
# maps a filter-spec prefix to the function implementing that filter
# mechanism; filter() below dispatches on these prefixes
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
587
587
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, fltfn in filtertable.iteritems():
        if not cmd.startswith(prefix):
            continue
        # strip the mechanism prefix and hand the rest to its handler
        return fltfn(s, cmd[len(prefix):].lstrip())
    # no recognized prefix: treat the whole command as a pipe filter
    return pipefilter(s, cmd)
594
594
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    return '\0' in s
598
598
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def _floorlog2(n):
        # index of the highest set bit; 0 for n == 0
        if not n:
            return 0
        bits = 0
        while n:
            n >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendingsize = 0
    for piece in source:
        pending.append(piece)
        pendingsize += len(piece)
        if pendingsize < min:
            continue
        if min < max:
            # grow the threshold: at least double it, or jump straight
            # to the largest power of two not exceeding what we buffered
            min <<= 1
            grown = 1 << _floorlog2(pendingsize)
            if grown > min:
                min = grown
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendingsize = 0
    if pending:
        yield ''.join(pending)
629
629
# re-export the commonly used exception so util callers can raise Abort
# without importing the error module themselves
Abort = error.Abort
631
631
def always(fn):
    '''matcher predicate that accepts every input'''
    return True
634
634
def never(fn):
    '''matcher predicate that rejects every input'''
    return False
637
637
def nogc(func):
    """decorator that disables the garbage collector for the call's duration

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        # remember the previous state so an already-disabled collector
        # is not re-enabled behind the caller's back
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()
    return wrapper
659
659
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    fromparts = splitpath(n1)
    toparts = n2.split('/')
    # skip the longest run of common leading components
    common = 0
    limit = min(len(fromparts), len(toparts))
    while common < limit and fromparts[common] == toparts[common]:
        common += 1
    # climb out of what remains of n1, then descend into n2
    up = ['..'] * (len(fromparts) - common)
    return os.sep.join(up + toparts[common:]) or '.'
685
685
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):    # new py2exe
        return True
    if safehasattr(sys, "importers"): # old py2exe
        return True
    return imp.is_frozen("__main__")  # tools/freeze
695
695
# the location of data files matching the source code
if mainfrozen():
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

# cached path of the 'hg' executable; lazily filled in by hgexecutable()
_hgexecutable = None
706
706
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        # probe the candidates in decreasing order of authority
        hgenv = os.environ.get('HG')
        if hgenv:
            path = hgenv
        elif mainfrozen():
            path = sys.executable
        else:
            mainmod = sys.modules['__main__']
            mainscript = getattr(mainmod, '__file__', '')
            if os.path.basename(mainscript) == 'hg':
                path = mainmod.__file__
            else:
                path = findexe('hg') or os.path.basename(sys.argv[0])
        _sethgexecutable(path)
    return _hgexecutable
725
725
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    # cached value handed back by hgexecutable()
    _hgexecutable = path
730
730
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    # use a None sentinel instead of a mutable {} default argument
    if environ is None:
        environ = {}
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if cwd is not None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or out == sys.__stdout__:
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # stream the child's combined stdout/stderr into 'out'
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # on OpenVMS an odd status code means success
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
787
787
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback only one frame deep means the call itself blew
            # up, i.e. the arguments did not match func's signature
            tb = traceback.extract_tb(sys.exc_info()[2])
            if len(tb) == 1:
                raise error.SignatureError
            raise

    return check
799
799
def copyfile(src, dest, hardlink=False):
    "copy a file, preserving mode and atime/mtime"
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink rather than copying what it points at
        os.symlink(os.readlink(src), dest)
        return
    try:
        shutil.copyfile(src, dest)
        shutil.copymode(src, dest)
    except shutil.Error as inst:
        raise Abort(str(inst))
820
820
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    copied = 0

    if hardlink is None:
        # default to hardlinking when src and dst live on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if not os.path.isdir(src):
        # single file: link it, or fall back to a plain copy on failure
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        copied += 1
        progress(topic, copied)
    else:
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            subsrc = os.path.join(src, name)
            subdst = os.path.join(dst, name)
            def subprogress(t, pos):
                # offset child progress by what was already processed;
                # the late binding of 'copied' is intentional
                if pos is not None:
                    return progress(t, pos + copied)
            hardlink, subcount = copyfiles(subsrc, subdst, hardlink,
                                           progress=subprogress)
            copied += subcount
    progress(topic, None)

    return hardlink, copied
857
857
# device names that win32 refuses as file basenames, whatever the extension
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# characters that may not appear anywhere in a win32 filename
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # normalize to '/' separators and inspect each path component
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # control characters cannot appear in win32 filenames
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # reserved device names apply to the part before the first dot
        # (e.g. 'con.xml' is still 'con' to Windows)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # NOTE(review): "n not in '..'" is a substring test, so it exempts
        # both '.' and '..' components from the trailing-dot check —
        # presumably intentional; confirm before changing
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
908
908
# pick the filename validity check for the host OS: the native Windows
# rules on nt, the platform module's hook everywhere else
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
913
913
def makelock(info, pathname):
    '''create a lock file at pathname whose payload is info.

    A symlink is preferred (created atomically and readable without
    opening the file); where symlinks are unavailable, fall back to an
    exclusively-created regular file.'''
    try:
        return os.symlink(info, pathname)
    except OSError as err:
        # EEXIST means someone else holds the lock: let the caller know.
        # any other symlink failure: fall through to a plain file.
        if err.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
926
926
def readlock(pathname):
    '''read the payload of a lock file created by makelock, whether it
    is a symlink or a regular file'''
    try:
        return os.readlink(pathname)
    except OSError as err:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported here.
        # either way, fall back to reading a plain lock file.
        if err.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    contents = fp.read()
    fp.close()
    return contents
939
939
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # file-like object without a real descriptor; stat it by name
        return os.stat(fp.name)
    return os.fstat(fd)
946
946
947 # File system features
947 # File system features
948
948
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirpath, base = os.path.split(path)
    # fold the final component the other way and probe for it
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        return True # no evidence against case sensitivity
    try:
        st2 = os.lstat(os.path.join(dirpath, folded))
    except OSError:
        # folded variant does not exist: names are distinct
        return True
    # identical stat means both spellings name the same file
    return st1 != st2
971
971
try:
    import re2
    # None means "re2 importable but not yet verified to work";
    # _re._checkre2 below probes it lazily (see issue3964)
    _re2 = None
except ImportError:
    _re2 = False
977
977
class _re(object):
    # facade over the stdlib 're' module that transparently prefers the
    # faster 're2' engine when it is available and supports the pattern
    def _checkre2(self):
        # resolve the module-level _re2 tri-state (None = unknown) to a
        # definite True/False by actually exercising re2
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            # NOTE(review): with a lazy importer the ImportError can
            # surface here rather than at the import statement —
            # presumably why it is caught; confirm against demandimport
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes no flags argument; encode them inline instead
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 cannot handle; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
1020
1020
# module-level singleton; use util.re.compile etc. to pick up re2 when present
re = _re()
1022
1022
# per-directory cache of {normcased name: on-disk name} mappings
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # (str.replace returns a new string: the result must be kept, or the
    # backslash separator would be left unescaped in the character
    # classes below and be misparsed as an escape by the regexp engine)
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through untouched
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1065
1065
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    probe = testfile + ".hgtmp1"
    if os.path.lexists(probe):
        return False
    try:
        posixfile(probe, 'w').close()
    except IOError:
        return False

    link = testfile + ".hgtmp2"
    fh = None
    try:
        oslink(probe, link)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fh = posixfile(link)
        return nlinks(link) > 1
    except OSError:
        return False
    finally:
        if fh is not None:
            fh.close()
        # best-effort cleanup of both scratch files
        for tmp in (probe, link):
            try:
                os.unlink(tmp)
            except OSError:
                pass
1097
1097
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # short-circuits to a falsy value on platforms without an altsep
    return os.altsep and path.endswith(os.altsep)
1101
1101
def splitpath(path):
    '''Break path into its os.sep-separated components.

    os.altsep is deliberately ignored: this is just a named spelling of
    "path.split(os.sep)". Run os.path.normpath() on the path first if it
    may need normalizing.'''
    return path.split(os.sep)
1109
1109
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1124
1124
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirname, basename = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % basename, dir=dirname)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want.  If the original file already exists, just copy its
    # mode.  Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy; the fresh empty temp file will do
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a stray temp file behind on failure
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file.  Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible.  If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegate the common file operations straight to the
        # underlying file object
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # renaming over the original publishes the new contents
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # throw away the temporary copy without touching the original
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    ``mode``, when given, is applied to every directory created here;
    ``notindexed`` is forwarded to makedir().
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root; nothing more to create
            raise
        # build the missing ancestors first, then retry this directory
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        # create ancestors first (no-op when they already exist)
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
def readfile(path):
    """Return the entire contents of the file at path (binary mode)."""
    with open(path, 'rb') as fp:
        return fp.read()
def writefile(path, text):
    """Replace the contents of the file at path with text (binary mode)."""
    with open(path, 'wb') as fp:
        fp.write(text)
def appendfile(path, text):
    """Append text to the file at path (binary mode), creating it if needed."""
    with open(path, 'ab') as fp:
        fp.write(text)
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # cap individual chunks at 256k so one huge incoming chunk
            # cannot dominate memory usage of the read queue
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        left = l
        buf = []
        queue = self._queue
        while left is None or left > 0:
            # refill the queue up to ~256k ahead of what was asked for
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            chunk = queue.popleft()
            if left is not None:
                left -= len(chunk)
                if left < 0:
                    # chunk overshoots the request: keep the tail queued
                    queue.appendleft(chunk[left:])
                    buf.append(chunk[:left])
                    continue
            buf.append(chunk)

        return ''.join(buf)
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data).  Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # never request more than the remaining limit allows
        nbytes = size if limit is None else min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # the difference between UTC and local wall-clock readings of the
    # same instant is exactly the local timezone offset
    delta = (datetime.datetime.utcfromtimestamp(timestamp) -
             datetime.datetime.fromtimestamp(timestamp))
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # %z/%1/%2 render the stored offset instead of whatever
        # strftime believes the local zone to be
        sign = "-" if tz > 0 else "+"
        minutes = abs(tz) // 60
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    return time.strftime(format, t)
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    # delegate to datestr with a fixed YYYY-MM-DD format
    return datestr(date, format='%Y-%m-%d')
def parsetimezone(tz):
    """parse a timezone string and return an offset integer"""
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        # numeric form: +HHMM/-HHMM east of UTC.  Internal offsets are
        # stored as seconds west of UTC, hence the sign flip.
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -sign * (hours * 60 + minutes) * 60
    if tz == "GMT" or tz == "UTC":
        return 0
    # not a recognized timezone string
    return None
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    The last whitespace-separated token is treated as a timezone when it
    parses as one (see parsetimezone); otherwise the local timezone is
    assumed.  Fields missing from ``format`` are filled in from
    ``defaults``, which maps field groups (e.g. "HI", "mb") to a
    (biased-default, today) value pair."""
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # strip the recognized timezone token before strptime
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # defaults[part] is (biased, today); once any more specific
            # field was given explicitly, fill with today's value
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: "unixtime offset" pair of integers
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                b = "00" if part[0] in "HMS" else "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified fields down (Jan 1st, midnight)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unspecified fields up (Dec 31st, 23:59:59); try the
        # longest month lengths first and fall back for short months
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': anything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit 'DATE to DATE' range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a bare date matches anything within its accuracy window
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the domain part, if any
    idx = user.find('@')
    if idx >= 0:
        user = user[:idx]
    # drop a leading real name ("Real Name <login")
    idx = user.find('<')
    if idx >= 0:
        user = user[idx + 1:]
    # keep only the first word
    idx = user.find(' ')
    if idx >= 0:
        user = user[:idx]
    # and only the portion before the first dot
    idx = user.find('.')
    if idx >= 0:
        user = user[:idx]
    return user
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the domain
    idx = user.find('@')
    if idx >= 0:
        user = user[:idx]
    # strip a leading real name before '<'
    idx = user.find('<')
    if idx >= 0:
        user = user[idx + 1:]
    return user
def email(author):
    '''get email of author.'''
    end = author.find('>')
    if end == -1:
        end = None
    # text between '<' and '>'; when no brackets are present, find('<')
    # is -1 so the slice starts at 0 and the whole string is returned
    return author[author.find('<') + 1:end]
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # width-aware truncation is delegated to encoding.trim
    return encoding.trim(text, maxlength, ellipsis='...')
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # pick the first (multiplier, divisor, format) row whose
        # threshold the count reaches; fall back to the last row
        for multiplier, divisor, format in unittable:
            if count >= divisor * multiplier:
                return format % (count / float(divisor))
        return unittable[-1][2] % count

    return go
# render a byte count with a human-readable unit suffix, keeping two to
# three significant digits at each magnitude
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
def uirepr(s):
    """repr() tweaked for user display.

    Collapses doubled backslashes so Windows paths stay readable."""
    return repr(s).replace('\\\\', '\\')
1633 # delay import of textwrap
1633 # delay import of textwrap
def MBTextWrapper(**kwargs):
    """Build a width-aware TextWrapper instance (textwrap import is delayed).

    On first call this defines the wrapper class and rebinds the module-level
    name MBTextWrapper to that class, so later calls construct instances
    directly without re-running the class body.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr at the last character that still fits in
            # space_left terminal columns; returns (head, remainder).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                # no room to break the word and the line is empty: emit the
                # whole chunk anyway rather than loop forever
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chunks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # memoize: replace this factory with the class itself so the class
    # body above is only executed once per process
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1736
1736
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a locally-encoded byte string to the given column width.

    initindent prefixes the first output line and hangindent every
    subsequent one; both count toward width.  Wrapping is terminal-column
    aware via MBTextWrapper, and the result is re-encoded to the local
    encoding.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    uline = line.decode(encoding.encoding, encoding.encodingmode)
    uinit = initindent.decode(encoding.encoding, encoding.encodingmode)
    uhang = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=uinit,
                            subsequent_indent=uhang)
    return wrapper.fill(uline).encode(encoding.encoding)
1749
1749
def iterlines(iterator):
    """Yield each line of every text chunk produced by iterator."""
    for text in iterator:
        for singleline in text.splitlines():
            yield singleline
1754
1754
def expandpath(path):
    """Expand environment variables, then ~user, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
1757
1757
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    return [sys.executable] if mainfrozen() else gethgcmd()
1768
1768
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # record (pid, status) of whichever child exited while we poll
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on all platforms (e.g. Windows)
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): terminated holds (pid, status) tuples from
            # os.wait(), so 'pid in terminated' can never match the bare
            # int pid; child liveness is effectively decided by
            # testpid(pid) alone -- confirm intent.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            # restore the caller's SIGCHLD disposition
            signal.signal(signal.SIGCHLD, prevhandler)
1803
1803
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    The caller's mapping is never modified.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # work on a copy: the original code inserted prefix_char into the
        # caller's dict, leaking an entry back to the caller
        mapping = mapping.copy()
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1828
1828
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
1845
1845
1846 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1846 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1847 '0': False, 'no': False, 'false': False, 'off': False,
1847 '0': False, 'no': False, 'false': False, 'off': False,
1848 'never': False}
1848 'never': False}
1849
1849
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    key = s.lower()
    return _booleans.get(key, None)
1856
1856
1857 _hexdig = '0123456789ABCDEFabcdef'
1857 _hexdig = '0123456789ABCDEFabcdef'
1858 _hextochr = dict((a + b, chr(int(a + b, 16)))
1858 _hextochr = dict((a + b, chr(int(a + b, 16)))
1859 for a in _hexdig for b in _hexdig)
1859 for a in _hexdig for b in _hexdig)
1860
1860
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    pieces = s.split('%')
    # fastpath
    if len(pieces) == 1:
        return s
    out = pieces[0]
    for frag in pieces[1:]:
        try:
            out += _hextochr[frag[:2]] + frag[2:]
        except KeyError:
            # not a valid two-digit escape: keep the '%' literally
            out += '%' + frag
        except UnicodeDecodeError:
            out += unichr(int(frag[:2], 16)) + frag[2:]
    return out
1880
1880
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters left unescaped by urllib.quote in user/passwd components
    _safechars = "!~*'()+"
    # characters left unescaped in path/fragment components
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        # _localpath stays True for plain filesystem paths (no scheme)
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        # show only the components that were actually parsed
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals are emitted verbatim
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url-without-credentials, auth-tuple-or-None)."""
        user, passwd = self.user, self.passwd
        try:
            # temporarily drop credentials so str(self) omits them
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """Report whether this URL/path is absolute (cannot be joined)."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return a local filesystem path for file:/bundle: URLs,
        otherwise the original path string."""
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2167
2167
def hasscheme(path):
    '''Report whether path parses as a URL with a scheme component.'''
    parsed = url(path)
    return bool(parsed.scheme)
2170
2170
def hasdriveletter(path):
    '''Report whether path begins with a Windows drive letter ("c:").

    Falsy inputs (None, '') are returned unchanged, mirroring the
    short-circuit value of the original boolean expression.
    '''
    if not path:
        return path
    return path[0:1].isalpha() and path[1:2] == ':'
2173
2173
def urllocalpath(path):
    '''Return the local filesystem path for a URL-ish path string,
    parsing neither query nor fragment components.'''
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
2176
2176
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # keep the user visible but mask the password
        parsed.passwd = '***'
    return str(parsed)
2183
2183
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2189
2189
def isatty(fd):
    '''Return fd.isatty(), treating objects without that method as
    non-ttys rather than raising.'''
    try:
        check = fd.isatty
        return check()
    except AttributeError:
        return False
2195
2195
# Human-readable formatter for an elapsed time given in seconds, built
# by unitcountfn (defined earlier in this file). Each triple appears to
# be (threshold, divisor, format): the first row whose threshold fits
# the scaled value is used — TODO confirm against unitcountfn. Units
# range from whole seconds down to nanoseconds.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2211
2211
# Current indentation depth of nested @timed calls. A one-element list
# (not a plain int) so that every wrapper mutates the same shared cell.
_timenesting = [0]
2213
2213
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        depth = 2
        # bump the shared nesting counter so nested timed calls indent
        _timenesting[0] += depth
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= depth
            sys.stderr.write('%s%s: %s\n'
                             % (' ' * _timenesting[0], func.__name__,
                                timecount(elapsed)))
    return wrapper
2238
2238
# Unit suffixes understood by sizetoint(), each paired with its byte
# multiplier. Matching uses endswith() in this exact order, so the
# two-letter suffixes ('kb', 'mb', 'gb') are tried before plain 'b'.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                value = float(spec[:-len(suffix)])
                return int(value * multiplier)
        # no recognized suffix: interpret as a plain byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2260
2260
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, hookfn) pairs; sorted lazily on invocation
        self._hooks = []

    def add(self, source, hook):
        '''Register hook; source determines its ordering at call time.'''
        self._hooks.append((source, hook))

    def __call__(self, *args):
        '''Invoke every hook with args, ordered by source name, and
        return the list of their results.'''
        self._hooks.sort(key=lambda entry: entry[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
2278
2278
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so interleaved output stays ordered
        otherf.flush()
    f.write('%s at:\n' % msg)
    stack = traceback.extract_stack()[:-skip - 1]
    entries = []
    for fn, ln, func, _text in stack:
        entries.append(('%s:%s' % (fn, ln), func))
    if entries:
        # pad the file:line column so the function names line up
        width = max(len(fnln) for fnln, _func in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (width, fnln, func))
    f.flush()
2295
2295
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> number of tracked paths beneath it
        self._dirs = {}
        addpath = self.addpath
        if skip is not None and safehasattr(map, 'iteritems'):
            # dirstate-style map: skip entries whose state matches 'skip'
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        '''Count path under each of its ancestor directories.'''
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                # shallower ancestors were counted when this one first
                # appeared, so we can stop early
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        '''Un-count path; drop directories whose count reaches zero.'''
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2331
2331
# Prefer the C implementation of dirs from the parsers extension module
# when it is available; the pure-Python class above is the fallback.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2334
2334
def finddirs(path):
    '''Yield each ancestor directory of path, deepest first.

    "a/b/c" yields "a/b" then "a"; a path without '/' yields nothing.
    '''
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
2340
2340
# convenient shortcut, short enough to type in a debugger session
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now