util: add a file handle wrapper class that does hash digest validation...
Author: Mike Hommey
Changeset: r22963:56e04741 (branch: default)
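The new digestchecker class (added below at new lines 186-216) wraps any file-like object, feeds everything read through a digester, and has validate() raise util.Abort when either the byte count or any expected hex digest does not match what was read. A minimal usage sketch follows; the file name, size, and digest value are placeholders for illustration and are not part of this changeset:

    from mercurial import util

    expected_sha1 = '0123456789abcdef0123456789abcdef01234567'  # placeholder hex digest
    fh = open('payload.bin', 'rb')                               # hypothetical file
    checked = util.digestchecker(fh, 1024, {'sha1': expected_sha1})
    while checked.read(4096):   # every read() also updates the underlying digester
        pass
    checked.validate()          # raises util.Abort on a size or digest mismatch
    fh.close()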
@@ -1,2153 +1,2184 @@
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding
18 import error, osutil, encoding
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib
22 import imp, socket, urllib
23
23
24 if os.name == 'nt':
24 if os.name == 'nt':
25 import windows as platform
25 import windows as platform
26 else:
26 else:
27 import posix as platform
27 import posix as platform
28
28
29 cachestat = platform.cachestat
29 cachestat = platform.cachestat
30 checkexec = platform.checkexec
30 checkexec = platform.checkexec
31 checklink = platform.checklink
31 checklink = platform.checklink
32 copymode = platform.copymode
32 copymode = platform.copymode
33 executablepath = platform.executablepath
33 executablepath = platform.executablepath
34 expandglobs = platform.expandglobs
34 expandglobs = platform.expandglobs
35 explainexit = platform.explainexit
35 explainexit = platform.explainexit
36 findexe = platform.findexe
36 findexe = platform.findexe
37 gethgcmd = platform.gethgcmd
37 gethgcmd = platform.gethgcmd
38 getuser = platform.getuser
38 getuser = platform.getuser
39 groupmembers = platform.groupmembers
39 groupmembers = platform.groupmembers
40 groupname = platform.groupname
40 groupname = platform.groupname
41 hidewindow = platform.hidewindow
41 hidewindow = platform.hidewindow
42 isexec = platform.isexec
42 isexec = platform.isexec
43 isowner = platform.isowner
43 isowner = platform.isowner
44 localpath = platform.localpath
44 localpath = platform.localpath
45 lookupreg = platform.lookupreg
45 lookupreg = platform.lookupreg
46 makedir = platform.makedir
46 makedir = platform.makedir
47 nlinks = platform.nlinks
47 nlinks = platform.nlinks
48 normpath = platform.normpath
48 normpath = platform.normpath
49 normcase = platform.normcase
49 normcase = platform.normcase
50 openhardlinks = platform.openhardlinks
50 openhardlinks = platform.openhardlinks
51 oslink = platform.oslink
51 oslink = platform.oslink
52 parsepatchoutput = platform.parsepatchoutput
52 parsepatchoutput = platform.parsepatchoutput
53 pconvert = platform.pconvert
53 pconvert = platform.pconvert
54 popen = platform.popen
54 popen = platform.popen
55 posixfile = platform.posixfile
55 posixfile = platform.posixfile
56 quotecommand = platform.quotecommand
56 quotecommand = platform.quotecommand
57 readpipe = platform.readpipe
57 readpipe = platform.readpipe
58 rename = platform.rename
58 rename = platform.rename
59 samedevice = platform.samedevice
59 samedevice = platform.samedevice
60 samefile = platform.samefile
60 samefile = platform.samefile
61 samestat = platform.samestat
61 samestat = platform.samestat
62 setbinary = platform.setbinary
62 setbinary = platform.setbinary
63 setflags = platform.setflags
63 setflags = platform.setflags
64 setsignalhandler = platform.setsignalhandler
64 setsignalhandler = platform.setsignalhandler
65 shellquote = platform.shellquote
65 shellquote = platform.shellquote
66 spawndetached = platform.spawndetached
66 spawndetached = platform.spawndetached
67 split = platform.split
67 split = platform.split
68 sshargs = platform.sshargs
68 sshargs = platform.sshargs
69 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
69 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
70 statisexec = platform.statisexec
70 statisexec = platform.statisexec
71 statislink = platform.statislink
71 statislink = platform.statislink
72 termwidth = platform.termwidth
72 termwidth = platform.termwidth
73 testpid = platform.testpid
73 testpid = platform.testpid
74 umask = platform.umask
74 umask = platform.umask
75 unlink = platform.unlink
75 unlink = platform.unlink
76 unlinkpath = platform.unlinkpath
76 unlinkpath = platform.unlinkpath
77 username = platform.username
77 username = platform.username
78
78
79 # Python compatibility
79 # Python compatibility
80
80
81 _notset = object()
81 _notset = object()
82
82
83 def safehasattr(thing, attr):
83 def safehasattr(thing, attr):
84 return getattr(thing, attr, _notset) is not _notset
84 return getattr(thing, attr, _notset) is not _notset
85
85
86 def sha1(s=''):
86 def sha1(s=''):
87 '''
87 '''
88 Low-overhead wrapper around Python's SHA support
88 Low-overhead wrapper around Python's SHA support
89
89
90 >>> f = _fastsha1
90 >>> f = _fastsha1
91 >>> a = sha1()
91 >>> a = sha1()
92 >>> a = f()
92 >>> a = f()
93 >>> a.hexdigest()
93 >>> a.hexdigest()
94 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
94 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
95 '''
95 '''
96
96
97 return _fastsha1(s)
97 return _fastsha1(s)
98
98
99 def _fastsha1(s=''):
99 def _fastsha1(s=''):
100 # This function will import sha1 from hashlib or sha (whichever is
100 # This function will import sha1 from hashlib or sha (whichever is
101 # available) and overwrite itself with it on the first call.
101 # available) and overwrite itself with it on the first call.
102 # Subsequent calls will go directly to the imported function.
102 # Subsequent calls will go directly to the imported function.
103 if sys.version_info >= (2, 5):
103 if sys.version_info >= (2, 5):
104 from hashlib import sha1 as _sha1
104 from hashlib import sha1 as _sha1
105 else:
105 else:
106 from sha import sha as _sha1
106 from sha import sha as _sha1
107 global _fastsha1, sha1
107 global _fastsha1, sha1
108 _fastsha1 = sha1 = _sha1
108 _fastsha1 = sha1 = _sha1
109 return _sha1(s)
109 return _sha1(s)
110
110
111 def md5(s=''):
111 def md5(s=''):
112 try:
112 try:
113 from hashlib import md5 as _md5
113 from hashlib import md5 as _md5
114 except ImportError:
114 except ImportError:
115 from md5 import md5 as _md5
115 from md5 import md5 as _md5
116 global md5
116 global md5
117 md5 = _md5
117 md5 = _md5
118 return _md5(s)
118 return _md5(s)
119
119
120 DIGESTS = {
120 DIGESTS = {
121 'md5': md5,
121 'md5': md5,
122 'sha1': sha1,
122 'sha1': sha1,
123 }
123 }
124 # List of digest types from strongest to weakest
124 # List of digest types from strongest to weakest
125 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
125 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
126
126
127 try:
127 try:
128 import hashlib
128 import hashlib
129 DIGESTS.update({
129 DIGESTS.update({
130 'sha512': hashlib.sha512,
130 'sha512': hashlib.sha512,
131 })
131 })
132 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
132 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
133 except ImportError:
133 except ImportError:
134 pass
134 pass
135
135
136 for k in DIGESTS_BY_STRENGTH:
136 for k in DIGESTS_BY_STRENGTH:
137 assert k in DIGESTS
137 assert k in DIGESTS
138
138
139 class digester(object):
139 class digester(object):
140 """helper to compute digests.
140 """helper to compute digests.
141
141
142 This helper can be used to compute one or more digests given their name.
142 This helper can be used to compute one or more digests given their name.
143
143
144 >>> d = digester(['md5', 'sha1'])
144 >>> d = digester(['md5', 'sha1'])
145 >>> d.update('foo')
145 >>> d.update('foo')
146 >>> [k for k in sorted(d)]
146 >>> [k for k in sorted(d)]
147 ['md5', 'sha1']
147 ['md5', 'sha1']
148 >>> d['md5']
148 >>> d['md5']
149 'acbd18db4cc2f85cedef654fccc4a4d8'
149 'acbd18db4cc2f85cedef654fccc4a4d8'
150 >>> d['sha1']
150 >>> d['sha1']
151 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
151 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
152 >>> digester.preferred(['md5', 'sha1'])
152 >>> digester.preferred(['md5', 'sha1'])
153 'sha1'
153 'sha1'
154 """
154 """
155
155
156 def __init__(self, digests, s=''):
156 def __init__(self, digests, s=''):
157 self._hashes = {}
157 self._hashes = {}
158 for k in digests:
158 for k in digests:
159 if k not in DIGESTS:
159 if k not in DIGESTS:
160 raise Abort(_('unknown digest type: %s') % k)
160 raise Abort(_('unknown digest type: %s') % k)
161 self._hashes[k] = DIGESTS[k]()
161 self._hashes[k] = DIGESTS[k]()
162 if s:
162 if s:
163 self.update(s)
163 self.update(s)
164
164
165 def update(self, data):
165 def update(self, data):
166 for h in self._hashes.values():
166 for h in self._hashes.values():
167 h.update(data)
167 h.update(data)
168
168
169 def __getitem__(self, key):
169 def __getitem__(self, key):
170 if key not in DIGESTS:
170 if key not in DIGESTS:
171 raise Abort(_('unknown digest type: %s') % key)
171 raise Abort(_('unknown digest type: %s') % key)
172 return self._hashes[key].hexdigest()
172 return self._hashes[key].hexdigest()
173
173
174 def __iter__(self):
174 def __iter__(self):
175 return iter(self._hashes)
175 return iter(self._hashes)
176
176
177 @staticmethod
177 @staticmethod
178 def preferred(supported):
178 def preferred(supported):
179 """returns the strongest digest type in both supported and DIGESTS."""
179 """returns the strongest digest type in both supported and DIGESTS."""
180
180
181 for k in DIGESTS_BY_STRENGTH:
181 for k in DIGESTS_BY_STRENGTH:
182 if k in supported:
182 if k in supported:
183 return k
183 return k
184 return None
184 return None
185
185
186 class digestchecker(object):
187 """file handle wrapper that additionally checks content against a given
188 size and digests.
189
190 d = digestchecker(fh, size, {'md5': '...'})
191
192 When multiple digests are given, all of them are validated.
193 """
194
195 def __init__(self, fh, size, digests):
196 self._fh = fh
197 self._size = size
198 self._got = 0
199 self._digests = dict(digests)
200 self._digester = digester(self._digests.keys())
201
202 def read(self, length=-1):
203 content = self._fh.read(length)
204 self._digester.update(content)
205 self._got += len(content)
206 return content
207
208 def validate(self):
209 if self._size != self._got:
210 raise Abort(_('size mismatch: expected %d, got %d') %
211 (self._size, self._got))
212 for k, v in self._digests.items():
213 if v != self._digester[k]:
214 raise Abort(_('%s mismatch: expected %s, got %s') %
215 (k, v, self._digester[k]))
216
186 try:
217 try:
187 buffer = buffer
218 buffer = buffer
188 except NameError:
219 except NameError:
189 if sys.version_info[0] < 3:
220 if sys.version_info[0] < 3:
190 def buffer(sliceable, offset=0):
221 def buffer(sliceable, offset=0):
191 return sliceable[offset:]
222 return sliceable[offset:]
192 else:
223 else:
193 def buffer(sliceable, offset=0):
224 def buffer(sliceable, offset=0):
194 return memoryview(sliceable)[offset:]
225 return memoryview(sliceable)[offset:]
195
226
196 import subprocess
227 import subprocess
197 closefds = os.name == 'posix'
228 closefds = os.name == 'posix'
198
229
199 def popen2(cmd, env=None, newlines=False):
230 def popen2(cmd, env=None, newlines=False):
200 # Setting bufsize to -1 lets the system decide the buffer size.
231 # Setting bufsize to -1 lets the system decide the buffer size.
201 # The default for bufsize is 0, meaning unbuffered. This leads to
232 # The default for bufsize is 0, meaning unbuffered. This leads to
202 # poor performance on Mac OS X: http://bugs.python.org/issue4194
233 # poor performance on Mac OS X: http://bugs.python.org/issue4194
203 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
234 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
204 close_fds=closefds,
235 close_fds=closefds,
205 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
236 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
206 universal_newlines=newlines,
237 universal_newlines=newlines,
207 env=env)
238 env=env)
208 return p.stdin, p.stdout
239 return p.stdin, p.stdout
209
240
210 def popen3(cmd, env=None, newlines=False):
241 def popen3(cmd, env=None, newlines=False):
211 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
242 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
212 return stdin, stdout, stderr
243 return stdin, stdout, stderr
213
244
214 def popen4(cmd, env=None, newlines=False):
245 def popen4(cmd, env=None, newlines=False):
215 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
246 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
216 close_fds=closefds,
247 close_fds=closefds,
217 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
248 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
218 stderr=subprocess.PIPE,
249 stderr=subprocess.PIPE,
219 universal_newlines=newlines,
250 universal_newlines=newlines,
220 env=env)
251 env=env)
221 return p.stdin, p.stdout, p.stderr, p
252 return p.stdin, p.stdout, p.stderr, p
222
253
223 def version():
254 def version():
224 """Return version information if available."""
255 """Return version information if available."""
225 try:
256 try:
226 import __version__
257 import __version__
227 return __version__.version
258 return __version__.version
228 except ImportError:
259 except ImportError:
229 return 'unknown'
260 return 'unknown'
230
261
231 # used by parsedate
262 # used by parsedate
232 defaultdateformats = (
263 defaultdateformats = (
233 '%Y-%m-%d %H:%M:%S',
264 '%Y-%m-%d %H:%M:%S',
234 '%Y-%m-%d %I:%M:%S%p',
265 '%Y-%m-%d %I:%M:%S%p',
235 '%Y-%m-%d %H:%M',
266 '%Y-%m-%d %H:%M',
236 '%Y-%m-%d %I:%M%p',
267 '%Y-%m-%d %I:%M%p',
237 '%Y-%m-%d',
268 '%Y-%m-%d',
238 '%m-%d',
269 '%m-%d',
239 '%m/%d',
270 '%m/%d',
240 '%m/%d/%y',
271 '%m/%d/%y',
241 '%m/%d/%Y',
272 '%m/%d/%Y',
242 '%a %b %d %H:%M:%S %Y',
273 '%a %b %d %H:%M:%S %Y',
243 '%a %b %d %I:%M:%S%p %Y',
274 '%a %b %d %I:%M:%S%p %Y',
244 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
275 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
245 '%b %d %H:%M:%S %Y',
276 '%b %d %H:%M:%S %Y',
246 '%b %d %I:%M:%S%p %Y',
277 '%b %d %I:%M:%S%p %Y',
247 '%b %d %H:%M:%S',
278 '%b %d %H:%M:%S',
248 '%b %d %I:%M:%S%p',
279 '%b %d %I:%M:%S%p',
249 '%b %d %H:%M',
280 '%b %d %H:%M',
250 '%b %d %I:%M%p',
281 '%b %d %I:%M%p',
251 '%b %d %Y',
282 '%b %d %Y',
252 '%b %d',
283 '%b %d',
253 '%H:%M:%S',
284 '%H:%M:%S',
254 '%I:%M:%S%p',
285 '%I:%M:%S%p',
255 '%H:%M',
286 '%H:%M',
256 '%I:%M%p',
287 '%I:%M%p',
257 )
288 )
258
289
259 extendeddateformats = defaultdateformats + (
290 extendeddateformats = defaultdateformats + (
260 "%Y",
291 "%Y",
261 "%Y-%m",
292 "%Y-%m",
262 "%b",
293 "%b",
263 "%b %Y",
294 "%b %Y",
264 )
295 )
265
296
266 def cachefunc(func):
297 def cachefunc(func):
267 '''cache the result of function calls'''
298 '''cache the result of function calls'''
268 # XXX doesn't handle keywords args
299 # XXX doesn't handle keywords args
269 if func.func_code.co_argcount == 0:
300 if func.func_code.co_argcount == 0:
270 cache = []
301 cache = []
271 def f():
302 def f():
272 if len(cache) == 0:
303 if len(cache) == 0:
273 cache.append(func())
304 cache.append(func())
274 return cache[0]
305 return cache[0]
275 return f
306 return f
276 cache = {}
307 cache = {}
277 if func.func_code.co_argcount == 1:
308 if func.func_code.co_argcount == 1:
278 # we gain a small amount of time because
309 # we gain a small amount of time because
279 # we don't need to pack/unpack the list
310 # we don't need to pack/unpack the list
280 def f(arg):
311 def f(arg):
281 if arg not in cache:
312 if arg not in cache:
282 cache[arg] = func(arg)
313 cache[arg] = func(arg)
283 return cache[arg]
314 return cache[arg]
284 else:
315 else:
285 def f(*args):
316 def f(*args):
286 if args not in cache:
317 if args not in cache:
287 cache[args] = func(*args)
318 cache[args] = func(*args)
288 return cache[args]
319 return cache[args]
289
320
290 return f
321 return f
291
322
292 try:
323 try:
293 collections.deque.remove
324 collections.deque.remove
294 deque = collections.deque
325 deque = collections.deque
295 except AttributeError:
326 except AttributeError:
296 # python 2.4 lacks deque.remove
327 # python 2.4 lacks deque.remove
297 class deque(collections.deque):
328 class deque(collections.deque):
298 def remove(self, val):
329 def remove(self, val):
299 for i, v in enumerate(self):
330 for i, v in enumerate(self):
300 if v == val:
331 if v == val:
301 del self[i]
332 del self[i]
302 break
333 break
303
334
304 class sortdict(dict):
335 class sortdict(dict):
305 '''a simple sorted dictionary'''
336 '''a simple sorted dictionary'''
306 def __init__(self, data=None):
337 def __init__(self, data=None):
307 self._list = []
338 self._list = []
308 if data:
339 if data:
309 self.update(data)
340 self.update(data)
310 def copy(self):
341 def copy(self):
311 return sortdict(self)
342 return sortdict(self)
312 def __setitem__(self, key, val):
343 def __setitem__(self, key, val):
313 if key in self:
344 if key in self:
314 self._list.remove(key)
345 self._list.remove(key)
315 self._list.append(key)
346 self._list.append(key)
316 dict.__setitem__(self, key, val)
347 dict.__setitem__(self, key, val)
317 def __iter__(self):
348 def __iter__(self):
318 return self._list.__iter__()
349 return self._list.__iter__()
319 def update(self, src):
350 def update(self, src):
320 for k in src:
351 for k in src:
321 self[k] = src[k]
352 self[k] = src[k]
322 def clear(self):
353 def clear(self):
323 dict.clear(self)
354 dict.clear(self)
324 self._list = []
355 self._list = []
325 def items(self):
356 def items(self):
326 return [(k, self[k]) for k in self._list]
357 return [(k, self[k]) for k in self._list]
327 def __delitem__(self, key):
358 def __delitem__(self, key):
328 dict.__delitem__(self, key)
359 dict.__delitem__(self, key)
329 self._list.remove(key)
360 self._list.remove(key)
330 def pop(self, key, *args, **kwargs):
361 def pop(self, key, *args, **kwargs):
331 dict.pop(self, key, *args, **kwargs)
362 dict.pop(self, key, *args, **kwargs)
332 try:
363 try:
333 self._list.remove(key)
364 self._list.remove(key)
334 except ValueError:
365 except ValueError:
335 pass
366 pass
336 def keys(self):
367 def keys(self):
337 return self._list
368 return self._list
338 def iterkeys(self):
369 def iterkeys(self):
339 return self._list.__iter__()
370 return self._list.__iter__()
340
371
341 class lrucachedict(object):
372 class lrucachedict(object):
342 '''cache most recent gets from or sets to this dictionary'''
373 '''cache most recent gets from or sets to this dictionary'''
343 def __init__(self, maxsize):
374 def __init__(self, maxsize):
344 self._cache = {}
375 self._cache = {}
345 self._maxsize = maxsize
376 self._maxsize = maxsize
346 self._order = deque()
377 self._order = deque()
347
378
348 def __getitem__(self, key):
379 def __getitem__(self, key):
349 value = self._cache[key]
380 value = self._cache[key]
350 self._order.remove(key)
381 self._order.remove(key)
351 self._order.append(key)
382 self._order.append(key)
352 return value
383 return value
353
384
354 def __setitem__(self, key, value):
385 def __setitem__(self, key, value):
355 if key not in self._cache:
386 if key not in self._cache:
356 if len(self._cache) >= self._maxsize:
387 if len(self._cache) >= self._maxsize:
357 del self._cache[self._order.popleft()]
388 del self._cache[self._order.popleft()]
358 else:
389 else:
359 self._order.remove(key)
390 self._order.remove(key)
360 self._cache[key] = value
391 self._cache[key] = value
361 self._order.append(key)
392 self._order.append(key)
362
393
363 def __contains__(self, key):
394 def __contains__(self, key):
364 return key in self._cache
395 return key in self._cache
365
396
366 def clear(self):
397 def clear(self):
367 self._cache.clear()
398 self._cache.clear()
368 self._order = deque()
399 self._order = deque()
369
400
370 def lrucachefunc(func):
401 def lrucachefunc(func):
371 '''cache most recent results of function calls'''
402 '''cache most recent results of function calls'''
372 cache = {}
403 cache = {}
373 order = deque()
404 order = deque()
374 if func.func_code.co_argcount == 1:
405 if func.func_code.co_argcount == 1:
375 def f(arg):
406 def f(arg):
376 if arg not in cache:
407 if arg not in cache:
377 if len(cache) > 20:
408 if len(cache) > 20:
378 del cache[order.popleft()]
409 del cache[order.popleft()]
379 cache[arg] = func(arg)
410 cache[arg] = func(arg)
380 else:
411 else:
381 order.remove(arg)
412 order.remove(arg)
382 order.append(arg)
413 order.append(arg)
383 return cache[arg]
414 return cache[arg]
384 else:
415 else:
385 def f(*args):
416 def f(*args):
386 if args not in cache:
417 if args not in cache:
387 if len(cache) > 20:
418 if len(cache) > 20:
388 del cache[order.popleft()]
419 del cache[order.popleft()]
389 cache[args] = func(*args)
420 cache[args] = func(*args)
390 else:
421 else:
391 order.remove(args)
422 order.remove(args)
392 order.append(args)
423 order.append(args)
393 return cache[args]
424 return cache[args]
394
425
395 return f
426 return f
396
427
397 class propertycache(object):
428 class propertycache(object):
398 def __init__(self, func):
429 def __init__(self, func):
399 self.func = func
430 self.func = func
400 self.name = func.__name__
431 self.name = func.__name__
401 def __get__(self, obj, type=None):
432 def __get__(self, obj, type=None):
402 result = self.func(obj)
433 result = self.func(obj)
403 self.cachevalue(obj, result)
434 self.cachevalue(obj, result)
404 return result
435 return result
405
436
406 def cachevalue(self, obj, value):
437 def cachevalue(self, obj, value):
407 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
438 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
408 obj.__dict__[self.name] = value
439 obj.__dict__[self.name] = value
409
440
410 def pipefilter(s, cmd):
441 def pipefilter(s, cmd):
411 '''filter string S through command CMD, returning its output'''
442 '''filter string S through command CMD, returning its output'''
412 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
443 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
413 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
444 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
414 pout, perr = p.communicate(s)
445 pout, perr = p.communicate(s)
415 return pout
446 return pout
416
447
417 def tempfilter(s, cmd):
448 def tempfilter(s, cmd):
418 '''filter string S through a pair of temporary files with CMD.
449 '''filter string S through a pair of temporary files with CMD.
419 CMD is used as a template to create the real command to be run,
450 CMD is used as a template to create the real command to be run,
420 with the strings INFILE and OUTFILE replaced by the real names of
451 with the strings INFILE and OUTFILE replaced by the real names of
421 the temporary files generated.'''
452 the temporary files generated.'''
422 inname, outname = None, None
453 inname, outname = None, None
423 try:
454 try:
424 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
455 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
425 fp = os.fdopen(infd, 'wb')
456 fp = os.fdopen(infd, 'wb')
426 fp.write(s)
457 fp.write(s)
427 fp.close()
458 fp.close()
428 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
459 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
429 os.close(outfd)
460 os.close(outfd)
430 cmd = cmd.replace('INFILE', inname)
461 cmd = cmd.replace('INFILE', inname)
431 cmd = cmd.replace('OUTFILE', outname)
462 cmd = cmd.replace('OUTFILE', outname)
432 code = os.system(cmd)
463 code = os.system(cmd)
433 if sys.platform == 'OpenVMS' and code & 1:
464 if sys.platform == 'OpenVMS' and code & 1:
434 code = 0
465 code = 0
435 if code:
466 if code:
436 raise Abort(_("command '%s' failed: %s") %
467 raise Abort(_("command '%s' failed: %s") %
437 (cmd, explainexit(code)))
468 (cmd, explainexit(code)))
438 fp = open(outname, 'rb')
469 fp = open(outname, 'rb')
439 r = fp.read()
470 r = fp.read()
440 fp.close()
471 fp.close()
441 return r
472 return r
442 finally:
473 finally:
443 try:
474 try:
444 if inname:
475 if inname:
445 os.unlink(inname)
476 os.unlink(inname)
446 except OSError:
477 except OSError:
447 pass
478 pass
448 try:
479 try:
449 if outname:
480 if outname:
450 os.unlink(outname)
481 os.unlink(outname)
451 except OSError:
482 except OSError:
452 pass
483 pass
453
484
454 filtertable = {
485 filtertable = {
455 'tempfile:': tempfilter,
486 'tempfile:': tempfilter,
456 'pipe:': pipefilter,
487 'pipe:': pipefilter,
457 }
488 }
458
489
459 def filter(s, cmd):
490 def filter(s, cmd):
460 "filter a string through a command that transforms its input to its output"
491 "filter a string through a command that transforms its input to its output"
461 for name, fn in filtertable.iteritems():
492 for name, fn in filtertable.iteritems():
462 if cmd.startswith(name):
493 if cmd.startswith(name):
463 return fn(s, cmd[len(name):].lstrip())
494 return fn(s, cmd[len(name):].lstrip())
464 return pipefilter(s, cmd)
495 return pipefilter(s, cmd)
465
496
466 def binary(s):
497 def binary(s):
467 """return true if a string is binary data"""
498 """return true if a string is binary data"""
468 return bool(s and '\0' in s)
499 return bool(s and '\0' in s)
469
500
470 def increasingchunks(source, min=1024, max=65536):
501 def increasingchunks(source, min=1024, max=65536):
471 '''return no less than min bytes per chunk while data remains,
502 '''return no less than min bytes per chunk while data remains,
472 doubling min after each chunk until it reaches max'''
503 doubling min after each chunk until it reaches max'''
473 def log2(x):
504 def log2(x):
474 if not x:
505 if not x:
475 return 0
506 return 0
476 i = 0
507 i = 0
477 while x:
508 while x:
478 x >>= 1
509 x >>= 1
479 i += 1
510 i += 1
480 return i - 1
511 return i - 1
481
512
482 buf = []
513 buf = []
483 blen = 0
514 blen = 0
484 for chunk in source:
515 for chunk in source:
485 buf.append(chunk)
516 buf.append(chunk)
486 blen += len(chunk)
517 blen += len(chunk)
487 if blen >= min:
518 if blen >= min:
488 if min < max:
519 if min < max:
489 min = min << 1
520 min = min << 1
490 nmin = 1 << log2(blen)
521 nmin = 1 << log2(blen)
491 if nmin > min:
522 if nmin > min:
492 min = nmin
523 min = nmin
493 if min > max:
524 if min > max:
494 min = max
525 min = max
495 yield ''.join(buf)
526 yield ''.join(buf)
496 blen = 0
527 blen = 0
497 buf = []
528 buf = []
498 if buf:
529 if buf:
499 yield ''.join(buf)
530 yield ''.join(buf)
500
531
501 Abort = error.Abort
532 Abort = error.Abort
502
533
503 def always(fn):
534 def always(fn):
504 return True
535 return True
505
536
506 def never(fn):
537 def never(fn):
507 return False
538 return False
508
539
509 def pathto(root, n1, n2):
540 def pathto(root, n1, n2):
510 '''return the relative path from one place to another.
541 '''return the relative path from one place to another.
511 root should use os.sep to separate directories
542 root should use os.sep to separate directories
512 n1 should use os.sep to separate directories
543 n1 should use os.sep to separate directories
513 n2 should use "/" to separate directories
544 n2 should use "/" to separate directories
514 returns an os.sep-separated path.
545 returns an os.sep-separated path.
515
546
516 If n1 is a relative path, it's assumed it's
547 If n1 is a relative path, it's assumed it's
517 relative to root.
548 relative to root.
518 n2 should always be relative to root.
549 n2 should always be relative to root.
519 '''
550 '''
520 if not n1:
551 if not n1:
521 return localpath(n2)
552 return localpath(n2)
522 if os.path.isabs(n1):
553 if os.path.isabs(n1):
523 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
554 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
524 return os.path.join(root, localpath(n2))
555 return os.path.join(root, localpath(n2))
525 n2 = '/'.join((pconvert(root), n2))
556 n2 = '/'.join((pconvert(root), n2))
526 a, b = splitpath(n1), n2.split('/')
557 a, b = splitpath(n1), n2.split('/')
527 a.reverse()
558 a.reverse()
528 b.reverse()
559 b.reverse()
529 while a and b and a[-1] == b[-1]:
560 while a and b and a[-1] == b[-1]:
530 a.pop()
561 a.pop()
531 b.pop()
562 b.pop()
532 b.reverse()
563 b.reverse()
533 return os.sep.join((['..'] * len(a)) + b) or '.'
564 return os.sep.join((['..'] * len(a)) + b) or '.'
534
565
535 def mainfrozen():
566 def mainfrozen():
536 """return True if we are a frozen executable.
567 """return True if we are a frozen executable.
537
568
538 The code supports py2exe (most common, Windows only) and tools/freeze
569 The code supports py2exe (most common, Windows only) and tools/freeze
539 (portable, not much used).
570 (portable, not much used).
540 """
571 """
541 return (safehasattr(sys, "frozen") or # new py2exe
572 return (safehasattr(sys, "frozen") or # new py2exe
542 safehasattr(sys, "importers") or # old py2exe
573 safehasattr(sys, "importers") or # old py2exe
543 imp.is_frozen("__main__")) # tools/freeze
574 imp.is_frozen("__main__")) # tools/freeze
544
575
545 # the location of data files matching the source code
576 # the location of data files matching the source code
546 if mainfrozen():
577 if mainfrozen():
547 # executable version (py2exe) doesn't support __file__
578 # executable version (py2exe) doesn't support __file__
548 datapath = os.path.dirname(sys.executable)
579 datapath = os.path.dirname(sys.executable)
549 else:
580 else:
550 datapath = os.path.dirname(__file__)
581 datapath = os.path.dirname(__file__)
551
582
552 i18n.setdatapath(datapath)
583 i18n.setdatapath(datapath)
553
584
554 _hgexecutable = None
585 _hgexecutable = None
555
586
556 def hgexecutable():
587 def hgexecutable():
557 """return location of the 'hg' executable.
588 """return location of the 'hg' executable.
558
589
559 Defaults to $HG or 'hg' in the search path.
590 Defaults to $HG or 'hg' in the search path.
560 """
591 """
561 if _hgexecutable is None:
592 if _hgexecutable is None:
562 hg = os.environ.get('HG')
593 hg = os.environ.get('HG')
563 mainmod = sys.modules['__main__']
594 mainmod = sys.modules['__main__']
564 if hg:
595 if hg:
565 _sethgexecutable(hg)
596 _sethgexecutable(hg)
566 elif mainfrozen():
597 elif mainfrozen():
567 _sethgexecutable(sys.executable)
598 _sethgexecutable(sys.executable)
568 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
599 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
569 _sethgexecutable(mainmod.__file__)
600 _sethgexecutable(mainmod.__file__)
570 else:
601 else:
571 exe = findexe('hg') or os.path.basename(sys.argv[0])
602 exe = findexe('hg') or os.path.basename(sys.argv[0])
572 _sethgexecutable(exe)
603 _sethgexecutable(exe)
573 return _hgexecutable
604 return _hgexecutable
574
605
575 def _sethgexecutable(path):
606 def _sethgexecutable(path):
576 """set location of the 'hg' executable"""
607 """set location of the 'hg' executable"""
577 global _hgexecutable
608 global _hgexecutable
578 _hgexecutable = path
609 _hgexecutable = path
579
610
580 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
611 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
581 '''enhanced shell command execution.
612 '''enhanced shell command execution.
582 run with environment maybe modified, maybe in different dir.
613 run with environment maybe modified, maybe in different dir.
583
614
584 if command fails and onerr is None, return status. if ui object,
615 if command fails and onerr is None, return status. if ui object,
585 print error message and return status, else raise onerr object as
616 print error message and return status, else raise onerr object as
586 exception.
617 exception.
587
618
588 if out is specified, it is assumed to be a file-like object that has a
619 if out is specified, it is assumed to be a file-like object that has a
589 write() method. stdout and stderr will be redirected to out.'''
620 write() method. stdout and stderr will be redirected to out.'''
590 try:
621 try:
591 sys.stdout.flush()
622 sys.stdout.flush()
592 except Exception:
623 except Exception:
593 pass
624 pass
594 def py2shell(val):
625 def py2shell(val):
595 'convert python object into string that is useful to shell'
626 'convert python object into string that is useful to shell'
596 if val is None or val is False:
627 if val is None or val is False:
597 return '0'
628 return '0'
598 if val is True:
629 if val is True:
599 return '1'
630 return '1'
600 return str(val)
631 return str(val)
601 origcmd = cmd
632 origcmd = cmd
602 cmd = quotecommand(cmd)
633 cmd = quotecommand(cmd)
603 if sys.platform == 'plan9' and (sys.version_info[0] == 2
634 if sys.platform == 'plan9' and (sys.version_info[0] == 2
604 and sys.version_info[1] < 7):
635 and sys.version_info[1] < 7):
605 # subprocess kludge to work around issues in half-baked Python
636 # subprocess kludge to work around issues in half-baked Python
606 # ports, notably bichued/python:
637 # ports, notably bichued/python:
607 if not cwd is None:
638 if not cwd is None:
608 os.chdir(cwd)
639 os.chdir(cwd)
609 rc = os.system(cmd)
640 rc = os.system(cmd)
610 else:
641 else:
611 env = dict(os.environ)
642 env = dict(os.environ)
612 env.update((k, py2shell(v)) for k, v in environ.iteritems())
643 env.update((k, py2shell(v)) for k, v in environ.iteritems())
613 env['HG'] = hgexecutable()
644 env['HG'] = hgexecutable()
614 if out is None or out == sys.__stdout__:
645 if out is None or out == sys.__stdout__:
615 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
646 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
616 env=env, cwd=cwd)
647 env=env, cwd=cwd)
617 else:
648 else:
618 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
649 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
619 env=env, cwd=cwd, stdout=subprocess.PIPE,
650 env=env, cwd=cwd, stdout=subprocess.PIPE,
620 stderr=subprocess.STDOUT)
651 stderr=subprocess.STDOUT)
621 for line in proc.stdout:
652 for line in proc.stdout:
622 out.write(line)
653 out.write(line)
623 proc.wait()
654 proc.wait()
624 rc = proc.returncode
655 rc = proc.returncode
625 if sys.platform == 'OpenVMS' and rc & 1:
656 if sys.platform == 'OpenVMS' and rc & 1:
626 rc = 0
657 rc = 0
627 if rc and onerr:
658 if rc and onerr:
628 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
659 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
629 explainexit(rc)[0])
660 explainexit(rc)[0])
630 if errprefix:
661 if errprefix:
631 errmsg = '%s: %s' % (errprefix, errmsg)
662 errmsg = '%s: %s' % (errprefix, errmsg)
632 try:
663 try:
633 onerr.warn(errmsg + '\n')
664 onerr.warn(errmsg + '\n')
634 except AttributeError:
665 except AttributeError:
635 raise onerr(errmsg)
666 raise onerr(errmsg)
636 return rc
667 return rc
637
668
638 def checksignature(func):
669 def checksignature(func):
639 '''wrap a function with code to check for calling errors'''
670 '''wrap a function with code to check for calling errors'''
640 def check(*args, **kwargs):
671 def check(*args, **kwargs):
641 try:
672 try:
642 return func(*args, **kwargs)
673 return func(*args, **kwargs)
643 except TypeError:
674 except TypeError:
644 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
675 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
645 raise error.SignatureError
676 raise error.SignatureError
646 raise
677 raise
647
678
648 return check
679 return check
649
680
650 def copyfile(src, dest):
681 def copyfile(src, dest):
651 "copy a file, preserving mode and atime/mtime"
682 "copy a file, preserving mode and atime/mtime"
652 if os.path.lexists(dest):
683 if os.path.lexists(dest):
653 unlink(dest)
684 unlink(dest)
654 if os.path.islink(src):
685 if os.path.islink(src):
655 os.symlink(os.readlink(src), dest)
686 os.symlink(os.readlink(src), dest)
656 else:
687 else:
657 try:
688 try:
658 shutil.copyfile(src, dest)
689 shutil.copyfile(src, dest)
659 shutil.copymode(src, dest)
690 shutil.copymode(src, dest)
660 except shutil.Error, inst:
691 except shutil.Error, inst:
661 raise Abort(str(inst))
692 raise Abort(str(inst))
662
693
663 def copyfiles(src, dst, hardlink=None):
694 def copyfiles(src, dst, hardlink=None):
664 """Copy a directory tree using hardlinks if possible"""
695 """Copy a directory tree using hardlinks if possible"""
665
696
666 if hardlink is None:
697 if hardlink is None:
667 hardlink = (os.stat(src).st_dev ==
698 hardlink = (os.stat(src).st_dev ==
668 os.stat(os.path.dirname(dst)).st_dev)
699 os.stat(os.path.dirname(dst)).st_dev)
669
700
670 num = 0
701 num = 0
671 if os.path.isdir(src):
702 if os.path.isdir(src):
672 os.mkdir(dst)
703 os.mkdir(dst)
673 for name, kind in osutil.listdir(src):
704 for name, kind in osutil.listdir(src):
674 srcname = os.path.join(src, name)
705 srcname = os.path.join(src, name)
675 dstname = os.path.join(dst, name)
706 dstname = os.path.join(dst, name)
676 hardlink, n = copyfiles(srcname, dstname, hardlink)
707 hardlink, n = copyfiles(srcname, dstname, hardlink)
677 num += n
708 num += n
678 else:
709 else:
679 if hardlink:
710 if hardlink:
680 try:
711 try:
681 oslink(src, dst)
712 oslink(src, dst)
682 except (IOError, OSError):
713 except (IOError, OSError):
683 hardlink = False
714 hardlink = False
684 shutil.copy(src, dst)
715 shutil.copy(src, dst)
685 else:
716 else:
686 shutil.copy(src, dst)
717 shutil.copy(src, dst)
687 num += 1
718 num += 1
688
719
689 return hardlink, num
720 return hardlink, num
690
721
691 _winreservednames = '''con prn aux nul
722 _winreservednames = '''con prn aux nul
692 com1 com2 com3 com4 com5 com6 com7 com8 com9
723 com1 com2 com3 com4 com5 com6 com7 com8 com9
693 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
724 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
694 _winreservedchars = ':*?"<>|'
725 _winreservedchars = ':*?"<>|'
695 def checkwinfilename(path):
726 def checkwinfilename(path):
696 r'''Check that the base-relative path is a valid filename on Windows.
727 r'''Check that the base-relative path is a valid filename on Windows.
697 Returns None if the path is ok, or a UI string describing the problem.
728 Returns None if the path is ok, or a UI string describing the problem.
698
729
699 >>> checkwinfilename("just/a/normal/path")
730 >>> checkwinfilename("just/a/normal/path")
700 >>> checkwinfilename("foo/bar/con.xml")
731 >>> checkwinfilename("foo/bar/con.xml")
701 "filename contains 'con', which is reserved on Windows"
732 "filename contains 'con', which is reserved on Windows"
702 >>> checkwinfilename("foo/con.xml/bar")
733 >>> checkwinfilename("foo/con.xml/bar")
703 "filename contains 'con', which is reserved on Windows"
734 "filename contains 'con', which is reserved on Windows"
704 >>> checkwinfilename("foo/bar/xml.con")
735 >>> checkwinfilename("foo/bar/xml.con")
705 >>> checkwinfilename("foo/bar/AUX/bla.txt")
736 >>> checkwinfilename("foo/bar/AUX/bla.txt")
706 "filename contains 'AUX', which is reserved on Windows"
737 "filename contains 'AUX', which is reserved on Windows"
707 >>> checkwinfilename("foo/bar/bla:.txt")
738 >>> checkwinfilename("foo/bar/bla:.txt")
708 "filename contains ':', which is reserved on Windows"
739 "filename contains ':', which is reserved on Windows"
709 >>> checkwinfilename("foo/bar/b\07la.txt")
740 >>> checkwinfilename("foo/bar/b\07la.txt")
710 "filename contains '\\x07', which is invalid on Windows"
741 "filename contains '\\x07', which is invalid on Windows"
711 >>> checkwinfilename("foo/bar/bla ")
742 >>> checkwinfilename("foo/bar/bla ")
712 "filename ends with ' ', which is not allowed on Windows"
743 "filename ends with ' ', which is not allowed on Windows"
713 >>> checkwinfilename("../bar")
744 >>> checkwinfilename("../bar")
714 >>> checkwinfilename("foo\\")
745 >>> checkwinfilename("foo\\")
715 "filename ends with '\\', which is invalid on Windows"
746 "filename ends with '\\', which is invalid on Windows"
716 >>> checkwinfilename("foo\\/bar")
747 >>> checkwinfilename("foo\\/bar")
717 "directory name ends with '\\', which is invalid on Windows"
748 "directory name ends with '\\', which is invalid on Windows"
718 '''
749 '''
719 if path.endswith('\\'):
750 if path.endswith('\\'):
720 return _("filename ends with '\\', which is invalid on Windows")
751 return _("filename ends with '\\', which is invalid on Windows")
721 if '\\/' in path:
752 if '\\/' in path:
722 return _("directory name ends with '\\', which is invalid on Windows")
753 return _("directory name ends with '\\', which is invalid on Windows")
723 for n in path.replace('\\', '/').split('/'):
754 for n in path.replace('\\', '/').split('/'):
724 if not n:
755 if not n:
725 continue
756 continue
726 for c in n:
757 for c in n:
727 if c in _winreservedchars:
758 if c in _winreservedchars:
728 return _("filename contains '%s', which is reserved "
759 return _("filename contains '%s', which is reserved "
729 "on Windows") % c
760 "on Windows") % c
730 if ord(c) <= 31:
761 if ord(c) <= 31:
731 return _("filename contains %r, which is invalid "
762 return _("filename contains %r, which is invalid "
732 "on Windows") % c
763 "on Windows") % c
733 base = n.split('.')[0]
764 base = n.split('.')[0]
734 if base and base.lower() in _winreservednames:
765 if base and base.lower() in _winreservednames:
735 return _("filename contains '%s', which is reserved "
766 return _("filename contains '%s', which is reserved "
736 "on Windows") % base
767 "on Windows") % base
737 t = n[-1]
768 t = n[-1]
738 if t in '. ' and n not in '..':
769 if t in '. ' and n not in '..':
739 return _("filename ends with '%s', which is not allowed "
770 return _("filename ends with '%s', which is not allowed "
740 "on Windows") % t
771 "on Windows") % t
741
772
742 if os.name == 'nt':
773 if os.name == 'nt':
743 checkosfilename = checkwinfilename
774 checkosfilename = checkwinfilename
744 else:
775 else:
745 checkosfilename = platform.checkosfilename
776 checkosfilename = platform.checkosfilename
746
777
747 def makelock(info, pathname):
778 def makelock(info, pathname):
748 try:
779 try:
749 return os.symlink(info, pathname)
780 return os.symlink(info, pathname)
750 except OSError, why:
781 except OSError, why:
751 if why.errno == errno.EEXIST:
782 if why.errno == errno.EEXIST:
752 raise
783 raise
753 except AttributeError: # no symlink in os
784 except AttributeError: # no symlink in os
754 pass
785 pass
755
786
756 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
787 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
757 os.write(ld, info)
788 os.write(ld, info)
758 os.close(ld)
789 os.close(ld)
759
790
760 def readlock(pathname):
791 def readlock(pathname):
761 try:
792 try:
762 return os.readlink(pathname)
793 return os.readlink(pathname)
763 except OSError, why:
794 except OSError, why:
764 if why.errno not in (errno.EINVAL, errno.ENOSYS):
795 if why.errno not in (errno.EINVAL, errno.ENOSYS):
765 raise
796 raise
766 except AttributeError: # no symlink in os
797 except AttributeError: # no symlink in os
767 pass
798 pass
768 fp = posixfile(pathname)
799 fp = posixfile(pathname)
769 r = fp.read()
800 r = fp.read()
770 fp.close()
801 fp.close()
771 return r
802 return r
772
803
773 def fstat(fp):
804 def fstat(fp):
774 '''stat file object that may not have fileno method.'''
805 '''stat file object that may not have fileno method.'''
775 try:
806 try:
776 return os.fstat(fp.fileno())
807 return os.fstat(fp.fileno())
777 except AttributeError:
808 except AttributeError:
778 return os.stat(fp.name)
809 return os.stat(fp.name)
779
810
780 # File system features
811 # File system features
781
812
782 def checkcase(path):
813 def checkcase(path):
783 """
814 """
784 Return true if the given path is on a case-sensitive filesystem
815 Return true if the given path is on a case-sensitive filesystem
785
816
786 Requires a path (like /foo/.hg) ending with a foldable final
817 Requires a path (like /foo/.hg) ending with a foldable final
787 directory component.
818 directory component.
788 """
819 """
789 s1 = os.stat(path)
820 s1 = os.stat(path)
790 d, b = os.path.split(path)
821 d, b = os.path.split(path)
791 b2 = b.upper()
822 b2 = b.upper()
792 if b == b2:
823 if b == b2:
793 b2 = b.lower()
824 b2 = b.lower()
794 if b == b2:
825 if b == b2:
795 return True # no evidence against case sensitivity
826 return True # no evidence against case sensitivity
796 p2 = os.path.join(d, b2)
827 p2 = os.path.join(d, b2)
797 try:
828 try:
798 s2 = os.stat(p2)
829 s2 = os.stat(p2)
799 if s2 == s1:
830 if s2 == s1:
800 return False
831 return False
801 return True
832 return True
802 except OSError:
833 except OSError:
803 return True
834 return True
804
835
805 try:
836 try:
806 import re2
837 import re2
807 _re2 = None
838 _re2 = None
808 except ImportError:
839 except ImportError:
809 _re2 = False
840 _re2 = False
810
841
811 class _re(object):
842 class _re(object):
812 def _checkre2(self):
843 def _checkre2(self):
813 global _re2
844 global _re2
814 try:
845 try:
815 # check if match works, see issue3964
846 # check if match works, see issue3964
816 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
847 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
817 except ImportError:
848 except ImportError:
818 _re2 = False
849 _re2 = False
819
850
820 def compile(self, pat, flags=0):
851 def compile(self, pat, flags=0):
821 '''Compile a regular expression, using re2 if possible
852 '''Compile a regular expression, using re2 if possible
822
853
823 For best performance, use only re2-compatible regexp features. The
854 For best performance, use only re2-compatible regexp features. The
824 only flags from the re module that are re2-compatible are
855 only flags from the re module that are re2-compatible are
825 IGNORECASE and MULTILINE.'''
856 IGNORECASE and MULTILINE.'''
826 if _re2 is None:
857 if _re2 is None:
827 self._checkre2()
858 self._checkre2()
828 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
859 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
829 if flags & remod.IGNORECASE:
860 if flags & remod.IGNORECASE:
830 pat = '(?i)' + pat
861 pat = '(?i)' + pat
831 if flags & remod.MULTILINE:
862 if flags & remod.MULTILINE:
832 pat = '(?m)' + pat
863 pat = '(?m)' + pat
833 try:
864 try:
834 return re2.compile(pat)
865 return re2.compile(pat)
835 except re2.error:
866 except re2.error:
836 pass
867 pass
837 return remod.compile(pat, flags)
868 return remod.compile(pat, flags)
838
869
839 @propertycache
870 @propertycache
840 def escape(self):
871 def escape(self):
841 '''Return the version of escape corresponding to self.compile.
872 '''Return the version of escape corresponding to self.compile.
842
873
843 This is imperfect because whether re2 or re is used for a particular
874 This is imperfect because whether re2 or re is used for a particular
844 function depends on the flags, etc, but it's the best we can do.
875 function depends on the flags, etc, but it's the best we can do.
845 '''
876 '''
846 global _re2
877 global _re2
847 if _re2 is None:
878 if _re2 is None:
848 self._checkre2()
879 self._checkre2()
849 if _re2:
880 if _re2:
850 return re2.escape
881 return re2.escape
851 else:
882 else:
852 return remod.escape
883 return remod.escape
853
884
854 re = _re()
885 re = _re()
855
886
856 _fspathcache = {}
887 _fspathcache = {}
857 def fspath(name, root):
888 def fspath(name, root):
858 '''Get name in the case stored in the filesystem
889 '''Get name in the case stored in the filesystem
859
890
860 The name should be relative to root, and be normcase-ed for efficiency.
891 The name should be relative to root, and be normcase-ed for efficiency.
861
892
862 Note that this function is unnecessary, and should not be
893 Note that this function is unnecessary, and should not be
863 called, for case-sensitive filesystems (simply because it's expensive).
894 called, for case-sensitive filesystems (simply because it's expensive).
864
895
865 The root should be normcase-ed, too.
896 The root should be normcase-ed, too.
866 '''
897 '''
867 def find(p, contents):
898 def find(p, contents):
868 for n in contents:
899 for n in contents:
869 if normcase(n) == p:
900 if normcase(n) == p:
870 return n
901 return n
871 return None
902 return None
872
903
873 seps = os.sep
904 seps = os.sep
874 if os.altsep:
905 if os.altsep:
875 seps = seps + os.altsep
906 seps = seps + os.altsep
876 # Protect backslashes. This gets silly very quickly.
907 # Protect backslashes. This gets silly very quickly.
877 seps.replace('\\','\\\\')
908 seps.replace('\\','\\\\')
878 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
909 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
879 dir = os.path.normpath(root)
910 dir = os.path.normpath(root)
880 result = []
911 result = []
881 for part, sep in pattern.findall(name):
912 for part, sep in pattern.findall(name):
882 if sep:
913 if sep:
883 result.append(sep)
914 result.append(sep)
884 continue
915 continue
885
916
886 if dir not in _fspathcache:
917 if dir not in _fspathcache:
887 _fspathcache[dir] = os.listdir(dir)
918 _fspathcache[dir] = os.listdir(dir)
888 contents = _fspathcache[dir]
919 contents = _fspathcache[dir]
889
920
890 found = find(part, contents)
921 found = find(part, contents)
891 if not found:
922 if not found:
892 # retry "once per directory" per "dirstate.walk" which
923 # retry "once per directory" per "dirstate.walk" which
893 # may take place for each patches of "hg qpush", for example
924 # may take place for each patches of "hg qpush", for example
894 contents = os.listdir(dir)
925 contents = os.listdir(dir)
895 _fspathcache[dir] = contents
926 _fspathcache[dir] = contents
896 found = find(part, contents)
927 found = find(part, contents)
897
928
898 result.append(found or part)
929 result.append(found or part)
899 dir = os.path.join(dir, part)
930 dir = os.path.join(dir, part)
900
931
901 return ''.join(result)
932 return ''.join(result)
902
933
903 def checknlink(testfile):
934 def checknlink(testfile):
904 '''check whether hardlink count reporting works properly'''
935 '''check whether hardlink count reporting works properly'''
905
936
906 # testfile may be open, so we need a separate file for checking to
937 # testfile may be open, so we need a separate file for checking to
907 # work around issue2543 (or testfile may get lost on Samba shares)
938 # work around issue2543 (or testfile may get lost on Samba shares)
908 f1 = testfile + ".hgtmp1"
939 f1 = testfile + ".hgtmp1"
909 if os.path.lexists(f1):
940 if os.path.lexists(f1):
910 return False
941 return False
911 try:
942 try:
912 posixfile(f1, 'w').close()
943 posixfile(f1, 'w').close()
913 except IOError:
944 except IOError:
914 return False
945 return False
915
946
916 f2 = testfile + ".hgtmp2"
947 f2 = testfile + ".hgtmp2"
917 fd = None
948 fd = None
918 try:
949 try:
919 try:
950 try:
920 oslink(f1, f2)
951 oslink(f1, f2)
921 except OSError:
952 except OSError:
922 return False
953 return False
923
954
924 # nlinks() may behave differently for files on Windows shares if
955 # nlinks() may behave differently for files on Windows shares if
925 # the file is open.
956 # the file is open.
926 fd = posixfile(f2)
957 fd = posixfile(f2)
927 return nlinks(f2) > 1
958 return nlinks(f2) > 1
928 finally:
959 finally:
929 if fd is not None:
960 if fd is not None:
930 fd.close()
961 fd.close()
931 for f in (f1, f2):
962 for f in (f1, f2):
932 try:
963 try:
933 os.unlink(f)
964 os.unlink(f)
934 except OSError:
965 except OSError:
935 pass
966 pass
936
967
937 def endswithsep(path):
968 def endswithsep(path):
938 '''Check path ends with os.sep or os.altsep.'''
969 '''Check path ends with os.sep or os.altsep.'''
939 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
970 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
940
971
941 def splitpath(path):
972 def splitpath(path):
942 '''Split path by os.sep.
973 '''Split path by os.sep.
943 Note that this function does not use os.altsep because this is
974 Note that this function does not use os.altsep because this is
944 an alternative of simple "xxx.split(os.sep)".
975 an alternative of simple "xxx.split(os.sep)".
945 It is recommended to use os.path.normpath() before using this
976 It is recommended to use os.path.normpath() before using this
946 function if need.'''
977 function if need.'''
947 return path.split(os.sep)
978 return path.split(os.sep)
948
979
949 def gui():
980 def gui():
950 '''Are we running in a GUI?'''
981 '''Are we running in a GUI?'''
951 if sys.platform == 'darwin':
982 if sys.platform == 'darwin':
952 if 'SSH_CONNECTION' in os.environ:
983 if 'SSH_CONNECTION' in os.environ:
953 # handle SSH access to a box where the user is logged in
984 # handle SSH access to a box where the user is logged in
954 return False
985 return False
955 elif getattr(osutil, 'isgui', None):
986 elif getattr(osutil, 'isgui', None):
956 # check if a CoreGraphics session is available
987 # check if a CoreGraphics session is available
957 return osutil.isgui()
988 return osutil.isgui()
958 else:
989 else:
959 # pure build; use a safe default
990 # pure build; use a safe default
960 return True
991 return True
961 else:
992 else:
962 return os.name == "nt" or os.environ.get("DISPLAY")
993 return os.name == "nt" or os.environ.get("DISPLAY")
963
994
964 def mktempcopy(name, emptyok=False, createmode=None):
995 def mktempcopy(name, emptyok=False, createmode=None):
965 """Create a temporary file with the same contents from name
996 """Create a temporary file with the same contents from name
966
997
967 The permission bits are copied from the original file.
998 The permission bits are copied from the original file.
968
999
969 If the temporary file is going to be truncated immediately, you
1000 If the temporary file is going to be truncated immediately, you
970 can use emptyok=True as an optimization.
1001 can use emptyok=True as an optimization.
971
1002
972 Returns the name of the temporary file.
1003 Returns the name of the temporary file.
973 """
1004 """
974 d, fn = os.path.split(name)
1005 d, fn = os.path.split(name)
975 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1006 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
976 os.close(fd)
1007 os.close(fd)
977 # Temporary files are created with mode 0600, which is usually not
1008 # Temporary files are created with mode 0600, which is usually not
978 # what we want. If the original file already exists, just copy
1009 # what we want. If the original file already exists, just copy
979 # its mode. Otherwise, manually obey umask.
1010 # its mode. Otherwise, manually obey umask.
980 copymode(name, temp, createmode)
1011 copymode(name, temp, createmode)
981 if emptyok:
1012 if emptyok:
982 return temp
1013 return temp
983 try:
1014 try:
984 try:
1015 try:
985 ifp = posixfile(name, "rb")
1016 ifp = posixfile(name, "rb")
986 except IOError, inst:
1017 except IOError, inst:
987 if inst.errno == errno.ENOENT:
1018 if inst.errno == errno.ENOENT:
988 return temp
1019 return temp
989 if not getattr(inst, 'filename', None):
1020 if not getattr(inst, 'filename', None):
990 inst.filename = name
1021 inst.filename = name
991 raise
1022 raise
992 ofp = posixfile(temp, "wb")
1023 ofp = posixfile(temp, "wb")
993 for chunk in filechunkiter(ifp):
1024 for chunk in filechunkiter(ifp):
994 ofp.write(chunk)
1025 ofp.write(chunk)
995 ifp.close()
1026 ifp.close()
996 ofp.close()
1027 ofp.close()
997 except: # re-raises
1028 except: # re-raises
998 try: os.unlink(temp)
1029 try: os.unlink(temp)
999 except OSError: pass
1030 except OSError: pass
1000 raise
1031 raise
1001 return temp
1032 return temp
1002
1033
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name      # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

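# Illustrative usage sketch (not part of the original module): the calling
# pattern the atomictempfile docstring describes. close() publishes the new
# content by renaming the temporary copy over the original; discard() drops
# the temporary copy, so readers never observe a half-written file.
def _example_atomicwrite(name, data):
    f = atomictempfile(name, 'wb')
    try:
        f.write(data)
    except: # re-raises
        f.discard()
        raise
    f.close()
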
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError, err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)

def ensuredirs(name, mode=None):
    """race-safe recursive directory creation"""
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode)
    try:
        os.mkdir(name)
    except OSError, err:
        if err.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)

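# Illustrative sketch (not part of the original module): ensuredirs() is the
# variant to reach for when several processes may create the same tree at
# once, since it treats "already exists" as success at every level.
def _example_preparedir(base):
    target = os.path.join(base, 'cache', 'tags')
    ensuredirs(target, mode=0755)
    return target
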
def readfile(path):
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()

def writefile(path, text):
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()

def appendfile(path, text):
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()

class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = deque()

    def read(self, l=None):
        """Read l bytes of data from the iterator of chunks of data.
        Returns less than l bytes if the iterator runs dry.

        If the size parameter is omitted, read everything."""
        left = l
        buf = []
        queue = self._queue
        while left is None or left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            chunk = queue.popleft()
            if left is not None:
                left -= len(chunk)
            if left is not None and left < 0:
                queue.appendleft(chunk[left:])
                buf.append(chunk[:left])
            else:
                buf.append(chunk)

        return ''.join(buf)

def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s

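# Illustrative sketch (not part of the original module): filechunkiter and
# chunkbuffer are designed to be chained. The iterator yields whatever chunk
# sizes the file happens to return, and chunkbuffer re-slices that stream
# into the fixed record size a consumer asks for.
def _example_rechunk(fp, recordsize=4096):
    buf = chunkbuffer(filechunkiter(fp))
    while True:
        record = buf.read(recordsize)
        if not record:
            break
        yield record
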
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based on the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    delta = (datetime.datetime.utcfromtimestamp(timestamp) -
             datetime.datetime.fromtimestamp(timestamp))
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz

def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. The time zone is appended only if
    the format references it (via %1, %2 or %z)."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    s = time.strftime(format, t)
    return s

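# Illustrative sketch (not part of the original module): makedate() captures
# both the timestamp and the local UTC offset, and datestr() renders the
# pair; "%1%2" expands to the +HHMM/-HHMM form seen in changeset headers.
def _example_rendernow():
    when = makedate()          # e.g. (1415000000.0, -3600) in UTC+01:00
    return datestr(when, format='%Y-%m-%d %H:%M %1%2')
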
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date."""
    return datestr(date, format='%Y-%m-%d')

def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def timezone(string):
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset

def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == _('now'):
        return makedate()
    if date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset

def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop

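# Illustrative sketch (not part of the original module): matchdate() turns a
# specifier into a predicate over raw timestamps, so date filtering reduces
# to one function call per changeset. '-7' means "within the last 7 days".
def _example_recentonly(timestamps):
    recent = matchdate('-7')
    return [t for t in timestamps if recent(t)]
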
def shortuser(user):
    """Return a short representation of a user name or email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    f = user.find(' ')
    if f >= 0:
        user = user[:f]
    f = user.find('.')
    if f >= 0:
        user = user[:f]
    return user

def emailuser(user):
    """Return the user portion of an email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    return user

def email(author):
    '''get email of author.'''
    r = author.find('>')
    if r == -1:
        r = None
    return author[author.find('<') + 1:r]

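# Illustrative sketch (not part of the original module): the three helpers
# above extract progressively smaller pieces of a "Name <user@host>" author
# string; for 'J. Doe <jdoe@example.com>' this returns
# ('jdoe@example.com', 'jdoe', 'jdoe').
def _example_authorforms(author="J. Doe <jdoe@example.com>"):
    return email(author), emailuser(email(author)), shortuser(author)
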
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    return encoding.trim(text, maxlength, ellipsis='...')

def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        for multiplier, divisor, format in unittable:
            if count >= divisor * multiplier:
                return format % (count / float(divisor))
        return unittable[-1][2] % count

    return go

bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )

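# Illustrative sketch (not part of the original module): unitcountfn() builds
# renderers like bytecount above. The table is scanned top-down and the first
# row whose threshold (multiplier * divisor) is reached wins, so coarser
# units with fewer decimals must come first.
_example_timecount = unitcountfn(
    (1, 3600, _('%.1f hours')),
    (1, 60, _('%.1f minutes')),
    (1, 1, _('%.0f seconds')),
    )
# _example_timecount(4500) -> '1.2 hours'; _example_timecount(42) -> '42 seconds'
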
def uirepr(s):
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')

# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither the number of 'bytes' in any encoding nor the number of
        'characters' is appropriate for calculating the terminal columns
        of a given string.

        The original TextWrapper implementation uses the built-in 'len()'
        directly, so overriding is needed to use the width information of
        each character.

        In addition, characters classified as 'ambiguous' width are
        treated as wide in East Asian locales, but as narrow elsewhere.

        This requires a per-user decision to determine the width of such
        characters.
        """
        def __init__(self, **kwargs):
            textwrap.TextWrapper.__init__(self, **kwargs)

            # for compatibility between 2.4 and 2.6
            if getattr(self, 'drop_whitespace', None) is None:
                self.drop_whitespace = kwargs.get('drop_whitespace', True)

        def _cutdown(self, ucstr, space_left):
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chunks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)

def wrap(line, width, initindent='', hangindent=''):
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)

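# Illustrative sketch (not part of the original module): wrap() is the
# width-aware entry point used for help-style text; the hanging indent lines
# up correctly even when the text contains double-width characters.
def _example_wraphelp(text):
    return wrap(text, width=70, initindent='  ', hangindent='      ')
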
def iterlines(iterator):
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line

def expandpath(path):
    return os.path.expanduser(os.path.expandvars(path))

def hgcmd():
    """Return the command used to execute the current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things that open new shell windows, like batch files, so we
    get either the Python call or the current executable.
    """
    if mainfrozen():
        return [sys.executable]
    return gethgcmd()

def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # The Windows case is easier because the child process either starts
    # successfully and validates the condition, or exits on failure. We
    # just poll on its PID. On Unix, if the child process fails to start,
    # it will be left in a zombie state until the parent waits on it,
    # which we cannot do since we expect a long-running process on
    # success. Instead we listen for SIGCHLD telling us our child
    # process terminated.
    terminated = set()
    def handler(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)

try:
    any, all = any, all
except NameError:
    def any(iterable):
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        for i in iterable:
            if not i:
                return False
        return True

def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single-character string, or a two-character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows the prefix to be escaped
    by doubling it.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)

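# Illustrative sketch (not part of the original module): with escape_prefix
# set, a doubled prefix character escapes itself, so a literal '$' can be
# written as '$$' while '$NAME' tokens are expanded from the mapping.
def _example_expandvars(s, variables):
    # 'variables' maps names such as 'ROOT' to their replacement text
    return interpolate(r'\$', variables, s, escape_prefix=True)
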
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)

_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower(), None)

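# Illustrative sketch (not part of the original module): parsebool() returns
# None (not False) for unrecognized input, so callers can tell "explicitly
# off" apart from "not a boolean"; getport() above accepts either a number
# or a service name such as 'http'.
def _example_serverconfig(portvalue, sslvalue):
    port = getport(portvalue)      # '8000' -> 8000, 'http' -> 80
    usessl = parsebool(sslvalue)
    if usessl is None:
        raise Abort(_("invalid boolean: %r") % sslvalue)
    return port, usessl
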
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)

def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            s += '%' + item
        except UnicodeDecodeError:
            s += unichr(int(item[:2], 16)) + item[2:]
    return s

1737 class url(object):
1768 class url(object):
1738 r"""Reliable URL parser.
1769 r"""Reliable URL parser.
1739
1770
1740 This parses URLs and provides attributes for the following
1771 This parses URLs and provides attributes for the following
1741 components:
1772 components:
1742
1773
1743 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1774 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1744
1775
1745 Missing components are set to None. The only exception is
1776 Missing components are set to None. The only exception is
1746 fragment, which is set to '' if present but empty.
1777 fragment, which is set to '' if present but empty.
1747
1778
1748 If parsefragment is False, fragment is included in query. If
1779 If parsefragment is False, fragment is included in query. If
1749 parsequery is False, query is included in path. If both are
1780 parsequery is False, query is included in path. If both are
1750 False, both fragment and query are included in path.
1781 False, both fragment and query are included in path.
1751
1782
1752 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1783 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1753
1784
1754 Note that for backward compatibility reasons, bundle URLs do not
1785 Note that for backward compatibility reasons, bundle URLs do not
1755 take host names. That means 'bundle://../' has a path of '../'.
1786 take host names. That means 'bundle://../' has a path of '../'.
1756
1787
1757 Examples:
1788 Examples:
1758
1789
1759 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1790 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1760 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1791 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1761 >>> url('ssh://[::1]:2200//home/joe/repo')
1792 >>> url('ssh://[::1]:2200//home/joe/repo')
1762 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1793 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1763 >>> url('file:///home/joe/repo')
1794 >>> url('file:///home/joe/repo')
1764 <url scheme: 'file', path: '/home/joe/repo'>
1795 <url scheme: 'file', path: '/home/joe/repo'>
1765 >>> url('file:///c:/temp/foo/')
1796 >>> url('file:///c:/temp/foo/')
1766 <url scheme: 'file', path: 'c:/temp/foo/'>
1797 <url scheme: 'file', path: 'c:/temp/foo/'>
1767 >>> url('bundle:foo')
1798 >>> url('bundle:foo')
1768 <url scheme: 'bundle', path: 'foo'>
1799 <url scheme: 'bundle', path: 'foo'>
1769 >>> url('bundle://../foo')
1800 >>> url('bundle://../foo')
1770 <url scheme: 'bundle', path: '../foo'>
1801 <url scheme: 'bundle', path: '../foo'>
1771 >>> url(r'c:\foo\bar')
1802 >>> url(r'c:\foo\bar')
1772 <url path: 'c:\\foo\\bar'>
1803 <url path: 'c:\\foo\\bar'>
1773 >>> url(r'\\blah\blah\blah')
1804 >>> url(r'\\blah\blah\blah')
1774 <url path: '\\\\blah\\blah\\blah'>
1805 <url path: '\\\\blah\\blah\\blah'>
1775 >>> url(r'\\blah\blah\blah#baz')
1806 >>> url(r'\\blah\blah\blah#baz')
1776 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1807 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1777 >>> url(r'file:///C:\users\me')
1808 >>> url(r'file:///C:\users\me')
1778 <url scheme: 'file', path: 'C:\\users\\me'>
1809 <url scheme: 'file', path: 'C:\\users\\me'>
1779
1810
1780 Authentication credentials:
1811 Authentication credentials:
1781
1812
1782 >>> url('ssh://joe:xyz@x/repo')
1813 >>> url('ssh://joe:xyz@x/repo')
1783 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1814 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1784 >>> url('ssh://joe@x/repo')
1815 >>> url('ssh://joe@x/repo')
1785 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1816 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1786
1817
1787 Query strings and fragments:
1818 Query strings and fragments:
1788
1819
1789 >>> url('http://host/a?b#c')
1820 >>> url('http://host/a?b#c')
1790 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1821 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1791 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1822 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1792 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1823 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1793 """
1824 """
1794
1825
1795 _safechars = "!~*'()+"
1826 _safechars = "!~*'()+"
1796 _safepchars = "/!~*'()+:\\"
1827 _safepchars = "/!~*'()+:\\"
1797 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1828 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1798
1829
1799 def __init__(self, path, parsequery=True, parsefragment=True):
1830 def __init__(self, path, parsequery=True, parsefragment=True):
1800 # We slowly chomp away at path until we have only the path left
1831 # We slowly chomp away at path until we have only the path left
1801 self.scheme = self.user = self.passwd = self.host = None
1832 self.scheme = self.user = self.passwd = self.host = None
1802 self.port = self.path = self.query = self.fragment = None
1833 self.port = self.path = self.query = self.fragment = None
1803 self._localpath = True
1834 self._localpath = True
1804 self._hostport = ''
1835 self._hostport = ''
1805 self._origpath = path
1836 self._origpath = path
1806
1837
1807 if parsefragment and '#' in path:
1838 if parsefragment and '#' in path:
1808 path, self.fragment = path.split('#', 1)
1839 path, self.fragment = path.split('#', 1)
1809 if not path:
1840 if not path:
1810 path = None
1841 path = None
1811
1842
1812 # special case for Windows drive letters and UNC paths
1843 # special case for Windows drive letters and UNC paths
1813 if hasdriveletter(path) or path.startswith(r'\\'):
1844 if hasdriveletter(path) or path.startswith(r'\\'):
1814 self.path = path
1845 self.path = path
1815 return
1846 return
1816
1847
1817 # For compatibility reasons, we can't handle bundle paths as
1848 # For compatibility reasons, we can't handle bundle paths as
1818 # normal URLS
1849 # normal URLS
1819 if path.startswith('bundle:'):
1850 if path.startswith('bundle:'):
1820 self.scheme = 'bundle'
1851 self.scheme = 'bundle'
1821 path = path[7:]
1852 path = path[7:]
1822 if path.startswith('//'):
1853 if path.startswith('//'):
1823 path = path[2:]
1854 path = path[2:]
1824 self.path = path
1855 self.path = path
1825 return
1856 return
1826
1857
1827 if self._matchscheme(path):
1858 if self._matchscheme(path):
1828 parts = path.split(':', 1)
1859 parts = path.split(':', 1)
1829 if parts[0]:
1860 if parts[0]:
1830 self.scheme, path = parts
1861 self.scheme, path = parts
1831 self._localpath = False
1862 self._localpath = False
1832
1863
1833 if not path:
1864 if not path:
1834 path = None
1865 path = None
1835 if self._localpath:
1866 if self._localpath:
1836 self.path = ''
1867 self.path = ''
1837 return
1868 return
1838 else:
1869 else:
1839 if self._localpath:
1870 if self._localpath:
1840 self.path = path
1871 self.path = path
1841 return
1872 return
1842
1873
1843 if parsequery and '?' in path:
1874 if parsequery and '?' in path:
1844 path, self.query = path.split('?', 1)
1875 path, self.query = path.split('?', 1)
1845 if not path:
1876 if not path:
1846 path = None
1877 path = None
1847 if not self.query:
1878 if not self.query:
1848 self.query = None
1879 self.query = None
1849
1880
1850 # // is required to specify a host/authority
1881 # // is required to specify a host/authority
1851 if path and path.startswith('//'):
1882 if path and path.startswith('//'):
1852 parts = path[2:].split('/', 1)
1883 parts = path[2:].split('/', 1)
1853 if len(parts) > 1:
1884 if len(parts) > 1:
1854 self.host, path = parts
1885 self.host, path = parts
1855 else:
1886 else:
1856 self.host = parts[0]
1887 self.host = parts[0]
1857 path = None
1888 path = None
1858 if not self.host:
1889 if not self.host:
1859 self.host = None
1890 self.host = None
1860 # path of file:///d is /d
1891 # path of file:///d is /d
1861 # path of file:///d:/ is d:/, not /d:/
1892 # path of file:///d:/ is d:/, not /d:/
1862 if path and not hasdriveletter(path):
1893 if path and not hasdriveletter(path):
1863 path = '/' + path
1894 path = '/' + path
1864
1895
1865 if self.host and '@' in self.host:
1896 if self.host and '@' in self.host:
1866 self.user, self.host = self.host.rsplit('@', 1)
1897 self.user, self.host = self.host.rsplit('@', 1)
1867 if ':' in self.user:
1898 if ':' in self.user:
1868 self.user, self.passwd = self.user.split(':', 1)
1899 self.user, self.passwd = self.user.split(':', 1)
1869 if not self.host:
1900 if not self.host:
1870 self.host = None
1901 self.host = None
1871
1902
1872 # Don't split on colons in IPv6 addresses without ports
1903 # Don't split on colons in IPv6 addresses without ports
1873 if (self.host and ':' in self.host and
1904 if (self.host and ':' in self.host and
1874 not (self.host.startswith('[') and self.host.endswith(']'))):
1905 not (self.host.startswith('[') and self.host.endswith(']'))):
1875 self._hostport = self.host
1906 self._hostport = self.host
1876 self.host, self.port = self.host.rsplit(':', 1)
1907 self.host, self.port = self.host.rsplit(':', 1)
1877 if not self.host:
1908 if not self.host:
1878 self.host = None
1909 self.host = None
1879
1910
1880 if (self.host and self.scheme == 'file' and
1911 if (self.host and self.scheme == 'file' and
1881 self.host not in ('localhost', '127.0.0.1', '[::1]')):
1912 self.host not in ('localhost', '127.0.0.1', '[::1]')):
1882 raise Abort(_('file:// URLs can only refer to localhost'))
1913 raise Abort(_('file:// URLs can only refer to localhost'))
1883
1914
1884 self.path = path
1915 self.path = path
1885
1916
1886 # leave the query string escaped
1917 # leave the query string escaped
1887 for a in ('user', 'passwd', 'host', 'port',
1918 for a in ('user', 'passwd', 'host', 'port',
1888 'path', 'fragment'):
1919 'path', 'fragment'):
1889 v = getattr(self, a)
1920 v = getattr(self, a)
1890 if v is not None:
1921 if v is not None:
1891 setattr(self, a, _urlunquote(v))
1922 setattr(self, a, _urlunquote(v))
1892
1923
    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

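    # Illustrative usage sketch (hypothetical URL): authinfo() is expected to
    # return the credential-free URL string plus the realm tuple handed to
    # urllib2's password manager, roughly:
    #
    #   >>> url('http://joe:xyzzy@example.com/repo').authinfo()
    #   ('http://example.com/repo',
    #    (None, ('http://example.com/repo', 'example.com'), 'joe', 'xyzzy'))
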
    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')

def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()

def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

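# Illustrative examples (hypothetical paths, assuming the url parsing above):
#
#   >>> hasscheme('http://example.com/repo')
#   True
#   >>> hasscheme('/home/joe/repo')
#   False
#   >>> hasdriveletter('c:/temp/foo')
#   True
#   >>> hasdriveletter('/temp/foo')
#   False
#   >>> urllocalpath('file:///c:/temp/foo/')
#   'c:/temp/foo/'
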
def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return str(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)

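# Illustrative usage sketch (hypothetical URL): both helpers round-trip
# through the url class above, so roughly:
#
#   >>> hidepassword('http://joe:xyzzy@example.com/repo')
#   'http://joe:***@example.com/repo'
#   >>> removeauth('http://joe:xyzzy@example.com/repo')
#   'http://example.com/repo'
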
def isatty(fd):
    try:
        return fd.isatty()
    except AttributeError:
        return False

timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

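# Illustrative note: each row above is (multiplier, divisor, format).
# Assuming unitcountfn (defined earlier in this module) applies the first
# row whose multiplier * divisor threshold the value meets and divides by
# that row's divisor, a duration of 0.0123 seconds would render in
# milliseconds, roughly:
#
#   >>> timecount(0.0123)
#   '12.30 ms'
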
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper

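# Illustrative note: _timenesting is shared state, so nested @timed calls are
# indented two extra spaces per level and the innermost call is reported
# first.  For a decorated foo() that calls a decorated inner(), the stderr
# output would look roughly like this (names and timings made up):
#
#     inner: 3.21 ms
#   foo: 12.30 ms
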
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behaviour. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results

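# Illustrative usage sketch (hypothetical sources and hook callables): hooks
# run sorted by their source name, and the call returns their results in
# that order:
#
#   >>> h = hooks()
#   >>> h.add('b-ext', lambda x: x + 1)
#   >>> h.add('a-ext', lambda x: x * 2)
#   >>> h(3)   # 'a-ext' sorts (and runs) before 'b-ext'
#   [6, 4]
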
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require a ui object.
    Not to be used in production code but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    entries = [('%s:%s' % (fn, ln), func)
        for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (fnmax, fnln, func))
    f.flush()

# convenient shortcut
dst = debugstacktrace
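
# Illustrative note: debugstacktrace('here') (or the dst shortcut) writes the
# message followed by one aligned "file:line in function" row per remaining
# stack frame, roughly like this (paths and line numbers are placeholders):
#
#   here at:
#    some/script.py:12 in main
#    some/module.py:34 in caller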