i18n: add hint to digest mismatch message
Wagner Bruna
r23076:c312ef38 stable
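The patch adds a "# i18n:" translator comment directly above the digest mismatch abort in mercurial/util.py, hinting to translators that the first %s in the message is a digest name rather than ordinary prose. Below is a minimal self-contained sketch of that pattern; it uses plain gettext and a hypothetical checkdigest() helper instead of Mercurial's own i18n module and Abort class, with the comment text and message format taken from the diff that follows.

    # Sketch only: checkdigest() is illustrative and not part of util.py.
    from gettext import gettext as _

    def checkdigest(name, expected, actual):
        if expected != actual:
            # i18n: first parameter is a digest name
            raise ValueError(_('%s mismatch: expected %s, got %s')
                             % (name, expected, actual))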
@@ -1,2187 +1,2188 @@
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding
18 import error, osutil, encoding
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib
22 import imp, socket, urllib
23
23
24 if os.name == 'nt':
24 if os.name == 'nt':
25 import windows as platform
25 import windows as platform
26 else:
26 else:
27 import posix as platform
27 import posix as platform
28
28
29 cachestat = platform.cachestat
29 cachestat = platform.cachestat
30 checkexec = platform.checkexec
30 checkexec = platform.checkexec
31 checklink = platform.checklink
31 checklink = platform.checklink
32 copymode = platform.copymode
32 copymode = platform.copymode
33 executablepath = platform.executablepath
33 executablepath = platform.executablepath
34 expandglobs = platform.expandglobs
34 expandglobs = platform.expandglobs
35 explainexit = platform.explainexit
35 explainexit = platform.explainexit
36 findexe = platform.findexe
36 findexe = platform.findexe
37 gethgcmd = platform.gethgcmd
37 gethgcmd = platform.gethgcmd
38 getuser = platform.getuser
38 getuser = platform.getuser
39 groupmembers = platform.groupmembers
39 groupmembers = platform.groupmembers
40 groupname = platform.groupname
40 groupname = platform.groupname
41 hidewindow = platform.hidewindow
41 hidewindow = platform.hidewindow
42 isexec = platform.isexec
42 isexec = platform.isexec
43 isowner = platform.isowner
43 isowner = platform.isowner
44 localpath = platform.localpath
44 localpath = platform.localpath
45 lookupreg = platform.lookupreg
45 lookupreg = platform.lookupreg
46 makedir = platform.makedir
46 makedir = platform.makedir
47 nlinks = platform.nlinks
47 nlinks = platform.nlinks
48 normpath = platform.normpath
48 normpath = platform.normpath
49 normcase = platform.normcase
49 normcase = platform.normcase
50 openhardlinks = platform.openhardlinks
50 openhardlinks = platform.openhardlinks
51 oslink = platform.oslink
51 oslink = platform.oslink
52 parsepatchoutput = platform.parsepatchoutput
52 parsepatchoutput = platform.parsepatchoutput
53 pconvert = platform.pconvert
53 pconvert = platform.pconvert
54 popen = platform.popen
54 popen = platform.popen
55 posixfile = platform.posixfile
55 posixfile = platform.posixfile
56 quotecommand = platform.quotecommand
56 quotecommand = platform.quotecommand
57 readpipe = platform.readpipe
57 readpipe = platform.readpipe
58 rename = platform.rename
58 rename = platform.rename
59 samedevice = platform.samedevice
59 samedevice = platform.samedevice
60 samefile = platform.samefile
60 samefile = platform.samefile
61 samestat = platform.samestat
61 samestat = platform.samestat
62 setbinary = platform.setbinary
62 setbinary = platform.setbinary
63 setflags = platform.setflags
63 setflags = platform.setflags
64 setsignalhandler = platform.setsignalhandler
64 setsignalhandler = platform.setsignalhandler
65 shellquote = platform.shellquote
65 shellquote = platform.shellquote
66 spawndetached = platform.spawndetached
66 spawndetached = platform.spawndetached
67 split = platform.split
67 split = platform.split
68 sshargs = platform.sshargs
68 sshargs = platform.sshargs
69 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
69 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
70 statisexec = platform.statisexec
70 statisexec = platform.statisexec
71 statislink = platform.statislink
71 statislink = platform.statislink
72 termwidth = platform.termwidth
72 termwidth = platform.termwidth
73 testpid = platform.testpid
73 testpid = platform.testpid
74 umask = platform.umask
74 umask = platform.umask
75 unlink = platform.unlink
75 unlink = platform.unlink
76 unlinkpath = platform.unlinkpath
76 unlinkpath = platform.unlinkpath
77 username = platform.username
77 username = platform.username
78
78
79 # Python compatibility
79 # Python compatibility
80
80
81 _notset = object()
81 _notset = object()
82
82
83 def safehasattr(thing, attr):
83 def safehasattr(thing, attr):
84 return getattr(thing, attr, _notset) is not _notset
84 return getattr(thing, attr, _notset) is not _notset
85
85
86 def sha1(s=''):
86 def sha1(s=''):
87 '''
87 '''
88 Low-overhead wrapper around Python's SHA support
88 Low-overhead wrapper around Python's SHA support
89
89
90 >>> f = _fastsha1
90 >>> f = _fastsha1
91 >>> a = sha1()
91 >>> a = sha1()
92 >>> a = f()
92 >>> a = f()
93 >>> a.hexdigest()
93 >>> a.hexdigest()
94 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
94 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
95 '''
95 '''
96
96
97 return _fastsha1(s)
97 return _fastsha1(s)
98
98
99 def _fastsha1(s=''):
99 def _fastsha1(s=''):
100 # This function will import sha1 from hashlib or sha (whichever is
100 # This function will import sha1 from hashlib or sha (whichever is
101 # available) and overwrite itself with it on the first call.
101 # available) and overwrite itself with it on the first call.
102 # Subsequent calls will go directly to the imported function.
102 # Subsequent calls will go directly to the imported function.
103 if sys.version_info >= (2, 5):
103 if sys.version_info >= (2, 5):
104 from hashlib import sha1 as _sha1
104 from hashlib import sha1 as _sha1
105 else:
105 else:
106 from sha import sha as _sha1
106 from sha import sha as _sha1
107 global _fastsha1, sha1
107 global _fastsha1, sha1
108 _fastsha1 = sha1 = _sha1
108 _fastsha1 = sha1 = _sha1
109 return _sha1(s)
109 return _sha1(s)
110
110
111 def md5(s=''):
111 def md5(s=''):
112 try:
112 try:
113 from hashlib import md5 as _md5
113 from hashlib import md5 as _md5
114 except ImportError:
114 except ImportError:
115 from md5 import md5 as _md5
115 from md5 import md5 as _md5
116 global md5
116 global md5
117 md5 = _md5
117 md5 = _md5
118 return _md5(s)
118 return _md5(s)
119
119
120 DIGESTS = {
120 DIGESTS = {
121 'md5': md5,
121 'md5': md5,
122 'sha1': sha1,
122 'sha1': sha1,
123 }
123 }
124 # List of digest types from strongest to weakest
124 # List of digest types from strongest to weakest
125 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
125 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
126
126
127 try:
127 try:
128 import hashlib
128 import hashlib
129 DIGESTS.update({
129 DIGESTS.update({
130 'sha512': hashlib.sha512,
130 'sha512': hashlib.sha512,
131 })
131 })
132 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
132 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
133 except ImportError:
133 except ImportError:
134 pass
134 pass
135
135
136 for k in DIGESTS_BY_STRENGTH:
136 for k in DIGESTS_BY_STRENGTH:
137 assert k in DIGESTS
137 assert k in DIGESTS
138
138
139 class digester(object):
139 class digester(object):
140 """helper to compute digests.
140 """helper to compute digests.
141
141
142 This helper can be used to compute one or more digests given their name.
142 This helper can be used to compute one or more digests given their name.
143
143
144 >>> d = digester(['md5', 'sha1'])
144 >>> d = digester(['md5', 'sha1'])
145 >>> d.update('foo')
145 >>> d.update('foo')
146 >>> [k for k in sorted(d)]
146 >>> [k for k in sorted(d)]
147 ['md5', 'sha1']
147 ['md5', 'sha1']
148 >>> d['md5']
148 >>> d['md5']
149 'acbd18db4cc2f85cedef654fccc4a4d8'
149 'acbd18db4cc2f85cedef654fccc4a4d8'
150 >>> d['sha1']
150 >>> d['sha1']
151 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
151 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
152 >>> digester.preferred(['md5', 'sha1'])
152 >>> digester.preferred(['md5', 'sha1'])
153 'sha1'
153 'sha1'
154 """
154 """
155
155
156 def __init__(self, digests, s=''):
156 def __init__(self, digests, s=''):
157 self._hashes = {}
157 self._hashes = {}
158 for k in digests:
158 for k in digests:
159 if k not in DIGESTS:
159 if k not in DIGESTS:
160 raise Abort(_('unknown digest type: %s') % k)
160 raise Abort(_('unknown digest type: %s') % k)
161 self._hashes[k] = DIGESTS[k]()
161 self._hashes[k] = DIGESTS[k]()
162 if s:
162 if s:
163 self.update(s)
163 self.update(s)
164
164
165 def update(self, data):
165 def update(self, data):
166 for h in self._hashes.values():
166 for h in self._hashes.values():
167 h.update(data)
167 h.update(data)
168
168
169 def __getitem__(self, key):
169 def __getitem__(self, key):
170 if key not in DIGESTS:
170 if key not in DIGESTS:
171 raise Abort(_('unknown digest type: %s') % key)
171 raise Abort(_('unknown digest type: %s') % key)
172 return self._hashes[key].hexdigest()
172 return self._hashes[key].hexdigest()
173
173
174 def __iter__(self):
174 def __iter__(self):
175 return iter(self._hashes)
175 return iter(self._hashes)
176
176
177 @staticmethod
177 @staticmethod
178 def preferred(supported):
178 def preferred(supported):
179 """returns the strongest digest type in both supported and DIGESTS."""
179 """returns the strongest digest type in both supported and DIGESTS."""
180
180
181 for k in DIGESTS_BY_STRENGTH:
181 for k in DIGESTS_BY_STRENGTH:
182 if k in supported:
182 if k in supported:
183 return k
183 return k
184 return None
184 return None
185
185
186 class digestchecker(object):
186 class digestchecker(object):
187 """file handle wrapper that additionally checks content against a given
187 """file handle wrapper that additionally checks content against a given
188 size and digests.
188 size and digests.
189
189
190 d = digestchecker(fh, size, {'md5': '...'})
190 d = digestchecker(fh, size, {'md5': '...'})
191
191
192 When multiple digests are given, all of them are validated.
192 When multiple digests are given, all of them are validated.
193 """
193 """
194
194
195 def __init__(self, fh, size, digests):
195 def __init__(self, fh, size, digests):
196 self._fh = fh
196 self._fh = fh
197 self._size = size
197 self._size = size
198 self._got = 0
198 self._got = 0
199 self._digests = dict(digests)
199 self._digests = dict(digests)
200 self._digester = digester(self._digests.keys())
200 self._digester = digester(self._digests.keys())
201
201
202 def read(self, length=-1):
202 def read(self, length=-1):
203 content = self._fh.read(length)
203 content = self._fh.read(length)
204 self._digester.update(content)
204 self._digester.update(content)
205 self._got += len(content)
205 self._got += len(content)
206 return content
206 return content
207
207
208 def validate(self):
208 def validate(self):
209 if self._size != self._got:
209 if self._size != self._got:
210 raise Abort(_('size mismatch: expected %d, got %d') %
210 raise Abort(_('size mismatch: expected %d, got %d') %
211 (self._size, self._got))
211 (self._size, self._got))
212 for k, v in self._digests.items():
212 for k, v in self._digests.items():
213 if v != self._digester[k]:
213 if v != self._digester[k]:
214 # i18n: first parameter is a digest name
214 raise Abort(_('%s mismatch: expected %s, got %s') %
215 raise Abort(_('%s mismatch: expected %s, got %s') %
215 (k, v, self._digester[k]))
216 (k, v, self._digester[k]))
216
217
217 try:
218 try:
218 buffer = buffer
219 buffer = buffer
219 except NameError:
220 except NameError:
220 if sys.version_info[0] < 3:
221 if sys.version_info[0] < 3:
221 def buffer(sliceable, offset=0):
222 def buffer(sliceable, offset=0):
222 return sliceable[offset:]
223 return sliceable[offset:]
223 else:
224 else:
224 def buffer(sliceable, offset=0):
225 def buffer(sliceable, offset=0):
225 return memoryview(sliceable)[offset:]
226 return memoryview(sliceable)[offset:]
226
227
227 import subprocess
228 import subprocess
228 closefds = os.name == 'posix'
229 closefds = os.name == 'posix'
229
230
230 def popen2(cmd, env=None, newlines=False):
231 def popen2(cmd, env=None, newlines=False):
231 # Setting bufsize to -1 lets the system decide the buffer size.
232 # Setting bufsize to -1 lets the system decide the buffer size.
232 # The default for bufsize is 0, meaning unbuffered. This leads to
233 # The default for bufsize is 0, meaning unbuffered. This leads to
233 # poor performance on Mac OS X: http://bugs.python.org/issue4194
234 # poor performance on Mac OS X: http://bugs.python.org/issue4194
234 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
235 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
235 close_fds=closefds,
236 close_fds=closefds,
236 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
237 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
237 universal_newlines=newlines,
238 universal_newlines=newlines,
238 env=env)
239 env=env)
239 return p.stdin, p.stdout
240 return p.stdin, p.stdout
240
241
241 def popen3(cmd, env=None, newlines=False):
242 def popen3(cmd, env=None, newlines=False):
242 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
243 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
243 return stdin, stdout, stderr
244 return stdin, stdout, stderr
244
245
245 def popen4(cmd, env=None, newlines=False):
246 def popen4(cmd, env=None, newlines=False):
246 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
247 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
247 close_fds=closefds,
248 close_fds=closefds,
248 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
249 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
249 stderr=subprocess.PIPE,
250 stderr=subprocess.PIPE,
250 universal_newlines=newlines,
251 universal_newlines=newlines,
251 env=env)
252 env=env)
252 return p.stdin, p.stdout, p.stderr, p
253 return p.stdin, p.stdout, p.stderr, p
253
254
254 def version():
255 def version():
255 """Return version information if available."""
256 """Return version information if available."""
256 try:
257 try:
257 import __version__
258 import __version__
258 return __version__.version
259 return __version__.version
259 except ImportError:
260 except ImportError:
260 return 'unknown'
261 return 'unknown'
261
262
262 # used by parsedate
263 # used by parsedate
263 defaultdateformats = (
264 defaultdateformats = (
264 '%Y-%m-%d %H:%M:%S',
265 '%Y-%m-%d %H:%M:%S',
265 '%Y-%m-%d %I:%M:%S%p',
266 '%Y-%m-%d %I:%M:%S%p',
266 '%Y-%m-%d %H:%M',
267 '%Y-%m-%d %H:%M',
267 '%Y-%m-%d %I:%M%p',
268 '%Y-%m-%d %I:%M%p',
268 '%Y-%m-%d',
269 '%Y-%m-%d',
269 '%m-%d',
270 '%m-%d',
270 '%m/%d',
271 '%m/%d',
271 '%m/%d/%y',
272 '%m/%d/%y',
272 '%m/%d/%Y',
273 '%m/%d/%Y',
273 '%a %b %d %H:%M:%S %Y',
274 '%a %b %d %H:%M:%S %Y',
274 '%a %b %d %I:%M:%S%p %Y',
275 '%a %b %d %I:%M:%S%p %Y',
275 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
276 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
276 '%b %d %H:%M:%S %Y',
277 '%b %d %H:%M:%S %Y',
277 '%b %d %I:%M:%S%p %Y',
278 '%b %d %I:%M:%S%p %Y',
278 '%b %d %H:%M:%S',
279 '%b %d %H:%M:%S',
279 '%b %d %I:%M:%S%p',
280 '%b %d %I:%M:%S%p',
280 '%b %d %H:%M',
281 '%b %d %H:%M',
281 '%b %d %I:%M%p',
282 '%b %d %I:%M%p',
282 '%b %d %Y',
283 '%b %d %Y',
283 '%b %d',
284 '%b %d',
284 '%H:%M:%S',
285 '%H:%M:%S',
285 '%I:%M:%S%p',
286 '%I:%M:%S%p',
286 '%H:%M',
287 '%H:%M',
287 '%I:%M%p',
288 '%I:%M%p',
288 )
289 )
289
290
290 extendeddateformats = defaultdateformats + (
291 extendeddateformats = defaultdateformats + (
291 "%Y",
292 "%Y",
292 "%Y-%m",
293 "%Y-%m",
293 "%b",
294 "%b",
294 "%b %Y",
295 "%b %Y",
295 )
296 )
296
297
297 def cachefunc(func):
298 def cachefunc(func):
298 '''cache the result of function calls'''
299 '''cache the result of function calls'''
299 # XXX doesn't handle keyword args
300 # XXX doesn't handle keyword args
300 if func.func_code.co_argcount == 0:
301 if func.func_code.co_argcount == 0:
301 cache = []
302 cache = []
302 def f():
303 def f():
303 if len(cache) == 0:
304 if len(cache) == 0:
304 cache.append(func())
305 cache.append(func())
305 return cache[0]
306 return cache[0]
306 return f
307 return f
307 cache = {}
308 cache = {}
308 if func.func_code.co_argcount == 1:
309 if func.func_code.co_argcount == 1:
309 # we gain a small amount of time because
310 # we gain a small amount of time because
310 # we don't need to pack/unpack the list
311 # we don't need to pack/unpack the list
311 def f(arg):
312 def f(arg):
312 if arg not in cache:
313 if arg not in cache:
313 cache[arg] = func(arg)
314 cache[arg] = func(arg)
314 return cache[arg]
315 return cache[arg]
315 else:
316 else:
316 def f(*args):
317 def f(*args):
317 if args not in cache:
318 if args not in cache:
318 cache[args] = func(*args)
319 cache[args] = func(*args)
319 return cache[args]
320 return cache[args]
320
321
321 return f
322 return f
322
323
323 try:
324 try:
324 collections.deque.remove
325 collections.deque.remove
325 deque = collections.deque
326 deque = collections.deque
326 except AttributeError:
327 except AttributeError:
327 # python 2.4 lacks deque.remove
328 # python 2.4 lacks deque.remove
328 class deque(collections.deque):
329 class deque(collections.deque):
329 def remove(self, val):
330 def remove(self, val):
330 for i, v in enumerate(self):
331 for i, v in enumerate(self):
331 if v == val:
332 if v == val:
332 del self[i]
333 del self[i]
333 break
334 break
334
335
335 class sortdict(dict):
336 class sortdict(dict):
336 '''a simple sorted dictionary'''
337 '''a simple sorted dictionary'''
337 def __init__(self, data=None):
338 def __init__(self, data=None):
338 self._list = []
339 self._list = []
339 if data:
340 if data:
340 self.update(data)
341 self.update(data)
341 def copy(self):
342 def copy(self):
342 return sortdict(self)
343 return sortdict(self)
343 def __setitem__(self, key, val):
344 def __setitem__(self, key, val):
344 if key in self:
345 if key in self:
345 self._list.remove(key)
346 self._list.remove(key)
346 self._list.append(key)
347 self._list.append(key)
347 dict.__setitem__(self, key, val)
348 dict.__setitem__(self, key, val)
348 def __iter__(self):
349 def __iter__(self):
349 return self._list.__iter__()
350 return self._list.__iter__()
350 def update(self, src):
351 def update(self, src):
351 for k in src:
352 for k in src:
352 self[k] = src[k]
353 self[k] = src[k]
353 def clear(self):
354 def clear(self):
354 dict.clear(self)
355 dict.clear(self)
355 self._list = []
356 self._list = []
356 def items(self):
357 def items(self):
357 return [(k, self[k]) for k in self._list]
358 return [(k, self[k]) for k in self._list]
358 def __delitem__(self, key):
359 def __delitem__(self, key):
359 dict.__delitem__(self, key)
360 dict.__delitem__(self, key)
360 self._list.remove(key)
361 self._list.remove(key)
361 def pop(self, key, *args, **kwargs):
362 def pop(self, key, *args, **kwargs):
362 dict.pop(self, key, *args, **kwargs)
363 dict.pop(self, key, *args, **kwargs)
363 try:
364 try:
364 self._list.remove(key)
365 self._list.remove(key)
365 except ValueError:
366 except ValueError:
366 pass
367 pass
367 def keys(self):
368 def keys(self):
368 return self._list
369 return self._list
369 def iterkeys(self):
370 def iterkeys(self):
370 return self._list.__iter__()
371 return self._list.__iter__()
371
372
372 class lrucachedict(object):
373 class lrucachedict(object):
373 '''cache most recent gets from or sets to this dictionary'''
374 '''cache most recent gets from or sets to this dictionary'''
374 def __init__(self, maxsize):
375 def __init__(self, maxsize):
375 self._cache = {}
376 self._cache = {}
376 self._maxsize = maxsize
377 self._maxsize = maxsize
377 self._order = deque()
378 self._order = deque()
378
379
379 def __getitem__(self, key):
380 def __getitem__(self, key):
380 value = self._cache[key]
381 value = self._cache[key]
381 self._order.remove(key)
382 self._order.remove(key)
382 self._order.append(key)
383 self._order.append(key)
383 return value
384 return value
384
385
385 def __setitem__(self, key, value):
386 def __setitem__(self, key, value):
386 if key not in self._cache:
387 if key not in self._cache:
387 if len(self._cache) >= self._maxsize:
388 if len(self._cache) >= self._maxsize:
388 del self._cache[self._order.popleft()]
389 del self._cache[self._order.popleft()]
389 else:
390 else:
390 self._order.remove(key)
391 self._order.remove(key)
391 self._cache[key] = value
392 self._cache[key] = value
392 self._order.append(key)
393 self._order.append(key)
393
394
394 def __contains__(self, key):
395 def __contains__(self, key):
395 return key in self._cache
396 return key in self._cache
396
397
397 def clear(self):
398 def clear(self):
398 self._cache.clear()
399 self._cache.clear()
399 self._order = deque()
400 self._order = deque()
400
401
401 def lrucachefunc(func):
402 def lrucachefunc(func):
402 '''cache most recent results of function calls'''
403 '''cache most recent results of function calls'''
403 cache = {}
404 cache = {}
404 order = deque()
405 order = deque()
405 if func.func_code.co_argcount == 1:
406 if func.func_code.co_argcount == 1:
406 def f(arg):
407 def f(arg):
407 if arg not in cache:
408 if arg not in cache:
408 if len(cache) > 20:
409 if len(cache) > 20:
409 del cache[order.popleft()]
410 del cache[order.popleft()]
410 cache[arg] = func(arg)
411 cache[arg] = func(arg)
411 else:
412 else:
412 order.remove(arg)
413 order.remove(arg)
413 order.append(arg)
414 order.append(arg)
414 return cache[arg]
415 return cache[arg]
415 else:
416 else:
416 def f(*args):
417 def f(*args):
417 if args not in cache:
418 if args not in cache:
418 if len(cache) > 20:
419 if len(cache) > 20:
419 del cache[order.popleft()]
420 del cache[order.popleft()]
420 cache[args] = func(*args)
421 cache[args] = func(*args)
421 else:
422 else:
422 order.remove(args)
423 order.remove(args)
423 order.append(args)
424 order.append(args)
424 return cache[args]
425 return cache[args]
425
426
426 return f
427 return f
427
428
428 class propertycache(object):
429 class propertycache(object):
429 def __init__(self, func):
430 def __init__(self, func):
430 self.func = func
431 self.func = func
431 self.name = func.__name__
432 self.name = func.__name__
432 def __get__(self, obj, type=None):
433 def __get__(self, obj, type=None):
433 result = self.func(obj)
434 result = self.func(obj)
434 self.cachevalue(obj, result)
435 self.cachevalue(obj, result)
435 return result
436 return result
436
437
437 def cachevalue(self, obj, value):
438 def cachevalue(self, obj, value):
438 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
439 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
439 obj.__dict__[self.name] = value
440 obj.__dict__[self.name] = value
440
441
441 def pipefilter(s, cmd):
442 def pipefilter(s, cmd):
442 '''filter string S through command CMD, returning its output'''
443 '''filter string S through command CMD, returning its output'''
443 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
444 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
444 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
445 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
445 pout, perr = p.communicate(s)
446 pout, perr = p.communicate(s)
446 return pout
447 return pout
447
448
448 def tempfilter(s, cmd):
449 def tempfilter(s, cmd):
449 '''filter string S through a pair of temporary files with CMD.
450 '''filter string S through a pair of temporary files with CMD.
450 CMD is used as a template to create the real command to be run,
451 CMD is used as a template to create the real command to be run,
451 with the strings INFILE and OUTFILE replaced by the real names of
452 with the strings INFILE and OUTFILE replaced by the real names of
452 the temporary files generated.'''
453 the temporary files generated.'''
453 inname, outname = None, None
454 inname, outname = None, None
454 try:
455 try:
455 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
456 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
456 fp = os.fdopen(infd, 'wb')
457 fp = os.fdopen(infd, 'wb')
457 fp.write(s)
458 fp.write(s)
458 fp.close()
459 fp.close()
459 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
460 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
460 os.close(outfd)
461 os.close(outfd)
461 cmd = cmd.replace('INFILE', inname)
462 cmd = cmd.replace('INFILE', inname)
462 cmd = cmd.replace('OUTFILE', outname)
463 cmd = cmd.replace('OUTFILE', outname)
463 code = os.system(cmd)
464 code = os.system(cmd)
464 if sys.platform == 'OpenVMS' and code & 1:
465 if sys.platform == 'OpenVMS' and code & 1:
465 code = 0
466 code = 0
466 if code:
467 if code:
467 raise Abort(_("command '%s' failed: %s") %
468 raise Abort(_("command '%s' failed: %s") %
468 (cmd, explainexit(code)))
469 (cmd, explainexit(code)))
469 fp = open(outname, 'rb')
470 fp = open(outname, 'rb')
470 r = fp.read()
471 r = fp.read()
471 fp.close()
472 fp.close()
472 return r
473 return r
473 finally:
474 finally:
474 try:
475 try:
475 if inname:
476 if inname:
476 os.unlink(inname)
477 os.unlink(inname)
477 except OSError:
478 except OSError:
478 pass
479 pass
479 try:
480 try:
480 if outname:
481 if outname:
481 os.unlink(outname)
482 os.unlink(outname)
482 except OSError:
483 except OSError:
483 pass
484 pass
484
485
485 filtertable = {
486 filtertable = {
486 'tempfile:': tempfilter,
487 'tempfile:': tempfilter,
487 'pipe:': pipefilter,
488 'pipe:': pipefilter,
488 }
489 }
489
490
490 def filter(s, cmd):
491 def filter(s, cmd):
491 "filter a string through a command that transforms its input to its output"
492 "filter a string through a command that transforms its input to its output"
492 for name, fn in filtertable.iteritems():
493 for name, fn in filtertable.iteritems():
493 if cmd.startswith(name):
494 if cmd.startswith(name):
494 return fn(s, cmd[len(name):].lstrip())
495 return fn(s, cmd[len(name):].lstrip())
495 return pipefilter(s, cmd)
496 return pipefilter(s, cmd)
496
497
497 def binary(s):
498 def binary(s):
498 """return true if a string is binary data"""
499 """return true if a string is binary data"""
499 return bool(s and '\0' in s)
500 return bool(s and '\0' in s)
500
501
501 def increasingchunks(source, min=1024, max=65536):
502 def increasingchunks(source, min=1024, max=65536):
502 '''return no less than min bytes per chunk while data remains,
503 '''return no less than min bytes per chunk while data remains,
503 doubling min after each chunk until it reaches max'''
504 doubling min after each chunk until it reaches max'''
504 def log2(x):
505 def log2(x):
505 if not x:
506 if not x:
506 return 0
507 return 0
507 i = 0
508 i = 0
508 while x:
509 while x:
509 x >>= 1
510 x >>= 1
510 i += 1
511 i += 1
511 return i - 1
512 return i - 1
512
513
513 buf = []
514 buf = []
514 blen = 0
515 blen = 0
515 for chunk in source:
516 for chunk in source:
516 buf.append(chunk)
517 buf.append(chunk)
517 blen += len(chunk)
518 blen += len(chunk)
518 if blen >= min:
519 if blen >= min:
519 if min < max:
520 if min < max:
520 min = min << 1
521 min = min << 1
521 nmin = 1 << log2(blen)
522 nmin = 1 << log2(blen)
522 if nmin > min:
523 if nmin > min:
523 min = nmin
524 min = nmin
524 if min > max:
525 if min > max:
525 min = max
526 min = max
526 yield ''.join(buf)
527 yield ''.join(buf)
527 blen = 0
528 blen = 0
528 buf = []
529 buf = []
529 if buf:
530 if buf:
530 yield ''.join(buf)
531 yield ''.join(buf)
531
532
532 Abort = error.Abort
533 Abort = error.Abort
533
534
534 def always(fn):
535 def always(fn):
535 return True
536 return True
536
537
537 def never(fn):
538 def never(fn):
538 return False
539 return False
539
540
540 def pathto(root, n1, n2):
541 def pathto(root, n1, n2):
541 '''return the relative path from one place to another.
542 '''return the relative path from one place to another.
542 root should use os.sep to separate directories
543 root should use os.sep to separate directories
543 n1 should use os.sep to separate directories
544 n1 should use os.sep to separate directories
544 n2 should use "/" to separate directories
545 n2 should use "/" to separate directories
545 returns an os.sep-separated path.
546 returns an os.sep-separated path.
546
547
547 If n1 is a relative path, it's assumed it's
548 If n1 is a relative path, it's assumed it's
548 relative to root.
549 relative to root.
549 n2 should always be relative to root.
550 n2 should always be relative to root.
550 '''
551 '''
551 if not n1:
552 if not n1:
552 return localpath(n2)
553 return localpath(n2)
553 if os.path.isabs(n1):
554 if os.path.isabs(n1):
554 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
555 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
555 return os.path.join(root, localpath(n2))
556 return os.path.join(root, localpath(n2))
556 n2 = '/'.join((pconvert(root), n2))
557 n2 = '/'.join((pconvert(root), n2))
557 a, b = splitpath(n1), n2.split('/')
558 a, b = splitpath(n1), n2.split('/')
558 a.reverse()
559 a.reverse()
559 b.reverse()
560 b.reverse()
560 while a and b and a[-1] == b[-1]:
561 while a and b and a[-1] == b[-1]:
561 a.pop()
562 a.pop()
562 b.pop()
563 b.pop()
563 b.reverse()
564 b.reverse()
564 return os.sep.join((['..'] * len(a)) + b) or '.'
565 return os.sep.join((['..'] * len(a)) + b) or '.'
565
566
566 def mainfrozen():
567 def mainfrozen():
567 """return True if we are a frozen executable.
568 """return True if we are a frozen executable.
568
569
569 The code supports py2exe (most common, Windows only) and tools/freeze
570 The code supports py2exe (most common, Windows only) and tools/freeze
570 (portable, not much used).
571 (portable, not much used).
571 """
572 """
572 return (safehasattr(sys, "frozen") or # new py2exe
573 return (safehasattr(sys, "frozen") or # new py2exe
573 safehasattr(sys, "importers") or # old py2exe
574 safehasattr(sys, "importers") or # old py2exe
574 imp.is_frozen("__main__")) # tools/freeze
575 imp.is_frozen("__main__")) # tools/freeze
575
576
576 # the location of data files matching the source code
577 # the location of data files matching the source code
577 if mainfrozen():
578 if mainfrozen():
578 # executable version (py2exe) doesn't support __file__
579 # executable version (py2exe) doesn't support __file__
579 datapath = os.path.dirname(sys.executable)
580 datapath = os.path.dirname(sys.executable)
580 else:
581 else:
581 datapath = os.path.dirname(__file__)
582 datapath = os.path.dirname(__file__)
582
583
583 i18n.setdatapath(datapath)
584 i18n.setdatapath(datapath)
584
585
585 _hgexecutable = None
586 _hgexecutable = None
586
587
587 def hgexecutable():
588 def hgexecutable():
588 """return location of the 'hg' executable.
589 """return location of the 'hg' executable.
589
590
590 Defaults to $HG or 'hg' in the search path.
591 Defaults to $HG or 'hg' in the search path.
591 """
592 """
592 if _hgexecutable is None:
593 if _hgexecutable is None:
593 hg = os.environ.get('HG')
594 hg = os.environ.get('HG')
594 mainmod = sys.modules['__main__']
595 mainmod = sys.modules['__main__']
595 if hg:
596 if hg:
596 _sethgexecutable(hg)
597 _sethgexecutable(hg)
597 elif mainfrozen():
598 elif mainfrozen():
598 _sethgexecutable(sys.executable)
599 _sethgexecutable(sys.executable)
599 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
600 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
600 _sethgexecutable(mainmod.__file__)
601 _sethgexecutable(mainmod.__file__)
601 else:
602 else:
602 exe = findexe('hg') or os.path.basename(sys.argv[0])
603 exe = findexe('hg') or os.path.basename(sys.argv[0])
603 _sethgexecutable(exe)
604 _sethgexecutable(exe)
604 return _hgexecutable
605 return _hgexecutable
605
606
606 def _sethgexecutable(path):
607 def _sethgexecutable(path):
607 """set location of the 'hg' executable"""
608 """set location of the 'hg' executable"""
608 global _hgexecutable
609 global _hgexecutable
609 _hgexecutable = path
610 _hgexecutable = path
610
611
611 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
612 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
612 '''enhanced shell command execution.
613 '''enhanced shell command execution.
613 run with environment maybe modified, maybe in different dir.
614 run with environment maybe modified, maybe in different dir.
614
615
615 if command fails and onerr is None, return status. if ui object,
616 if command fails and onerr is None, return status. if ui object,
616 print error message and return status, else raise onerr object as
617 print error message and return status, else raise onerr object as
617 exception.
618 exception.
618
619
619 if out is specified, it is assumed to be a file-like object that has a
620 if out is specified, it is assumed to be a file-like object that has a
620 write() method. stdout and stderr will be redirected to out.'''
621 write() method. stdout and stderr will be redirected to out.'''
621 try:
622 try:
622 sys.stdout.flush()
623 sys.stdout.flush()
623 except Exception:
624 except Exception:
624 pass
625 pass
625 def py2shell(val):
626 def py2shell(val):
626 'convert python object into string that is useful to shell'
627 'convert python object into string that is useful to shell'
627 if val is None or val is False:
628 if val is None or val is False:
628 return '0'
629 return '0'
629 if val is True:
630 if val is True:
630 return '1'
631 return '1'
631 return str(val)
632 return str(val)
632 origcmd = cmd
633 origcmd = cmd
633 cmd = quotecommand(cmd)
634 cmd = quotecommand(cmd)
634 if sys.platform == 'plan9' and (sys.version_info[0] == 2
635 if sys.platform == 'plan9' and (sys.version_info[0] == 2
635 and sys.version_info[1] < 7):
636 and sys.version_info[1] < 7):
636 # subprocess kludge to work around issues in half-baked Python
637 # subprocess kludge to work around issues in half-baked Python
637 # ports, notably bichued/python:
638 # ports, notably bichued/python:
638 if not cwd is None:
639 if not cwd is None:
639 os.chdir(cwd)
640 os.chdir(cwd)
640 rc = os.system(cmd)
641 rc = os.system(cmd)
641 else:
642 else:
642 env = dict(os.environ)
643 env = dict(os.environ)
643 env.update((k, py2shell(v)) for k, v in environ.iteritems())
644 env.update((k, py2shell(v)) for k, v in environ.iteritems())
644 env['HG'] = hgexecutable()
645 env['HG'] = hgexecutable()
645 if out is None or out == sys.__stdout__:
646 if out is None or out == sys.__stdout__:
646 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
647 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
647 env=env, cwd=cwd)
648 env=env, cwd=cwd)
648 else:
649 else:
649 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
650 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
650 env=env, cwd=cwd, stdout=subprocess.PIPE,
651 env=env, cwd=cwd, stdout=subprocess.PIPE,
651 stderr=subprocess.STDOUT)
652 stderr=subprocess.STDOUT)
652 while True:
653 while True:
653 line = proc.stdout.readline()
654 line = proc.stdout.readline()
654 if not line:
655 if not line:
655 break
656 break
656 out.write(line)
657 out.write(line)
657 proc.wait()
658 proc.wait()
658 rc = proc.returncode
659 rc = proc.returncode
659 if sys.platform == 'OpenVMS' and rc & 1:
660 if sys.platform == 'OpenVMS' and rc & 1:
660 rc = 0
661 rc = 0
661 if rc and onerr:
662 if rc and onerr:
662 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
663 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
663 explainexit(rc)[0])
664 explainexit(rc)[0])
664 if errprefix:
665 if errprefix:
665 errmsg = '%s: %s' % (errprefix, errmsg)
666 errmsg = '%s: %s' % (errprefix, errmsg)
666 try:
667 try:
667 onerr.warn(errmsg + '\n')
668 onerr.warn(errmsg + '\n')
668 except AttributeError:
669 except AttributeError:
669 raise onerr(errmsg)
670 raise onerr(errmsg)
670 return rc
671 return rc
671
672
672 def checksignature(func):
673 def checksignature(func):
673 '''wrap a function with code to check for calling errors'''
674 '''wrap a function with code to check for calling errors'''
674 def check(*args, **kwargs):
675 def check(*args, **kwargs):
675 try:
676 try:
676 return func(*args, **kwargs)
677 return func(*args, **kwargs)
677 except TypeError:
678 except TypeError:
678 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
679 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
679 raise error.SignatureError
680 raise error.SignatureError
680 raise
681 raise
681
682
682 return check
683 return check
683
684
684 def copyfile(src, dest):
685 def copyfile(src, dest):
685 "copy a file, preserving mode and atime/mtime"
686 "copy a file, preserving mode and atime/mtime"
686 if os.path.lexists(dest):
687 if os.path.lexists(dest):
687 unlink(dest)
688 unlink(dest)
688 if os.path.islink(src):
689 if os.path.islink(src):
689 os.symlink(os.readlink(src), dest)
690 os.symlink(os.readlink(src), dest)
690 else:
691 else:
691 try:
692 try:
692 shutil.copyfile(src, dest)
693 shutil.copyfile(src, dest)
693 shutil.copymode(src, dest)
694 shutil.copymode(src, dest)
694 except shutil.Error, inst:
695 except shutil.Error, inst:
695 raise Abort(str(inst))
696 raise Abort(str(inst))
696
697
697 def copyfiles(src, dst, hardlink=None):
698 def copyfiles(src, dst, hardlink=None):
698 """Copy a directory tree using hardlinks if possible"""
699 """Copy a directory tree using hardlinks if possible"""
699
700
700 if hardlink is None:
701 if hardlink is None:
701 hardlink = (os.stat(src).st_dev ==
702 hardlink = (os.stat(src).st_dev ==
702 os.stat(os.path.dirname(dst)).st_dev)
703 os.stat(os.path.dirname(dst)).st_dev)
703
704
704 num = 0
705 num = 0
705 if os.path.isdir(src):
706 if os.path.isdir(src):
706 os.mkdir(dst)
707 os.mkdir(dst)
707 for name, kind in osutil.listdir(src):
708 for name, kind in osutil.listdir(src):
708 srcname = os.path.join(src, name)
709 srcname = os.path.join(src, name)
709 dstname = os.path.join(dst, name)
710 dstname = os.path.join(dst, name)
710 hardlink, n = copyfiles(srcname, dstname, hardlink)
711 hardlink, n = copyfiles(srcname, dstname, hardlink)
711 num += n
712 num += n
712 else:
713 else:
713 if hardlink:
714 if hardlink:
714 try:
715 try:
715 oslink(src, dst)
716 oslink(src, dst)
716 except (IOError, OSError):
717 except (IOError, OSError):
717 hardlink = False
718 hardlink = False
718 shutil.copy(src, dst)
719 shutil.copy(src, dst)
719 else:
720 else:
720 shutil.copy(src, dst)
721 shutil.copy(src, dst)
721 num += 1
722 num += 1
722
723
723 return hardlink, num
724 return hardlink, num
724
725
725 _winreservednames = '''con prn aux nul
726 _winreservednames = '''con prn aux nul
726 com1 com2 com3 com4 com5 com6 com7 com8 com9
727 com1 com2 com3 com4 com5 com6 com7 com8 com9
727 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
728 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
728 _winreservedchars = ':*?"<>|'
729 _winreservedchars = ':*?"<>|'
729 def checkwinfilename(path):
730 def checkwinfilename(path):
730 r'''Check that the base-relative path is a valid filename on Windows.
731 r'''Check that the base-relative path is a valid filename on Windows.
731 Returns None if the path is ok, or a UI string describing the problem.
732 Returns None if the path is ok, or a UI string describing the problem.
732
733
733 >>> checkwinfilename("just/a/normal/path")
734 >>> checkwinfilename("just/a/normal/path")
734 >>> checkwinfilename("foo/bar/con.xml")
735 >>> checkwinfilename("foo/bar/con.xml")
735 "filename contains 'con', which is reserved on Windows"
736 "filename contains 'con', which is reserved on Windows"
736 >>> checkwinfilename("foo/con.xml/bar")
737 >>> checkwinfilename("foo/con.xml/bar")
737 "filename contains 'con', which is reserved on Windows"
738 "filename contains 'con', which is reserved on Windows"
738 >>> checkwinfilename("foo/bar/xml.con")
739 >>> checkwinfilename("foo/bar/xml.con")
739 >>> checkwinfilename("foo/bar/AUX/bla.txt")
740 >>> checkwinfilename("foo/bar/AUX/bla.txt")
740 "filename contains 'AUX', which is reserved on Windows"
741 "filename contains 'AUX', which is reserved on Windows"
741 >>> checkwinfilename("foo/bar/bla:.txt")
742 >>> checkwinfilename("foo/bar/bla:.txt")
742 "filename contains ':', which is reserved on Windows"
743 "filename contains ':', which is reserved on Windows"
743 >>> checkwinfilename("foo/bar/b\07la.txt")
744 >>> checkwinfilename("foo/bar/b\07la.txt")
744 "filename contains '\\x07', which is invalid on Windows"
745 "filename contains '\\x07', which is invalid on Windows"
745 >>> checkwinfilename("foo/bar/bla ")
746 >>> checkwinfilename("foo/bar/bla ")
746 "filename ends with ' ', which is not allowed on Windows"
747 "filename ends with ' ', which is not allowed on Windows"
747 >>> checkwinfilename("../bar")
748 >>> checkwinfilename("../bar")
748 >>> checkwinfilename("foo\\")
749 >>> checkwinfilename("foo\\")
749 "filename ends with '\\', which is invalid on Windows"
750 "filename ends with '\\', which is invalid on Windows"
750 >>> checkwinfilename("foo\\/bar")
751 >>> checkwinfilename("foo\\/bar")
751 "directory name ends with '\\', which is invalid on Windows"
752 "directory name ends with '\\', which is invalid on Windows"
752 '''
753 '''
753 if path.endswith('\\'):
754 if path.endswith('\\'):
754 return _("filename ends with '\\', which is invalid on Windows")
755 return _("filename ends with '\\', which is invalid on Windows")
755 if '\\/' in path:
756 if '\\/' in path:
756 return _("directory name ends with '\\', which is invalid on Windows")
757 return _("directory name ends with '\\', which is invalid on Windows")
757 for n in path.replace('\\', '/').split('/'):
758 for n in path.replace('\\', '/').split('/'):
758 if not n:
759 if not n:
759 continue
760 continue
760 for c in n:
761 for c in n:
761 if c in _winreservedchars:
762 if c in _winreservedchars:
762 return _("filename contains '%s', which is reserved "
763 return _("filename contains '%s', which is reserved "
763 "on Windows") % c
764 "on Windows") % c
764 if ord(c) <= 31:
765 if ord(c) <= 31:
765 return _("filename contains %r, which is invalid "
766 return _("filename contains %r, which is invalid "
766 "on Windows") % c
767 "on Windows") % c
767 base = n.split('.')[0]
768 base = n.split('.')[0]
768 if base and base.lower() in _winreservednames:
769 if base and base.lower() in _winreservednames:
769 return _("filename contains '%s', which is reserved "
770 return _("filename contains '%s', which is reserved "
770 "on Windows") % base
771 "on Windows") % base
771 t = n[-1]
772 t = n[-1]
772 if t in '. ' and n not in '..':
773 if t in '. ' and n not in '..':
773 return _("filename ends with '%s', which is not allowed "
774 return _("filename ends with '%s', which is not allowed "
774 "on Windows") % t
775 "on Windows") % t
775
776
776 if os.name == 'nt':
777 if os.name == 'nt':
777 checkosfilename = checkwinfilename
778 checkosfilename = checkwinfilename
778 else:
779 else:
779 checkosfilename = platform.checkosfilename
780 checkosfilename = platform.checkosfilename
780
781
781 def makelock(info, pathname):
782 def makelock(info, pathname):
782 try:
783 try:
783 return os.symlink(info, pathname)
784 return os.symlink(info, pathname)
784 except OSError, why:
785 except OSError, why:
785 if why.errno == errno.EEXIST:
786 if why.errno == errno.EEXIST:
786 raise
787 raise
787 except AttributeError: # no symlink in os
788 except AttributeError: # no symlink in os
788 pass
789 pass
789
790
790 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
791 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
791 os.write(ld, info)
792 os.write(ld, info)
792 os.close(ld)
793 os.close(ld)
793
794
794 def readlock(pathname):
795 def readlock(pathname):
795 try:
796 try:
796 return os.readlink(pathname)
797 return os.readlink(pathname)
797 except OSError, why:
798 except OSError, why:
798 if why.errno not in (errno.EINVAL, errno.ENOSYS):
799 if why.errno not in (errno.EINVAL, errno.ENOSYS):
799 raise
800 raise
800 except AttributeError: # no symlink in os
801 except AttributeError: # no symlink in os
801 pass
802 pass
802 fp = posixfile(pathname)
803 fp = posixfile(pathname)
803 r = fp.read()
804 r = fp.read()
804 fp.close()
805 fp.close()
805 return r
806 return r
806
807
807 def fstat(fp):
808 def fstat(fp):
808 '''stat file object that may not have fileno method.'''
809 '''stat file object that may not have fileno method.'''
809 try:
810 try:
810 return os.fstat(fp.fileno())
811 return os.fstat(fp.fileno())
811 except AttributeError:
812 except AttributeError:
812 return os.stat(fp.name)
813 return os.stat(fp.name)
813
814
814 # File system features
815 # File system features
815
816
816 def checkcase(path):
817 def checkcase(path):
817 """
818 """
818 Return true if the given path is on a case-sensitive filesystem
819 Return true if the given path is on a case-sensitive filesystem
819
820
820 Requires a path (like /foo/.hg) ending with a foldable final
821 Requires a path (like /foo/.hg) ending with a foldable final
821 directory component.
822 directory component.
822 """
823 """
823 s1 = os.stat(path)
824 s1 = os.stat(path)
824 d, b = os.path.split(path)
825 d, b = os.path.split(path)
825 b2 = b.upper()
826 b2 = b.upper()
826 if b == b2:
827 if b == b2:
827 b2 = b.lower()
828 b2 = b.lower()
828 if b == b2:
829 if b == b2:
829 return True # no evidence against case sensitivity
830 return True # no evidence against case sensitivity
830 p2 = os.path.join(d, b2)
831 p2 = os.path.join(d, b2)
831 try:
832 try:
832 s2 = os.stat(p2)
833 s2 = os.stat(p2)
833 if s2 == s1:
834 if s2 == s1:
834 return False
835 return False
835 return True
836 return True
836 except OSError:
837 except OSError:
837 return True
838 return True
838
839
839 try:
840 try:
840 import re2
841 import re2
841 _re2 = None
842 _re2 = None
842 except ImportError:
843 except ImportError:
843 _re2 = False
844 _re2 = False
844
845
845 class _re(object):
846 class _re(object):
846 def _checkre2(self):
847 def _checkre2(self):
847 global _re2
848 global _re2
848 try:
849 try:
849 # check if match works, see issue3964
850 # check if match works, see issue3964
850 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
851 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
851 except ImportError:
852 except ImportError:
852 _re2 = False
853 _re2 = False
853
854
854 def compile(self, pat, flags=0):
855 def compile(self, pat, flags=0):
855 '''Compile a regular expression, using re2 if possible
856 '''Compile a regular expression, using re2 if possible
856
857
857 For best performance, use only re2-compatible regexp features. The
858 For best performance, use only re2-compatible regexp features. The
858 only flags from the re module that are re2-compatible are
859 only flags from the re module that are re2-compatible are
859 IGNORECASE and MULTILINE.'''
860 IGNORECASE and MULTILINE.'''
860 if _re2 is None:
861 if _re2 is None:
861 self._checkre2()
862 self._checkre2()
862 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
863 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
863 if flags & remod.IGNORECASE:
864 if flags & remod.IGNORECASE:
864 pat = '(?i)' + pat
865 pat = '(?i)' + pat
865 if flags & remod.MULTILINE:
866 if flags & remod.MULTILINE:
866 pat = '(?m)' + pat
867 pat = '(?m)' + pat
867 try:
868 try:
868 return re2.compile(pat)
869 return re2.compile(pat)
869 except re2.error:
870 except re2.error:
870 pass
871 pass
871 return remod.compile(pat, flags)
872 return remod.compile(pat, flags)
872
873
873 @propertycache
874 @propertycache
874 def escape(self):
875 def escape(self):
875 '''Return the version of escape corresponding to self.compile.
876 '''Return the version of escape corresponding to self.compile.
876
877
877 This is imperfect because whether re2 or re is used for a particular
878 This is imperfect because whether re2 or re is used for a particular
878 function depends on the flags, etc, but it's the best we can do.
879 function depends on the flags, etc, but it's the best we can do.
879 '''
880 '''
880 global _re2
881 global _re2
881 if _re2 is None:
882 if _re2 is None:
882 self._checkre2()
883 self._checkre2()
883 if _re2:
884 if _re2:
884 return re2.escape
885 return re2.escape
885 else:
886 else:
886 return remod.escape
887 return remod.escape
887
888
888 re = _re()
889 re = _re()
889
890
890 _fspathcache = {}
891 _fspathcache = {}
891 def fspath(name, root):
892 def fspath(name, root):
892 '''Get name in the case stored in the filesystem
893 '''Get name in the case stored in the filesystem
893
894
894 The name should be relative to root, and be normcase-ed for efficiency.
895 The name should be relative to root, and be normcase-ed for efficiency.
895
896
896 Note that this function is unnecessary, and should not be
897 Note that this function is unnecessary, and should not be
897 called, for case-sensitive filesystems (simply because it's expensive).
898 called, for case-sensitive filesystems (simply because it's expensive).
898
899
899 The root should be normcase-ed, too.
900 The root should be normcase-ed, too.
900 '''
901 '''
901 def find(p, contents):
902 def find(p, contents):
902 for n in contents:
903 for n in contents:
903 if normcase(n) == p:
904 if normcase(n) == p:
904 return n
905 return n
905 return None
906 return None
906
907
907 seps = os.sep
908 seps = os.sep
908 if os.altsep:
909 if os.altsep:
909 seps = seps + os.altsep
910 seps = seps + os.altsep
910 # Protect backslashes. This gets silly very quickly.
911 # Protect backslashes. This gets silly very quickly.
911 seps.replace('\\','\\\\')
912 seps.replace('\\','\\\\')
912 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
913 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
913 dir = os.path.normpath(root)
914 dir = os.path.normpath(root)
914 result = []
915 result = []
915 for part, sep in pattern.findall(name):
916 for part, sep in pattern.findall(name):
916 if sep:
917 if sep:
917 result.append(sep)
918 result.append(sep)
918 continue
919 continue
919
920
920 if dir not in _fspathcache:
921 if dir not in _fspathcache:
921 _fspathcache[dir] = os.listdir(dir)
922 _fspathcache[dir] = os.listdir(dir)
922 contents = _fspathcache[dir]
923 contents = _fspathcache[dir]
923
924
924 found = find(part, contents)
925 found = find(part, contents)
925 if not found:
926 if not found:
926 # retry "once per directory" per "dirstate.walk" which
927 # retry "once per directory" per "dirstate.walk" which
927 # may take place for each patch of "hg qpush", for example
928 # may take place for each patch of "hg qpush", for example
928 contents = os.listdir(dir)
929 contents = os.listdir(dir)
929 _fspathcache[dir] = contents
930 _fspathcache[dir] = contents
930 found = find(part, contents)
931 found = find(part, contents)
931
932
932 result.append(found or part)
933 result.append(found or part)
933 dir = os.path.join(dir, part)
934 dir = os.path.join(dir, part)
934
935
935 return ''.join(result)
936 return ''.join(result)
936
937
937 def checknlink(testfile):
938 def checknlink(testfile):
938 '''check whether hardlink count reporting works properly'''
939 '''check whether hardlink count reporting works properly'''
939
940
940 # testfile may be open, so we need a separate file for checking to
941 # testfile may be open, so we need a separate file for checking to
941 # work around issue2543 (or testfile may get lost on Samba shares)
942 # work around issue2543 (or testfile may get lost on Samba shares)
942 f1 = testfile + ".hgtmp1"
943 f1 = testfile + ".hgtmp1"
943 if os.path.lexists(f1):
944 if os.path.lexists(f1):
944 return False
945 return False
945 try:
946 try:
946 posixfile(f1, 'w').close()
947 posixfile(f1, 'w').close()
947 except IOError:
948 except IOError:
948 return False
949 return False
949
950
950 f2 = testfile + ".hgtmp2"
951 f2 = testfile + ".hgtmp2"
951 fd = None
952 fd = None
952 try:
953 try:
953 try:
954 try:
954 oslink(f1, f2)
955 oslink(f1, f2)
955 except OSError:
956 except OSError:
956 return False
957 return False
957
958
958 # nlinks() may behave differently for files on Windows shares if
959 # nlinks() may behave differently for files on Windows shares if
959 # the file is open.
960 # the file is open.
960 fd = posixfile(f2)
961 fd = posixfile(f2)
961 return nlinks(f2) > 1
962 return nlinks(f2) > 1
962 finally:
963 finally:
963 if fd is not None:
964 if fd is not None:
964 fd.close()
965 fd.close()
965 for f in (f1, f2):
966 for f in (f1, f2):
966 try:
967 try:
967 os.unlink(f)
968 os.unlink(f)
968 except OSError:
969 except OSError:
969 pass
970 pass
970
971
971 def endswithsep(path):
972 def endswithsep(path):
972 '''Check path ends with os.sep or os.altsep.'''
973 '''Check path ends with os.sep or os.altsep.'''
973 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
974 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
974
975
975 def splitpath(path):
976 def splitpath(path):
976 '''Split path by os.sep.
977 '''Split path by os.sep.
977 Note that this function does not use os.altsep because this is
978 Note that this function does not use os.altsep because this is
978 an alternative to a simple "xxx.split(os.sep)".
979 an alternative to a simple "xxx.split(os.sep)".
979 It is recommended to use os.path.normpath() before using this
980 It is recommended to use os.path.normpath() before using this
980 function if needed.'''
981 function if needed.'''
981 return path.split(os.sep)
982 return path.split(os.sep)
982
983
983 def gui():
984 def gui():
984 '''Are we running in a GUI?'''
985 '''Are we running in a GUI?'''
985 if sys.platform == 'darwin':
986 if sys.platform == 'darwin':
986 if 'SSH_CONNECTION' in os.environ:
987 if 'SSH_CONNECTION' in os.environ:
987 # handle SSH access to a box where the user is logged in
988 # handle SSH access to a box where the user is logged in
988 return False
989 return False
989 elif getattr(osutil, 'isgui', None):
990 elif getattr(osutil, 'isgui', None):
990 # check if a CoreGraphics session is available
991 # check if a CoreGraphics session is available
991 return osutil.isgui()
992 return osutil.isgui()
992 else:
993 else:
993 # pure build; use a safe default
994 # pure build; use a safe default
994 return True
995 return True
995 else:
996 else:
996 return os.name == "nt" or os.environ.get("DISPLAY")
997 return os.name == "nt" or os.environ.get("DISPLAY")
997
998
998 def mktempcopy(name, emptyok=False, createmode=None):
999 def mktempcopy(name, emptyok=False, createmode=None):
999 """Create a temporary file with the same contents from name
1000 """Create a temporary file with the same contents from name
1000
1001
1001 The permission bits are copied from the original file.
1002 The permission bits are copied from the original file.
1002
1003
1003 If the temporary file is going to be truncated immediately, you
1004 If the temporary file is going to be truncated immediately, you
1004 can use emptyok=True as an optimization.
1005 can use emptyok=True as an optimization.
1005
1006
1006 Returns the name of the temporary file.
1007 Returns the name of the temporary file.
1007 """
1008 """
1008 d, fn = os.path.split(name)
1009 d, fn = os.path.split(name)
1009 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1010 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1010 os.close(fd)
1011 os.close(fd)
1011 # Temporary files are created with mode 0600, which is usually not
1012 # Temporary files are created with mode 0600, which is usually not
1012 # what we want. If the original file already exists, just copy
1013 # what we want. If the original file already exists, just copy
1013 # its mode. Otherwise, manually obey umask.
1014 # its mode. Otherwise, manually obey umask.
1014 copymode(name, temp, createmode)
1015 copymode(name, temp, createmode)
1015 if emptyok:
1016 if emptyok:
1016 return temp
1017 return temp
1017 try:
1018 try:
1018 try:
1019 try:
1019 ifp = posixfile(name, "rb")
1020 ifp = posixfile(name, "rb")
1020 except IOError, inst:
1021 except IOError, inst:
1021 if inst.errno == errno.ENOENT:
1022 if inst.errno == errno.ENOENT:
1022 return temp
1023 return temp
1023 if not getattr(inst, 'filename', None):
1024 if not getattr(inst, 'filename', None):
1024 inst.filename = name
1025 inst.filename = name
1025 raise
1026 raise
1026 ofp = posixfile(temp, "wb")
1027 ofp = posixfile(temp, "wb")
1027 for chunk in filechunkiter(ifp):
1028 for chunk in filechunkiter(ifp):
1028 ofp.write(chunk)
1029 ofp.write(chunk)
1029 ifp.close()
1030 ifp.close()
1030 ofp.close()
1031 ofp.close()
1031 except: # re-raises
1032 except: # re-raises
1032 try: os.unlink(temp)
1033 try: os.unlink(temp)
1033 except OSError: pass
1034 except OSError: pass
1034 raise
1035 raise
1035 return temp
1036 return temp
1036
1037
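# Illustrative doctest-style sketch (the names refer to the functions defined
# in this module; 'config' is a hypothetical file name): the usual pattern is
# copy, write, then rename() over the original so readers never see a
# half-written file.
#
#   >>> tmp = mktempcopy('config', emptyok=True)
#   >>> fp = posixfile(tmp, 'wb')
#   >>> fp.write('[ui]\nusername = test\n'); fp.close()
#   >>> rename(tmp, 'config')   # atomic replacement of the original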
1037 class atomictempfile(object):
1038 class atomictempfile(object):
1038 '''writable file object that atomically updates a file
1039 '''writable file object that atomically updates a file
1039
1040
1040 All writes will go to a temporary copy of the original file. Call
1041 All writes will go to a temporary copy of the original file. Call
1041 close() when you are done writing, and atomictempfile will rename
1042 close() when you are done writing, and atomictempfile will rename
1042 the temporary copy to the original name, making the changes
1043 the temporary copy to the original name, making the changes
1043 visible. If the object is destroyed without being closed, all your
1044 visible. If the object is destroyed without being closed, all your
1044 writes are discarded.
1045 writes are discarded.
1045 '''
1046 '''
1046 def __init__(self, name, mode='w+b', createmode=None):
1047 def __init__(self, name, mode='w+b', createmode=None):
1047 self.__name = name # permanent name
1048 self.__name = name # permanent name
1048 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1049 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1049 createmode=createmode)
1050 createmode=createmode)
1050 self._fp = posixfile(self._tempname, mode)
1051 self._fp = posixfile(self._tempname, mode)
1051
1052
1052 # delegated methods
1053 # delegated methods
1053 self.write = self._fp.write
1054 self.write = self._fp.write
1054 self.seek = self._fp.seek
1055 self.seek = self._fp.seek
1055 self.tell = self._fp.tell
1056 self.tell = self._fp.tell
1056 self.fileno = self._fp.fileno
1057 self.fileno = self._fp.fileno
1057
1058
1058 def close(self):
1059 def close(self):
1059 if not self._fp.closed:
1060 if not self._fp.closed:
1060 self._fp.close()
1061 self._fp.close()
1061 rename(self._tempname, localpath(self.__name))
1062 rename(self._tempname, localpath(self.__name))
1062
1063
1063 def discard(self):
1064 def discard(self):
1064 if not self._fp.closed:
1065 if not self._fp.closed:
1065 try:
1066 try:
1066 os.unlink(self._tempname)
1067 os.unlink(self._tempname)
1067 except OSError:
1068 except OSError:
1068 pass
1069 pass
1069 self._fp.close()
1070 self._fp.close()
1070
1071
1071 def __del__(self):
1072 def __del__(self):
1072 if safehasattr(self, '_fp'): # constructor actually did something
1073 if safehasattr(self, '_fp'): # constructor actually did something
1073 self.discard()
1074 self.discard()
1074
1075
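# Illustrative doctest-style sketch (names refer to this module; 'state' is a
# hypothetical target file): close() publishes the writes atomically, while
# discard() -- or garbage collection of the object -- throws them away.
#
#   >>> f = atomictempfile('state')
#   >>> f.write('all or nothing\n')
#   >>> f.close()   # the temporary copy is renamed over 'state' here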
1075 def makedirs(name, mode=None, notindexed=False):
1076 def makedirs(name, mode=None, notindexed=False):
1076 """recursive directory creation with parent mode inheritance"""
1077 """recursive directory creation with parent mode inheritance"""
1077 try:
1078 try:
1078 makedir(name, notindexed)
1079 makedir(name, notindexed)
1079 except OSError, err:
1080 except OSError, err:
1080 if err.errno == errno.EEXIST:
1081 if err.errno == errno.EEXIST:
1081 return
1082 return
1082 if err.errno != errno.ENOENT or not name:
1083 if err.errno != errno.ENOENT or not name:
1083 raise
1084 raise
1084 parent = os.path.dirname(os.path.abspath(name))
1085 parent = os.path.dirname(os.path.abspath(name))
1085 if parent == name:
1086 if parent == name:
1086 raise
1087 raise
1087 makedirs(parent, mode, notindexed)
1088 makedirs(parent, mode, notindexed)
1088 makedir(name, notindexed)
1089 makedir(name, notindexed)
1089 if mode is not None:
1090 if mode is not None:
1090 os.chmod(name, mode)
1091 os.chmod(name, mode)
1091
1092
1092 def ensuredirs(name, mode=None):
1093 def ensuredirs(name, mode=None):
1093 """race-safe recursive directory creation"""
1094 """race-safe recursive directory creation"""
1094 if os.path.isdir(name):
1095 if os.path.isdir(name):
1095 return
1096 return
1096 parent = os.path.dirname(os.path.abspath(name))
1097 parent = os.path.dirname(os.path.abspath(name))
1097 if parent != name:
1098 if parent != name:
1098 ensuredirs(parent, mode)
1099 ensuredirs(parent, mode)
1099 try:
1100 try:
1100 os.mkdir(name)
1101 os.mkdir(name)
1101 except OSError, err:
1102 except OSError, err:
1102 if err.errno == errno.EEXIST and os.path.isdir(name):
1103 if err.errno == errno.EEXIST and os.path.isdir(name):
1103 # someone else seems to have won a directory creation race
1104 # someone else seems to have won a directory creation race
1104 return
1105 return
1105 raise
1106 raise
1106 if mode is not None:
1107 if mode is not None:
1107 os.chmod(name, mode)
1108 os.chmod(name, mode)
1108
1109
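# Illustrative sketch (hypothetical path; names refer to this module):
# ensuredirs() is the race-safe variant -- a directory created concurrently by
# another process between its isdir() check and mkdir() is not an error,
# whereas makedirs() can still raise EEXIST in that window.
#
#   >>> ensuredirs('cache/tags/subdir', mode=0755)   # ok if it already exists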
1109 def readfile(path):
1110 def readfile(path):
1110 fp = open(path, 'rb')
1111 fp = open(path, 'rb')
1111 try:
1112 try:
1112 return fp.read()
1113 return fp.read()
1113 finally:
1114 finally:
1114 fp.close()
1115 fp.close()
1115
1116
1116 def writefile(path, text):
1117 def writefile(path, text):
1117 fp = open(path, 'wb')
1118 fp = open(path, 'wb')
1118 try:
1119 try:
1119 fp.write(text)
1120 fp.write(text)
1120 finally:
1121 finally:
1121 fp.close()
1122 fp.close()
1122
1123
1123 def appendfile(path, text):
1124 def appendfile(path, text):
1124 fp = open(path, 'ab')
1125 fp = open(path, 'ab')
1125 try:
1126 try:
1126 fp.write(text)
1127 fp.write(text)
1127 finally:
1128 finally:
1128 fp.close()
1129 fp.close()
1129
1130
1130 class chunkbuffer(object):
1131 class chunkbuffer(object):
1131 """Allow arbitrary sized chunks of data to be efficiently read from an
1132 """Allow arbitrary sized chunks of data to be efficiently read from an
1132 iterator over chunks of arbitrary size."""
1133 iterator over chunks of arbitrary size."""
1133
1134
1134 def __init__(self, in_iter):
1135 def __init__(self, in_iter):
1135 """in_iter is the iterator that's iterating over the input chunks.
1136 """in_iter is the iterator that's iterating over the input chunks.
1136 Chunks larger than 2**20 bytes are split into 2**18-byte pieces."""
1137 Chunks larger than 2**20 bytes are split into 2**18-byte pieces."""
1137 def splitbig(chunks):
1138 def splitbig(chunks):
1138 for chunk in chunks:
1139 for chunk in chunks:
1139 if len(chunk) > 2**20:
1140 if len(chunk) > 2**20:
1140 pos = 0
1141 pos = 0
1141 while pos < len(chunk):
1142 while pos < len(chunk):
1142 end = pos + 2 ** 18
1143 end = pos + 2 ** 18
1143 yield chunk[pos:end]
1144 yield chunk[pos:end]
1144 pos = end
1145 pos = end
1145 else:
1146 else:
1146 yield chunk
1147 yield chunk
1147 self.iter = splitbig(in_iter)
1148 self.iter = splitbig(in_iter)
1148 self._queue = deque()
1149 self._queue = deque()
1149
1150
1150 def read(self, l=None):
1151 def read(self, l=None):
1151 """Read L bytes of data from the iterator of chunks of data.
1152 """Read L bytes of data from the iterator of chunks of data.
1152 Returns less than L bytes if the iterator runs dry.
1153 Returns less than L bytes if the iterator runs dry.
1153
1154
1154 If the size parameter is omitted, read everything."""
1155 If the size parameter is omitted, read everything."""
1155 left = l
1156 left = l
1156 buf = []
1157 buf = []
1157 queue = self._queue
1158 queue = self._queue
1158 while left is None or left > 0:
1159 while left is None or left > 0:
1159 # refill the queue
1160 # refill the queue
1160 if not queue:
1161 if not queue:
1161 target = 2**18
1162 target = 2**18
1162 for chunk in self.iter:
1163 for chunk in self.iter:
1163 queue.append(chunk)
1164 queue.append(chunk)
1164 target -= len(chunk)
1165 target -= len(chunk)
1165 if target <= 0:
1166 if target <= 0:
1166 break
1167 break
1167 if not queue:
1168 if not queue:
1168 break
1169 break
1169
1170
1170 chunk = queue.popleft()
1171 chunk = queue.popleft()
1171 if left is not None:
1172 if left is not None:
1172 left -= len(chunk)
1173 left -= len(chunk)
1173 if left is not None and left < 0:
1174 if left is not None and left < 0:
1174 queue.appendleft(chunk[left:])
1175 queue.appendleft(chunk[left:])
1175 buf.append(chunk[:left])
1176 buf.append(chunk[:left])
1176 else:
1177 else:
1177 buf.append(chunk)
1178 buf.append(chunk)
1178
1179
1179 return ''.join(buf)
1180 return ''.join(buf)
1180
1181
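# Illustrative doctest-style sketch (names refer to this module): read(n)
# returns at most n bytes and re-chunks anything the iterator yields over
# 2**20 bytes into 2**18-byte pieces.
#
#   >>> buf = chunkbuffer(iter(['abc', 'defgh', 'ij']))
#   >>> buf.read(4)
#   'abcd'
#   >>> buf.read()   # no size: drain whatever is left
#   'efghij'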
1181 def filechunkiter(f, size=65536, limit=None):
1182 def filechunkiter(f, size=65536, limit=None):
1182 """Create a generator that produces the data in the file size
1183 """Create a generator that produces the data in the file size
1183 (default 65536) bytes at a time, up to optional limit (default is
1184 (default 65536) bytes at a time, up to optional limit (default is
1184 to read all data). Chunks may be less than size bytes if the
1185 to read all data). Chunks may be less than size bytes if the
1185 chunk is the last chunk in the file, or the file is a socket or
1186 chunk is the last chunk in the file, or the file is a socket or
1186 some other type of file that sometimes reads less data than is
1187 some other type of file that sometimes reads less data than is
1187 requested."""
1188 requested."""
1188 assert size >= 0
1189 assert size >= 0
1189 assert limit is None or limit >= 0
1190 assert limit is None or limit >= 0
1190 while True:
1191 while True:
1191 if limit is None:
1192 if limit is None:
1192 nbytes = size
1193 nbytes = size
1193 else:
1194 else:
1194 nbytes = min(limit, size)
1195 nbytes = min(limit, size)
1195 s = nbytes and f.read(nbytes)
1196 s = nbytes and f.read(nbytes)
1196 if not s:
1197 if not s:
1197 break
1198 break
1198 if limit:
1199 if limit:
1199 limit -= len(s)
1200 limit -= len(s)
1200 yield s
1201 yield s
1201
1202
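# Illustrative sketch (hypothetical file name; names refer to this module):
# filechunkiter() is how large files are streamed without loading them whole,
# e.g. by mktempcopy() above.
#
#   >>> fp = open('some-large-file', 'rb')
#   >>> nbytes = sum(len(chunk) for chunk in filechunkiter(fp, size=8192))
#   >>> fp.close()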
1202 def makedate(timestamp=None):
1203 def makedate(timestamp=None):
1203 '''Return a unix timestamp (or the current time) as a (unixtime,
1204 '''Return a unix timestamp (or the current time) as a (unixtime,
1204 offset) tuple based on the local timezone.'''
1205 offset) tuple based on the local timezone.'''
1205 if timestamp is None:
1206 if timestamp is None:
1206 timestamp = time.time()
1207 timestamp = time.time()
1207 if timestamp < 0:
1208 if timestamp < 0:
1208 hint = _("check your clock")
1209 hint = _("check your clock")
1209 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1210 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1210 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1211 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1211 datetime.datetime.fromtimestamp(timestamp))
1212 datetime.datetime.fromtimestamp(timestamp))
1212 tz = delta.days * 86400 + delta.seconds
1213 tz = delta.days * 86400 + delta.seconds
1213 return timestamp, tz
1214 return timestamp, tz
1214
1215
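# Illustrative doctest-style sketch (names refer to this module): the
# (unixtime, offset) pair is the internal date representation; offset is
# seconds west of UTC, so a clock in UTC+2 yields an offset of -7200.
#
#   >>> when, tz = makedate()              # e.g. (1413979391.0, -7200) in UTC+2
#   >>> parsedate(datestr((when, tz))) == (int(when), tz)
#   True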
1215 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1216 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1216 """represent a (unixtime, offset) tuple as a localized time.
1217 """represent a (unixtime, offset) tuple as a localized time.
1217 unixtime is seconds since the epoch, and offset is the time zone's
1218 unixtime is seconds since the epoch, and offset is the time zone's
1218 number of seconds away from UTC. The offset is rendered by the "%1"
1219 number of seconds away from UTC. The offset is rendered by the "%1"
1219 (signed hours) and "%2" (minutes) placeholders; "%z" expands to "%1%2"."""
1220 (signed hours) and "%2" (minutes) placeholders; "%z" expands to "%1%2"."""
1220 t, tz = date or makedate()
1221 t, tz = date or makedate()
1221 if t < 0:
1222 if t < 0:
1222 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1223 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1223 tz = 0
1224 tz = 0
1224 if "%1" in format or "%2" in format or "%z" in format:
1225 if "%1" in format or "%2" in format or "%z" in format:
1225 sign = (tz > 0) and "-" or "+"
1226 sign = (tz > 0) and "-" or "+"
1226 minutes = abs(tz) // 60
1227 minutes = abs(tz) // 60
1227 format = format.replace("%z", "%1%2")
1228 format = format.replace("%z", "%1%2")
1228 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1229 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1229 format = format.replace("%2", "%02d" % (minutes % 60))
1230 format = format.replace("%2", "%02d" % (minutes % 60))
1230 try:
1231 try:
1231 t = time.gmtime(float(t) - tz)
1232 t = time.gmtime(float(t) - tz)
1232 except ValueError:
1233 except ValueError:
1233 # time was out of range
1234 # time was out of range
1234 t = time.gmtime(sys.maxint)
1235 t = time.gmtime(sys.maxint)
1235 s = time.strftime(format, t)
1236 s = time.strftime(format, t)
1236 return s
1237 return s
1237
1238
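# Illustrative doctest-style sketch (names refer to this module; month and
# weekday names assume the C locale): "%1" expands to the signed hours of the
# offset, "%2" to its minutes, and "%z" to "%1%2".
#
#   >>> datestr((0, 0))
#   'Thu Jan 01 00:00:00 1970 +0000'
#   >>> datestr((0, 3600), '%H:%M %1%2')   # offset is seconds west of UTC
#   '23:00 -0100'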
1238 def shortdate(date=None):
1239 def shortdate(date=None):
1239 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1240 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1240 return datestr(date, format='%Y-%m-%d')
1241 return datestr(date, format='%Y-%m-%d')
1241
1242
1242 def strdate(string, format, defaults=[]):
1243 def strdate(string, format, defaults=[]):
1243 """parse a localized time string and return a (unixtime, offset) tuple.
1244 """parse a localized time string and return a (unixtime, offset) tuple.
1244 if the string cannot be parsed, ValueError is raised."""
1245 if the string cannot be parsed, ValueError is raised."""
1245 def timezone(string):
1246 def timezone(string):
1246 tz = string.split()[-1]
1247 tz = string.split()[-1]
1247 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1248 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1248 sign = (tz[0] == "+") and 1 or -1
1249 sign = (tz[0] == "+") and 1 or -1
1249 hours = int(tz[1:3])
1250 hours = int(tz[1:3])
1250 minutes = int(tz[3:5])
1251 minutes = int(tz[3:5])
1251 return -sign * (hours * 60 + minutes) * 60
1252 return -sign * (hours * 60 + minutes) * 60
1252 if tz == "GMT" or tz == "UTC":
1253 if tz == "GMT" or tz == "UTC":
1253 return 0
1254 return 0
1254 return None
1255 return None
1255
1256
1256 # NOTE: unixtime = localunixtime + offset
1257 # NOTE: unixtime = localunixtime + offset
1257 offset, date = timezone(string), string
1258 offset, date = timezone(string), string
1258 if offset is not None:
1259 if offset is not None:
1259 date = " ".join(string.split()[:-1])
1260 date = " ".join(string.split()[:-1])
1260
1261
1261 # add missing elements from defaults
1262 # add missing elements from defaults
1262 usenow = False # default to using biased defaults
1263 usenow = False # default to using biased defaults
1263 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1264 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1264 found = [True for p in part if ("%"+p) in format]
1265 found = [True for p in part if ("%"+p) in format]
1265 if not found:
1266 if not found:
1266 date += "@" + defaults[part][usenow]
1267 date += "@" + defaults[part][usenow]
1267 format += "@%" + part[0]
1268 format += "@%" + part[0]
1268 else:
1269 else:
1269 # We've found a specific time element, less specific time
1270 # We've found a specific time element, less specific time
1270 # elements are relative to today
1271 # elements are relative to today
1271 usenow = True
1272 usenow = True
1272
1273
1273 timetuple = time.strptime(date, format)
1274 timetuple = time.strptime(date, format)
1274 localunixtime = int(calendar.timegm(timetuple))
1275 localunixtime = int(calendar.timegm(timetuple))
1275 if offset is None:
1276 if offset is None:
1276 # local timezone
1277 # local timezone
1277 unixtime = int(time.mktime(timetuple))
1278 unixtime = int(time.mktime(timetuple))
1278 offset = unixtime - localunixtime
1279 offset = unixtime - localunixtime
1279 else:
1280 else:
1280 unixtime = localunixtime + offset
1281 unixtime = localunixtime + offset
1281 return unixtime, offset
1282 return unixtime, offset
1282
1283
1283 def parsedate(date, formats=None, bias={}):
1284 def parsedate(date, formats=None, bias={}):
1284 """parse a localized date/time and return a (unixtime, offset) tuple.
1285 """parse a localized date/time and return a (unixtime, offset) tuple.
1285
1286
1286 The date may be a "unixtime offset" string or in one of the specified
1287 The date may be a "unixtime offset" string or in one of the specified
1287 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1288 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1288
1289
1289 >>> parsedate(' today ') == parsedate(\
1290 >>> parsedate(' today ') == parsedate(\
1290 datetime.date.today().strftime('%b %d'))
1291 datetime.date.today().strftime('%b %d'))
1291 True
1292 True
1292 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1293 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1293 datetime.timedelta(days=1)\
1294 datetime.timedelta(days=1)\
1294 ).strftime('%b %d'))
1295 ).strftime('%b %d'))
1295 True
1296 True
1296 >>> now, tz = makedate()
1297 >>> now, tz = makedate()
1297 >>> strnow, strtz = parsedate('now')
1298 >>> strnow, strtz = parsedate('now')
1298 >>> (strnow - now) < 1
1299 >>> (strnow - now) < 1
1299 True
1300 True
1300 >>> tz == strtz
1301 >>> tz == strtz
1301 True
1302 True
1302 """
1303 """
1303 if not date:
1304 if not date:
1304 return 0, 0
1305 return 0, 0
1305 if isinstance(date, tuple) and len(date) == 2:
1306 if isinstance(date, tuple) and len(date) == 2:
1306 return date
1307 return date
1307 if not formats:
1308 if not formats:
1308 formats = defaultdateformats
1309 formats = defaultdateformats
1309 date = date.strip()
1310 date = date.strip()
1310
1311
1311 if date == _('now'):
1312 if date == _('now'):
1312 return makedate()
1313 return makedate()
1313 if date == _('today'):
1314 if date == _('today'):
1314 date = datetime.date.today().strftime('%b %d')
1315 date = datetime.date.today().strftime('%b %d')
1315 elif date == _('yesterday'):
1316 elif date == _('yesterday'):
1316 date = (datetime.date.today() -
1317 date = (datetime.date.today() -
1317 datetime.timedelta(days=1)).strftime('%b %d')
1318 datetime.timedelta(days=1)).strftime('%b %d')
1318
1319
1319 try:
1320 try:
1320 when, offset = map(int, date.split(' '))
1321 when, offset = map(int, date.split(' '))
1321 except ValueError:
1322 except ValueError:
1322 # fill out defaults
1323 # fill out defaults
1323 now = makedate()
1324 now = makedate()
1324 defaults = {}
1325 defaults = {}
1325 for part in ("d", "mb", "yY", "HI", "M", "S"):
1326 for part in ("d", "mb", "yY", "HI", "M", "S"):
1326 # this piece is for rounding the specific end of unknowns
1327 # this piece is for rounding the specific end of unknowns
1327 b = bias.get(part)
1328 b = bias.get(part)
1328 if b is None:
1329 if b is None:
1329 if part[0] in "HMS":
1330 if part[0] in "HMS":
1330 b = "00"
1331 b = "00"
1331 else:
1332 else:
1332 b = "0"
1333 b = "0"
1333
1334
1334 # this piece is for matching the generic end to today's date
1335 # this piece is for matching the generic end to today's date
1335 n = datestr(now, "%" + part[0])
1336 n = datestr(now, "%" + part[0])
1336
1337
1337 defaults[part] = (b, n)
1338 defaults[part] = (b, n)
1338
1339
1339 for format in formats:
1340 for format in formats:
1340 try:
1341 try:
1341 when, offset = strdate(date, format, defaults)
1342 when, offset = strdate(date, format, defaults)
1342 except (ValueError, OverflowError):
1343 except (ValueError, OverflowError):
1343 pass
1344 pass
1344 else:
1345 else:
1345 break
1346 break
1346 else:
1347 else:
1347 raise Abort(_('invalid date: %r') % date)
1348 raise Abort(_('invalid date: %r') % date)
1348 # validate explicit (probably user-specified) date and
1349 # validate explicit (probably user-specified) date and
1349 # time zone offset. values must fit in signed 32 bits for
1350 # time zone offset. values must fit in signed 32 bits for
1350 # current 32-bit linux runtimes. timezones go from UTC-12
1351 # current 32-bit linux runtimes. timezones go from UTC-12
1351 # to UTC+14
1352 # to UTC+14
1352 if abs(when) > 0x7fffffff:
1353 if abs(when) > 0x7fffffff:
1353 raise Abort(_('date exceeds 32 bits: %d') % when)
1354 raise Abort(_('date exceeds 32 bits: %d') % when)
1354 if when < 0:
1355 if when < 0:
1355 raise Abort(_('negative date value: %d') % when)
1356 raise Abort(_('negative date value: %d') % when)
1356 if offset < -50400 or offset > 43200:
1357 if offset < -50400 or offset > 43200:
1357 raise Abort(_('impossible time zone offset: %d') % offset)
1358 raise Abort(_('impossible time zone offset: %d') % offset)
1358 return when, offset
1359 return when, offset
1359
1360
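# Illustrative doctest-style sketch (names refer to this module): besides the
# relative forms shown above, parsedate() accepts anything in
# defaultdateformats as well as the raw "unixtime offset" form stored in the
# changelog.
#
#   >>> parsedate('1970-01-01 00:00:00 +0000')
#   (0, 0)
#   >>> parsedate('0 0')
#   (0, 0)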
1360 def matchdate(date):
1361 def matchdate(date):
1361 """Return a function that matches a given date match specifier
1362 """Return a function that matches a given date match specifier
1362
1363
1363 Formats include:
1364 Formats include:
1364
1365
1365 '{date}' match a given date to the accuracy provided
1366 '{date}' match a given date to the accuracy provided
1366
1367
1367 '<{date}' on or before a given date
1368 '<{date}' on or before a given date
1368
1369
1369 '>{date}' on or after a given date
1370 '>{date}' on or after a given date
1370
1371
1371 >>> p1 = parsedate("10:29:59")
1372 >>> p1 = parsedate("10:29:59")
1372 >>> p2 = parsedate("10:30:00")
1373 >>> p2 = parsedate("10:30:00")
1373 >>> p3 = parsedate("10:30:59")
1374 >>> p3 = parsedate("10:30:59")
1374 >>> p4 = parsedate("10:31:00")
1375 >>> p4 = parsedate("10:31:00")
1375 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1376 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1376 >>> f = matchdate("10:30")
1377 >>> f = matchdate("10:30")
1377 >>> f(p1[0])
1378 >>> f(p1[0])
1378 False
1379 False
1379 >>> f(p2[0])
1380 >>> f(p2[0])
1380 True
1381 True
1381 >>> f(p3[0])
1382 >>> f(p3[0])
1382 True
1383 True
1383 >>> f(p4[0])
1384 >>> f(p4[0])
1384 False
1385 False
1385 >>> f(p5[0])
1386 >>> f(p5[0])
1386 False
1387 False
1387 """
1388 """
1388
1389
1389 def lower(date):
1390 def lower(date):
1390 d = {'mb': "1", 'd': "1"}
1391 d = {'mb': "1", 'd': "1"}
1391 return parsedate(date, extendeddateformats, d)[0]
1392 return parsedate(date, extendeddateformats, d)[0]
1392
1393
1393 def upper(date):
1394 def upper(date):
1394 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1395 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1395 for days in ("31", "30", "29"):
1396 for days in ("31", "30", "29"):
1396 try:
1397 try:
1397 d["d"] = days
1398 d["d"] = days
1398 return parsedate(date, extendeddateformats, d)[0]
1399 return parsedate(date, extendeddateformats, d)[0]
1399 except Abort:
1400 except Abort:
1400 pass
1401 pass
1401 d["d"] = "28"
1402 d["d"] = "28"
1402 return parsedate(date, extendeddateformats, d)[0]
1403 return parsedate(date, extendeddateformats, d)[0]
1403
1404
1404 date = date.strip()
1405 date = date.strip()
1405
1406
1406 if not date:
1407 if not date:
1407 raise Abort(_("dates cannot consist entirely of whitespace"))
1408 raise Abort(_("dates cannot consist entirely of whitespace"))
1408 elif date[0] == "<":
1409 elif date[0] == "<":
1409 if not date[1:]:
1410 if not date[1:]:
1410 raise Abort(_("invalid day spec, use '<DATE'"))
1411 raise Abort(_("invalid day spec, use '<DATE'"))
1411 when = upper(date[1:])
1412 when = upper(date[1:])
1412 return lambda x: x <= when
1413 return lambda x: x <= when
1413 elif date[0] == ">":
1414 elif date[0] == ">":
1414 if not date[1:]:
1415 if not date[1:]:
1415 raise Abort(_("invalid day spec, use '>DATE'"))
1416 raise Abort(_("invalid day spec, use '>DATE'"))
1416 when = lower(date[1:])
1417 when = lower(date[1:])
1417 return lambda x: x >= when
1418 return lambda x: x >= when
1418 elif date[0] == "-":
1419 elif date[0] == "-":
1419 try:
1420 try:
1420 days = int(date[1:])
1421 days = int(date[1:])
1421 except ValueError:
1422 except ValueError:
1422 raise Abort(_("invalid day spec: %s") % date[1:])
1423 raise Abort(_("invalid day spec: %s") % date[1:])
1423 if days < 0:
1424 if days < 0:
1424 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1425 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1425 % date[1:])
1426 % date[1:])
1426 when = makedate()[0] - days * 3600 * 24
1427 when = makedate()[0] - days * 3600 * 24
1427 return lambda x: x >= when
1428 return lambda x: x >= when
1428 elif " to " in date:
1429 elif " to " in date:
1429 a, b = date.split(" to ")
1430 a, b = date.split(" to ")
1430 start, stop = lower(a), upper(b)
1431 start, stop = lower(a), upper(b)
1431 return lambda x: x >= start and x <= stop
1432 return lambda x: x >= start and x <= stop
1432 else:
1433 else:
1433 start, stop = lower(date), upper(date)
1434 start, stop = lower(date), upper(date)
1434 return lambda x: x >= start and x <= stop
1435 return lambda x: x >= start and x <= stop
1435
1436
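# Illustrative doctest-style sketch (names refer to this module): "-N" is the
# one spec form the doctest above does not cover -- changesets from the last
# N days.
#
#   >>> lastweek = matchdate('-7')
#   >>> lastweek(makedate()[0])
#   True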
1436 def shortuser(user):
1437 def shortuser(user):
1437 """Return a short representation of a user name or email address."""
1438 """Return a short representation of a user name or email address."""
1438 f = user.find('@')
1439 f = user.find('@')
1439 if f >= 0:
1440 if f >= 0:
1440 user = user[:f]
1441 user = user[:f]
1441 f = user.find('<')
1442 f = user.find('<')
1442 if f >= 0:
1443 if f >= 0:
1443 user = user[f + 1:]
1444 user = user[f + 1:]
1444 f = user.find(' ')
1445 f = user.find(' ')
1445 if f >= 0:
1446 if f >= 0:
1446 user = user[:f]
1447 user = user[:f]
1447 f = user.find('.')
1448 f = user.find('.')
1448 if f >= 0:
1449 if f >= 0:
1449 user = user[:f]
1450 user = user[:f]
1450 return user
1451 return user
1451
1452
1452 def emailuser(user):
1453 def emailuser(user):
1453 """Return the user portion of an email address."""
1454 """Return the user portion of an email address."""
1454 f = user.find('@')
1455 f = user.find('@')
1455 if f >= 0:
1456 if f >= 0:
1456 user = user[:f]
1457 user = user[:f]
1457 f = user.find('<')
1458 f = user.find('<')
1458 if f >= 0:
1459 if f >= 0:
1459 user = user[f + 1:]
1460 user = user[f + 1:]
1460 return user
1461 return user
1461
1462
1462 def email(author):
1463 def email(author):
1463 '''get email of author.'''
1464 '''get email of author.'''
1464 r = author.find('>')
1465 r = author.find('>')
1465 if r == -1:
1466 if r == -1:
1466 r = None
1467 r = None
1467 return author[author.find('<') + 1:r]
1468 return author[author.find('<') + 1:r]
1468
1469
1469 def ellipsis(text, maxlength=400):
1470 def ellipsis(text, maxlength=400):
1470 """Trim string to at most maxlength (default: 400) columns in display."""
1471 """Trim string to at most maxlength (default: 400) columns in display."""
1471 return encoding.trim(text, maxlength, ellipsis='...')
1472 return encoding.trim(text, maxlength, ellipsis='...')
1472
1473
1473 def unitcountfn(*unittable):
1474 def unitcountfn(*unittable):
1474 '''return a function that renders a readable count of some quantity'''
1475 '''return a function that renders a readable count of some quantity'''
1475
1476
1476 def go(count):
1477 def go(count):
1477 for multiplier, divisor, format in unittable:
1478 for multiplier, divisor, format in unittable:
1478 if count >= divisor * multiplier:
1479 if count >= divisor * multiplier:
1479 return format % (count / float(divisor))
1480 return format % (count / float(divisor))
1480 return unittable[-1][2] % count
1481 return unittable[-1][2] % count
1481
1482
1482 return go
1483 return go
1483
1484
1484 bytecount = unitcountfn(
1485 bytecount = unitcountfn(
1485 (100, 1 << 30, _('%.0f GB')),
1486 (100, 1 << 30, _('%.0f GB')),
1486 (10, 1 << 30, _('%.1f GB')),
1487 (10, 1 << 30, _('%.1f GB')),
1487 (1, 1 << 30, _('%.2f GB')),
1488 (1, 1 << 30, _('%.2f GB')),
1488 (100, 1 << 20, _('%.0f MB')),
1489 (100, 1 << 20, _('%.0f MB')),
1489 (10, 1 << 20, _('%.1f MB')),
1490 (10, 1 << 20, _('%.1f MB')),
1490 (1, 1 << 20, _('%.2f MB')),
1491 (1, 1 << 20, _('%.2f MB')),
1491 (100, 1 << 10, _('%.0f KB')),
1492 (100, 1 << 10, _('%.0f KB')),
1492 (10, 1 << 10, _('%.1f KB')),
1493 (10, 1 << 10, _('%.1f KB')),
1493 (1, 1 << 10, _('%.2f KB')),
1494 (1, 1 << 10, _('%.2f KB')),
1494 (1, 1, _('%.0f bytes')),
1495 (1, 1, _('%.0f bytes')),
1495 )
1496 )
1496
1497
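# Illustrative doctest-style sketch (names refer to this module): the first
# table row whose threshold (multiplier * divisor) the value reaches wins, so
# the rendered precision shrinks as the quantity grows.
#
#   >>> bytecount(512)
#   '512 bytes'
#   >>> bytecount(1234567)
#   '1.18 MB'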
1497 def uirepr(s):
1498 def uirepr(s):
1498 # Avoid double backslash in Windows path repr()
1499 # Avoid double backslash in Windows path repr()
1499 return repr(s).replace('\\\\', '\\')
1500 return repr(s).replace('\\\\', '\\')
1500
1501
1501 # delay import of textwrap
1502 # delay import of textwrap
1502 def MBTextWrapper(**kwargs):
1503 def MBTextWrapper(**kwargs):
1503 class tw(textwrap.TextWrapper):
1504 class tw(textwrap.TextWrapper):
1504 """
1505 """
1505 Extend TextWrapper for width-awareness.
1506 Extend TextWrapper for width-awareness.
1506
1507
1507 Neither the number of 'bytes' in any encoding nor the number of
1508 Neither the number of 'bytes' in any encoding nor the number of
1508 'characters' is appropriate for calculating the terminal columns of a string.
1509 'characters' is appropriate for calculating the terminal columns of a string.
1509
1510
1510 The original TextWrapper implementation uses the built-in 'len()' directly,
1511 The original TextWrapper implementation uses the built-in 'len()' directly,
1511 so it must be overridden to use the width information of each character.
1512 so it must be overridden to use the width information of each character.
1512
1513
1513 In addition, characters classified as 'ambiguous' width are
1514 In addition, characters classified as 'ambiguous' width are
1514 treated as wide in East Asian locales, but as narrow elsewhere.
1515 treated as wide in East Asian locales, but as narrow elsewhere.
1515
1516
1516 This requires a user decision to determine the width of such characters.
1517 This requires a user decision to determine the width of such characters.
1517 """
1518 """
1518 def __init__(self, **kwargs):
1519 def __init__(self, **kwargs):
1519 textwrap.TextWrapper.__init__(self, **kwargs)
1520 textwrap.TextWrapper.__init__(self, **kwargs)
1520
1521
1521 # for compatibility between 2.4 and 2.6
1522 # for compatibility between 2.4 and 2.6
1522 if getattr(self, 'drop_whitespace', None) is None:
1523 if getattr(self, 'drop_whitespace', None) is None:
1523 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1524 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1524
1525
1525 def _cutdown(self, ucstr, space_left):
1526 def _cutdown(self, ucstr, space_left):
1526 l = 0
1527 l = 0
1527 colwidth = encoding.ucolwidth
1528 colwidth = encoding.ucolwidth
1528 for i in xrange(len(ucstr)):
1529 for i in xrange(len(ucstr)):
1529 l += colwidth(ucstr[i])
1530 l += colwidth(ucstr[i])
1530 if space_left < l:
1531 if space_left < l:
1531 return (ucstr[:i], ucstr[i:])
1532 return (ucstr[:i], ucstr[i:])
1532 return ucstr, ''
1533 return ucstr, ''
1533
1534
1534 # overriding of base class
1535 # overriding of base class
1535 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1536 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1536 space_left = max(width - cur_len, 1)
1537 space_left = max(width - cur_len, 1)
1537
1538
1538 if self.break_long_words:
1539 if self.break_long_words:
1539 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1540 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1540 cur_line.append(cut)
1541 cur_line.append(cut)
1541 reversed_chunks[-1] = res
1542 reversed_chunks[-1] = res
1542 elif not cur_line:
1543 elif not cur_line:
1543 cur_line.append(reversed_chunks.pop())
1544 cur_line.append(reversed_chunks.pop())
1544
1545
1545 # this overriding code is imported from TextWrapper of python 2.6
1546 # this overriding code is imported from TextWrapper of python 2.6
1546 # to calculate columns of string by 'encoding.ucolwidth()'
1547 # to calculate columns of string by 'encoding.ucolwidth()'
1547 def _wrap_chunks(self, chunks):
1548 def _wrap_chunks(self, chunks):
1548 colwidth = encoding.ucolwidth
1549 colwidth = encoding.ucolwidth
1549
1550
1550 lines = []
1551 lines = []
1551 if self.width <= 0:
1552 if self.width <= 0:
1552 raise ValueError("invalid width %r (must be > 0)" % self.width)
1553 raise ValueError("invalid width %r (must be > 0)" % self.width)
1553
1554
1554 # Arrange in reverse order so items can be efficiently popped
1555 # Arrange in reverse order so items can be efficiently popped
1555 # from a stack of chunks.
1556 # from a stack of chunks.
1556 chunks.reverse()
1557 chunks.reverse()
1557
1558
1558 while chunks:
1559 while chunks:
1559
1560
1560 # Start the list of chunks that will make up the current line.
1561 # Start the list of chunks that will make up the current line.
1561 # cur_len is just the length of all the chunks in cur_line.
1562 # cur_len is just the length of all the chunks in cur_line.
1562 cur_line = []
1563 cur_line = []
1563 cur_len = 0
1564 cur_len = 0
1564
1565
1565 # Figure out which static string will prefix this line.
1566 # Figure out which static string will prefix this line.
1566 if lines:
1567 if lines:
1567 indent = self.subsequent_indent
1568 indent = self.subsequent_indent
1568 else:
1569 else:
1569 indent = self.initial_indent
1570 indent = self.initial_indent
1570
1571
1571 # Maximum width for this line.
1572 # Maximum width for this line.
1572 width = self.width - len(indent)
1573 width = self.width - len(indent)
1573
1574
1574 # First chunk on line is whitespace -- drop it, unless this
1575 # First chunk on line is whitespace -- drop it, unless this
1575 # is the very beginning of the text (i.e. no lines started yet).
1576 # is the very beginning of the text (i.e. no lines started yet).
1576 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1577 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1577 del chunks[-1]
1578 del chunks[-1]
1578
1579
1579 while chunks:
1580 while chunks:
1580 l = colwidth(chunks[-1])
1581 l = colwidth(chunks[-1])
1581
1582
1582 # Can at least squeeze this chunk onto the current line.
1583 # Can at least squeeze this chunk onto the current line.
1583 if cur_len + l <= width:
1584 if cur_len + l <= width:
1584 cur_line.append(chunks.pop())
1585 cur_line.append(chunks.pop())
1585 cur_len += l
1586 cur_len += l
1586
1587
1587 # Nope, this line is full.
1588 # Nope, this line is full.
1588 else:
1589 else:
1589 break
1590 break
1590
1591
1591 # The current line is full, and the next chunk is too big to
1592 # The current line is full, and the next chunk is too big to
1592 # fit on *any* line (not just this one).
1593 # fit on *any* line (not just this one).
1593 if chunks and colwidth(chunks[-1]) > width:
1594 if chunks and colwidth(chunks[-1]) > width:
1594 self._handle_long_word(chunks, cur_line, cur_len, width)
1595 self._handle_long_word(chunks, cur_line, cur_len, width)
1595
1596
1596 # If the last chunk on this line is all whitespace, drop it.
1597 # If the last chunk on this line is all whitespace, drop it.
1597 if (self.drop_whitespace and
1598 if (self.drop_whitespace and
1598 cur_line and cur_line[-1].strip() == ''):
1599 cur_line and cur_line[-1].strip() == ''):
1599 del cur_line[-1]
1600 del cur_line[-1]
1600
1601
1601 # Convert current line back to a string and store it in list
1602 # Convert current line back to a string and store it in list
1602 # of all lines (return value).
1603 # of all lines (return value).
1603 if cur_line:
1604 if cur_line:
1604 lines.append(indent + ''.join(cur_line))
1605 lines.append(indent + ''.join(cur_line))
1605
1606
1606 return lines
1607 return lines
1607
1608
1608 global MBTextWrapper
1609 global MBTextWrapper
1609 MBTextWrapper = tw
1610 MBTextWrapper = tw
1610 return tw(**kwargs)
1611 return tw(**kwargs)
1611
1612
1612 def wrap(line, width, initindent='', hangindent=''):
1613 def wrap(line, width, initindent='', hangindent=''):
1613 maxindent = max(len(hangindent), len(initindent))
1614 maxindent = max(len(hangindent), len(initindent))
1614 if width <= maxindent:
1615 if width <= maxindent:
1615 # adjust for weird terminal size
1616 # adjust for weird terminal size
1616 width = max(78, maxindent + 1)
1617 width = max(78, maxindent + 1)
1617 line = line.decode(encoding.encoding, encoding.encodingmode)
1618 line = line.decode(encoding.encoding, encoding.encodingmode)
1618 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1619 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1619 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1620 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1620 wrapper = MBTextWrapper(width=width,
1621 wrapper = MBTextWrapper(width=width,
1621 initial_indent=initindent,
1622 initial_indent=initindent,
1622 subsequent_indent=hangindent)
1623 subsequent_indent=hangindent)
1623 return wrapper.fill(line).encode(encoding.encoding)
1624 return wrapper.fill(line).encode(encoding.encoding)
1624
1625
1625 def iterlines(iterator):
1626 def iterlines(iterator):
1626 for chunk in iterator:
1627 for chunk in iterator:
1627 for line in chunk.splitlines():
1628 for line in chunk.splitlines():
1628 yield line
1629 yield line
1629
1630
1630 def expandpath(path):
1631 def expandpath(path):
1631 return os.path.expanduser(os.path.expandvars(path))
1632 return os.path.expanduser(os.path.expandvars(path))
1632
1633
1633 def hgcmd():
1634 def hgcmd():
1634 """Return the command used to execute current hg
1635 """Return the command used to execute current hg
1635
1636
1636 This is different from hgexecutable() because on Windows we want
1637 This is different from hgexecutable() because on Windows we want
1637 to avoid things like batch files that open new shell windows, so we
1638 to avoid things like batch files that open new shell windows, so we
1638 return either the python invocation or the current executable.
1639 return either the python invocation or the current executable.
1639 """
1640 """
1640 if mainfrozen():
1641 if mainfrozen():
1641 return [sys.executable]
1642 return [sys.executable]
1642 return gethgcmd()
1643 return gethgcmd()
1643
1644
1644 def rundetached(args, condfn):
1645 def rundetached(args, condfn):
1645 """Execute the argument list in a detached process.
1646 """Execute the argument list in a detached process.
1646
1647
1647 condfn is a callable which is called repeatedly and should return
1648 condfn is a callable which is called repeatedly and should return
1648 True once the child process is known to have started successfully.
1649 True once the child process is known to have started successfully.
1649 At this point, the child process PID is returned. If the child
1650 At this point, the child process PID is returned. If the child
1650 process fails to start or finishes before condfn() evaluates to
1651 process fails to start or finishes before condfn() evaluates to
1651 True, return -1.
1652 True, return -1.
1652 """
1653 """
1653 # Windows case is easier because the child process is either
1654 # Windows case is easier because the child process is either
1654 # successfully starting and validating the condition or exiting
1655 # successfully starting and validating the condition or exiting
1655 # on failure. We just poll on its PID. On Unix, if the child
1656 # on failure. We just poll on its PID. On Unix, if the child
1656 # process fails to start, it will be left in a zombie state until
1657 # process fails to start, it will be left in a zombie state until
1657 # the parent waits on it, which we cannot do since we expect a long
1658 # the parent waits on it, which we cannot do since we expect a long
1658 # running process on success. Instead we listen for SIGCHLD telling
1659 # running process on success. Instead we listen for SIGCHLD telling
1659 # us our child process terminated.
1660 # us our child process terminated.
1660 terminated = set()
1661 terminated = set()
1661 def handler(signum, frame):
1662 def handler(signum, frame):
1662 terminated.add(os.wait())
1663 terminated.add(os.wait())
1663 prevhandler = None
1664 prevhandler = None
1664 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1665 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1665 if SIGCHLD is not None:
1666 if SIGCHLD is not None:
1666 prevhandler = signal.signal(SIGCHLD, handler)
1667 prevhandler = signal.signal(SIGCHLD, handler)
1667 try:
1668 try:
1668 pid = spawndetached(args)
1669 pid = spawndetached(args)
1669 while not condfn():
1670 while not condfn():
1670 if ((pid in terminated or not testpid(pid))
1671 if ((pid in terminated or not testpid(pid))
1671 and not condfn()):
1672 and not condfn()):
1672 return -1
1673 return -1
1673 time.sleep(0.1)
1674 time.sleep(0.1)
1674 return pid
1675 return pid
1675 finally:
1676 finally:
1676 if prevhandler is not None:
1677 if prevhandler is not None:
1677 signal.signal(signal.SIGCHLD, prevhandler)
1678 signal.signal(signal.SIGCHLD, prevhandler)
1678
1679
1679 try:
1680 try:
1680 any, all = any, all
1681 any, all = any, all
1681 except NameError:
1682 except NameError:
1682 def any(iterable):
1683 def any(iterable):
1683 for i in iterable:
1684 for i in iterable:
1684 if i:
1685 if i:
1685 return True
1686 return True
1686 return False
1687 return False
1687
1688
1688 def all(iterable):
1689 def all(iterable):
1689 for i in iterable:
1690 for i in iterable:
1690 if not i:
1691 if not i:
1691 return False
1692 return False
1692 return True
1693 return True
1693
1694
1694 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1695 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1695 """Return the result of interpolating items in the mapping into string s.
1696 """Return the result of interpolating items in the mapping into string s.
1696
1697
1697 prefix is a single character string, or a two character string with
1698 prefix is a single character string, or a two character string with
1698 a backslash as the first character if the prefix needs to be escaped in
1699 a backslash as the first character if the prefix needs to be escaped in
1699 a regular expression.
1700 a regular expression.
1700
1701
1701 fn is an optional function that will be applied to the replacement text
1702 fn is an optional function that will be applied to the replacement text
1702 just before replacement.
1703 just before replacement.
1703
1704
1704 escape_prefix is an optional flag that allows a doubled prefix to be
1705 escape_prefix is an optional flag that allows a doubled prefix to be
1705 used as an escape for a literal prefix character.
1706 used as an escape for a literal prefix character.
1706 """
1707 """
1707 fn = fn or (lambda s: s)
1708 fn = fn or (lambda s: s)
1708 patterns = '|'.join(mapping.keys())
1709 patterns = '|'.join(mapping.keys())
1709 if escape_prefix:
1710 if escape_prefix:
1710 patterns += '|' + prefix
1711 patterns += '|' + prefix
1711 if len(prefix) > 1:
1712 if len(prefix) > 1:
1712 prefix_char = prefix[1:]
1713 prefix_char = prefix[1:]
1713 else:
1714 else:
1714 prefix_char = prefix
1715 prefix_char = prefix
1715 mapping[prefix_char] = prefix_char
1716 mapping[prefix_char] = prefix_char
1716 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1717 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1717 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1718 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1718
1719
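# Illustrative doctest-style sketch (names refer to this module): the mapping
# keys become the alternation of the generated regular expression, and with
# escape_prefix=True a doubled prefix collapses to a literal one.  Note that
# the prefix itself must already be regex-escaped (r'\$' rather than '$').
#
#   >>> interpolate('%', {'user': 'alice'}, 'hello %user')
#   'hello alice'
#   >>> interpolate(r'\$', {'x': '1'}, 'cost $$x', escape_prefix=True)
#   'cost $x'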
1719 def getport(port):
1720 def getport(port):
1720 """Return the port for a given network service.
1721 """Return the port for a given network service.
1721
1722
1722 If port is an integer, it's returned as is. If it's a string, it's
1723 If port is an integer, it's returned as is. If it's a string, it's
1723 looked up using socket.getservbyname(). If there's no matching
1724 looked up using socket.getservbyname(). If there's no matching
1724 service, util.Abort is raised.
1725 service, util.Abort is raised.
1725 """
1726 """
1726 try:
1727 try:
1727 return int(port)
1728 return int(port)
1728 except ValueError:
1729 except ValueError:
1729 pass
1730 pass
1730
1731
1731 try:
1732 try:
1732 return socket.getservbyname(port)
1733 return socket.getservbyname(port)
1733 except socket.error:
1734 except socket.error:
1734 raise Abort(_("no port number associated with service '%s'") % port)
1735 raise Abort(_("no port number associated with service '%s'") % port)
1735
1736
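# Illustrative doctest-style sketch (names refer to this module; the 'http'
# lookup assumes the local services database defines it, which typical
# systems do):
#
#   >>> getport(8080), getport('8080')
#   (8080, 8080)
#   >>> getport('http')
#   80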
1736 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1737 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1737 '0': False, 'no': False, 'false': False, 'off': False,
1738 '0': False, 'no': False, 'false': False, 'off': False,
1738 'never': False}
1739 'never': False}
1739
1740
1740 def parsebool(s):
1741 def parsebool(s):
1741 """Parse s into a boolean.
1742 """Parse s into a boolean.
1742
1743
1743 If s is not a valid boolean, returns None.
1744 If s is not a valid boolean, returns None.
1744 """
1745 """
1745 return _booleans.get(s.lower(), None)
1746 return _booleans.get(s.lower(), None)
1746
1747
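# Illustrative doctest-style sketch (names refer to this module): matching is
# case-insensitive, and anything outside the table returns None so callers
# can tell "not a boolean" apart from an explicit False.
#
#   >>> parsebool('on'), parsebool('Never'), parsebool('maybe')
#   (True, False, None)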
1747 _hexdig = '0123456789ABCDEFabcdef'
1748 _hexdig = '0123456789ABCDEFabcdef'
1748 _hextochr = dict((a + b, chr(int(a + b, 16)))
1749 _hextochr = dict((a + b, chr(int(a + b, 16)))
1749 for a in _hexdig for b in _hexdig)
1750 for a in _hexdig for b in _hexdig)
1750
1751
1751 def _urlunquote(s):
1752 def _urlunquote(s):
1752 """Decode HTTP/HTML % encoding.
1753 """Decode HTTP/HTML % encoding.
1753
1754
1754 >>> _urlunquote('abc%20def')
1755 >>> _urlunquote('abc%20def')
1755 'abc def'
1756 'abc def'
1756 """
1757 """
1757 res = s.split('%')
1758 res = s.split('%')
1758 # fastpath
1759 # fastpath
1759 if len(res) == 1:
1760 if len(res) == 1:
1760 return s
1761 return s
1761 s = res[0]
1762 s = res[0]
1762 for item in res[1:]:
1763 for item in res[1:]:
1763 try:
1764 try:
1764 s += _hextochr[item[:2]] + item[2:]
1765 s += _hextochr[item[:2]] + item[2:]
1765 except KeyError:
1766 except KeyError:
1766 s += '%' + item
1767 s += '%' + item
1767 except UnicodeDecodeError:
1768 except UnicodeDecodeError:
1768 s += unichr(int(item[:2], 16)) + item[2:]
1769 s += unichr(int(item[:2], 16)) + item[2:]
1769 return s
1770 return s
1770
1771
1771 class url(object):
1772 class url(object):
1772 r"""Reliable URL parser.
1773 r"""Reliable URL parser.
1773
1774
1774 This parses URLs and provides attributes for the following
1775 This parses URLs and provides attributes for the following
1775 components:
1776 components:
1776
1777
1777 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1778 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1778
1779
1779 Missing components are set to None. The only exception is
1780 Missing components are set to None. The only exception is
1780 fragment, which is set to '' if present but empty.
1781 fragment, which is set to '' if present but empty.
1781
1782
1782 If parsefragment is False, fragment is included in query. If
1783 If parsefragment is False, fragment is included in query. If
1783 parsequery is False, query is included in path. If both are
1784 parsequery is False, query is included in path. If both are
1784 False, both fragment and query are included in path.
1785 False, both fragment and query are included in path.
1785
1786
1786 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1787 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1787
1788
1788 Note that for backward compatibility reasons, bundle URLs do not
1789 Note that for backward compatibility reasons, bundle URLs do not
1789 take host names. That means 'bundle://../' has a path of '../'.
1790 take host names. That means 'bundle://../' has a path of '../'.
1790
1791
1791 Examples:
1792 Examples:
1792
1793
1793 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1794 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1794 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1795 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1795 >>> url('ssh://[::1]:2200//home/joe/repo')
1796 >>> url('ssh://[::1]:2200//home/joe/repo')
1796 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1797 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1797 >>> url('file:///home/joe/repo')
1798 >>> url('file:///home/joe/repo')
1798 <url scheme: 'file', path: '/home/joe/repo'>
1799 <url scheme: 'file', path: '/home/joe/repo'>
1799 >>> url('file:///c:/temp/foo/')
1800 >>> url('file:///c:/temp/foo/')
1800 <url scheme: 'file', path: 'c:/temp/foo/'>
1801 <url scheme: 'file', path: 'c:/temp/foo/'>
1801 >>> url('bundle:foo')
1802 >>> url('bundle:foo')
1802 <url scheme: 'bundle', path: 'foo'>
1803 <url scheme: 'bundle', path: 'foo'>
1803 >>> url('bundle://../foo')
1804 >>> url('bundle://../foo')
1804 <url scheme: 'bundle', path: '../foo'>
1805 <url scheme: 'bundle', path: '../foo'>
1805 >>> url(r'c:\foo\bar')
1806 >>> url(r'c:\foo\bar')
1806 <url path: 'c:\\foo\\bar'>
1807 <url path: 'c:\\foo\\bar'>
1807 >>> url(r'\\blah\blah\blah')
1808 >>> url(r'\\blah\blah\blah')
1808 <url path: '\\\\blah\\blah\\blah'>
1809 <url path: '\\\\blah\\blah\\blah'>
1809 >>> url(r'\\blah\blah\blah#baz')
1810 >>> url(r'\\blah\blah\blah#baz')
1810 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1811 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1811 >>> url(r'file:///C:\users\me')
1812 >>> url(r'file:///C:\users\me')
1812 <url scheme: 'file', path: 'C:\\users\\me'>
1813 <url scheme: 'file', path: 'C:\\users\\me'>
1813
1814
1814 Authentication credentials:
1815 Authentication credentials:
1815
1816
1816 >>> url('ssh://joe:xyz@x/repo')
1817 >>> url('ssh://joe:xyz@x/repo')
1817 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1818 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1818 >>> url('ssh://joe@x/repo')
1819 >>> url('ssh://joe@x/repo')
1819 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1820 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1820
1821
1821 Query strings and fragments:
1822 Query strings and fragments:
1822
1823
1823 >>> url('http://host/a?b#c')
1824 >>> url('http://host/a?b#c')
1824 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1825 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1825 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1826 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1826 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1827 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1827 """
1828 """
1828
1829
1829 _safechars = "!~*'()+"
1830 _safechars = "!~*'()+"
1830 _safepchars = "/!~*'()+:\\"
1831 _safepchars = "/!~*'()+:\\"
1831 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1832 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1832
1833
1833 def __init__(self, path, parsequery=True, parsefragment=True):
1834 def __init__(self, path, parsequery=True, parsefragment=True):
1834 # We slowly chomp away at path until we have only the path left
1835 # We slowly chomp away at path until we have only the path left
1835 self.scheme = self.user = self.passwd = self.host = None
1836 self.scheme = self.user = self.passwd = self.host = None
1836 self.port = self.path = self.query = self.fragment = None
1837 self.port = self.path = self.query = self.fragment = None
1837 self._localpath = True
1838 self._localpath = True
1838 self._hostport = ''
1839 self._hostport = ''
1839 self._origpath = path
1840 self._origpath = path
1840
1841
1841 if parsefragment and '#' in path:
1842 if parsefragment and '#' in path:
1842 path, self.fragment = path.split('#', 1)
1843 path, self.fragment = path.split('#', 1)
1843 if not path:
1844 if not path:
1844 path = None
1845 path = None
1845
1846
1846 # special case for Windows drive letters and UNC paths
1847 # special case for Windows drive letters and UNC paths
1847 if hasdriveletter(path) or path.startswith(r'\\'):
1848 if hasdriveletter(path) or path.startswith(r'\\'):
1848 self.path = path
1849 self.path = path
1849 return
1850 return
1850
1851
1851 # For compatibility reasons, we can't handle bundle paths as
1852 # For compatibility reasons, we can't handle bundle paths as
1852 # normal URLs
1853 # normal URLs
1853 if path.startswith('bundle:'):
1854 if path.startswith('bundle:'):
1854 self.scheme = 'bundle'
1855 self.scheme = 'bundle'
1855 path = path[7:]
1856 path = path[7:]
1856 if path.startswith('//'):
1857 if path.startswith('//'):
1857 path = path[2:]
1858 path = path[2:]
1858 self.path = path
1859 self.path = path
1859 return
1860 return
1860
1861
1861 if self._matchscheme(path):
1862 if self._matchscheme(path):
1862 parts = path.split(':', 1)
1863 parts = path.split(':', 1)
1863 if parts[0]:
1864 if parts[0]:
1864 self.scheme, path = parts
1865 self.scheme, path = parts
1865 self._localpath = False
1866 self._localpath = False
1866
1867
1867 if not path:
1868 if not path:
1868 path = None
1869 path = None
1869 if self._localpath:
1870 if self._localpath:
1870 self.path = ''
1871 self.path = ''
1871 return
1872 return
1872 else:
1873 else:
1873 if self._localpath:
1874 if self._localpath:
1874 self.path = path
1875 self.path = path
1875 return
1876 return
1876
1877
1877 if parsequery and '?' in path:
        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')

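# Illustrative sketch (not part of the original module): how a parsed url
# object's attributes and its authinfo() result can look. The helper name
# _demourl and the example.com URL are made up for illustration, and the
# literal values in the comments are my expectation from reading the parsing
# code above, not verified output.
def _demourl():
    u = url('http://joe:xyzzy@example.com:8080/repo?style=raw')
    # expected: u.scheme == 'http', u.user == 'joe', u.passwd == 'xyzzy',
    # u.host == 'example.com', u.port == '8080', u.path == 'repo',
    # u.query == 'style=raw' (the query string stays escaped)
    cleanurl, authdata = u.authinfo()
    # cleanurl should drop the credentials, roughly
    # 'http://example.com:8080/repo?style=raw'; authdata carries
    # (realm, uris, user, passwd) in the shape urllib2's password manager wants
    return cleanurl, authdata
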
def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()

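# Illustrative sketch (not part of the original module): expected behaviour of
# the two predicates above on a few representative inputs. The helper name
# _demoschemes and the sample paths are invented for illustration.
def _demoschemes():
    assert hasscheme('https://example.com/repo')   # 'https' parses as a scheme
    assert not hasscheme('relative/path')          # plain paths have no scheme
    assert hasdriveletter('c:/temp')               # Windows drive prefix
    assert not hasdriveletter('/tmp/foo')          # POSIX absolute path
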
def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return str(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)

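# Illustrative sketch (not part of the original module): what the two helpers
# above are expected to produce for a URL carrying credentials. The helper
# name _demohideauth and the hg.example.com URL are invented for illustration.
def _demohideauth():
    u = 'https://alice:s3cret@hg.example.com/project'
    masked = hidepassword(u)   # expected: 'https://alice:***@hg.example.com/project'
    bare = removeauth(u)       # expected: 'https://hg.example.com/project'
    return masked, bare
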
def isatty(fd):
    try:
        return fd.isatty()
    except AttributeError:
        return False

timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

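# Illustrative sketch (not part of the original module): timecount() renders a
# duration in seconds using the table above; the row-selection logic lives in
# unitcountfn(), defined earlier in this file, so the exact strings depend on
# it. Assuming the first row whose threshold the value reaches is used,
# 0.0012 seconds should come out of the '%.3f ms' row as '1.200 ms'. The
# helper name _demotimecount is invented for illustration.
def _demotimecount():
    for secs in (12.0, 0.0012, 0.0000037):
        sys.stderr.write('%s\n' % timecount(secs))  # seconds, ms and us rows
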
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper

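# Illustrative sketch (not part of the original module): nested @timed
# functions indent their report lines on stderr, because _timenesting tracks
# the current nesting depth. The _demoinner/_demoouter names are invented for
# illustration; calling _demoouter() should print the inner timing indented
# under the outer one.
@timed
def _demoinner():
    return sum(range(1000))

@timed
def _demoouter():
    return _demoinner()
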
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behaviour. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results

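# Illustrative sketch (not part of the original module): hooks are invoked in
# lexicographic order of their source names, regardless of registration order,
# and the results come back in that same order. The helper name _demohooks
# and the source names are invented for illustration.
def _demohooks():
    h = hooks()
    h.add('zzz-ext', lambda n: n + 1)
    h.add('aaa-ext', lambda n: n * 2)
    return h(3)   # 'aaa-ext' runs first, so this should be [6, 4]
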
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require a ui object.
    Not meant for production code, but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    entries = [('%s:%s' % (fn, ln), func)
        for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (fnmax, fnln, func))
    f.flush()

# convenient shortcut
dst = debugstacktrace
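
# Illustrative sketch (not part of the original module): dropping a dst() call
# into a code path under investigation prints the message followed by the call
# stack that reached it, which helps when a function is entered from an
# unexpected place. The helper name _demostacktrace is invented for
# illustration.
def _demostacktrace():
    dst('entered _demostacktrace')  # writes 'entered _demostacktrace at:' plus the stack to stderr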