util: add a 'nogc' decorator to disable the garbage collection...
Pierre-Yves David
r23495:b25f07cb default
@@ -1,2191 +1,2214 @@
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding
18 import error, osutil, encoding
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib
22 import imp, socket, urllib
23 import gc
23
24
24 if os.name == 'nt':
25 if os.name == 'nt':
25 import windows as platform
26 import windows as platform
26 else:
27 else:
27 import posix as platform
28 import posix as platform
28
29
29 cachestat = platform.cachestat
30 cachestat = platform.cachestat
30 checkexec = platform.checkexec
31 checkexec = platform.checkexec
31 checklink = platform.checklink
32 checklink = platform.checklink
32 copymode = platform.copymode
33 copymode = platform.copymode
33 executablepath = platform.executablepath
34 executablepath = platform.executablepath
34 expandglobs = platform.expandglobs
35 expandglobs = platform.expandglobs
35 explainexit = platform.explainexit
36 explainexit = platform.explainexit
36 findexe = platform.findexe
37 findexe = platform.findexe
37 gethgcmd = platform.gethgcmd
38 gethgcmd = platform.gethgcmd
38 getuser = platform.getuser
39 getuser = platform.getuser
39 groupmembers = platform.groupmembers
40 groupmembers = platform.groupmembers
40 groupname = platform.groupname
41 groupname = platform.groupname
41 hidewindow = platform.hidewindow
42 hidewindow = platform.hidewindow
42 isexec = platform.isexec
43 isexec = platform.isexec
43 isowner = platform.isowner
44 isowner = platform.isowner
44 localpath = platform.localpath
45 localpath = platform.localpath
45 lookupreg = platform.lookupreg
46 lookupreg = platform.lookupreg
46 makedir = platform.makedir
47 makedir = platform.makedir
47 nlinks = platform.nlinks
48 nlinks = platform.nlinks
48 normpath = platform.normpath
49 normpath = platform.normpath
49 normcase = platform.normcase
50 normcase = platform.normcase
50 openhardlinks = platform.openhardlinks
51 openhardlinks = platform.openhardlinks
51 oslink = platform.oslink
52 oslink = platform.oslink
52 parsepatchoutput = platform.parsepatchoutput
53 parsepatchoutput = platform.parsepatchoutput
53 pconvert = platform.pconvert
54 pconvert = platform.pconvert
54 popen = platform.popen
55 popen = platform.popen
55 posixfile = platform.posixfile
56 posixfile = platform.posixfile
56 quotecommand = platform.quotecommand
57 quotecommand = platform.quotecommand
57 readpipe = platform.readpipe
58 readpipe = platform.readpipe
58 rename = platform.rename
59 rename = platform.rename
59 samedevice = platform.samedevice
60 samedevice = platform.samedevice
60 samefile = platform.samefile
61 samefile = platform.samefile
61 samestat = platform.samestat
62 samestat = platform.samestat
62 setbinary = platform.setbinary
63 setbinary = platform.setbinary
63 setflags = platform.setflags
64 setflags = platform.setflags
64 setsignalhandler = platform.setsignalhandler
65 setsignalhandler = platform.setsignalhandler
65 shellquote = platform.shellquote
66 shellquote = platform.shellquote
66 spawndetached = platform.spawndetached
67 spawndetached = platform.spawndetached
67 split = platform.split
68 split = platform.split
68 sshargs = platform.sshargs
69 sshargs = platform.sshargs
69 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
70 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
70 statisexec = platform.statisexec
71 statisexec = platform.statisexec
71 statislink = platform.statislink
72 statislink = platform.statislink
72 termwidth = platform.termwidth
73 termwidth = platform.termwidth
73 testpid = platform.testpid
74 testpid = platform.testpid
74 umask = platform.umask
75 umask = platform.umask
75 unlink = platform.unlink
76 unlink = platform.unlink
76 unlinkpath = platform.unlinkpath
77 unlinkpath = platform.unlinkpath
77 username = platform.username
78 username = platform.username
78
79
79 # Python compatibility
80 # Python compatibility
80
81
81 _notset = object()
82 _notset = object()
82
83
83 def safehasattr(thing, attr):
84 def safehasattr(thing, attr):
84 return getattr(thing, attr, _notset) is not _notset
85 return getattr(thing, attr, _notset) is not _notset
85
86
86 def sha1(s=''):
87 def sha1(s=''):
87 '''
88 '''
88 Low-overhead wrapper around Python's SHA support
89 Low-overhead wrapper around Python's SHA support
89
90
90 >>> f = _fastsha1
91 >>> f = _fastsha1
91 >>> a = sha1()
92 >>> a = sha1()
92 >>> a = f()
93 >>> a = f()
93 >>> a.hexdigest()
94 >>> a.hexdigest()
94 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
95 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
95 '''
96 '''
96
97
97 return _fastsha1(s)
98 return _fastsha1(s)
98
99
99 def _fastsha1(s=''):
100 def _fastsha1(s=''):
100 # This function will import sha1 from hashlib or sha (whichever is
101 # This function will import sha1 from hashlib or sha (whichever is
101 # available) and overwrite itself with it on the first call.
102 # available) and overwrite itself with it on the first call.
102 # Subsequent calls will go directly to the imported function.
103 # Subsequent calls will go directly to the imported function.
103 if sys.version_info >= (2, 5):
104 if sys.version_info >= (2, 5):
104 from hashlib import sha1 as _sha1
105 from hashlib import sha1 as _sha1
105 else:
106 else:
106 from sha import sha as _sha1
107 from sha import sha as _sha1
107 global _fastsha1, sha1
108 global _fastsha1, sha1
108 _fastsha1 = sha1 = _sha1
109 _fastsha1 = sha1 = _sha1
109 return _sha1(s)
110 return _sha1(s)
110
111
111 def md5(s=''):
112 def md5(s=''):
112 try:
113 try:
113 from hashlib import md5 as _md5
114 from hashlib import md5 as _md5
114 except ImportError:
115 except ImportError:
115 from md5 import md5 as _md5
116 from md5 import md5 as _md5
116 global md5
117 global md5
117 md5 = _md5
118 md5 = _md5
118 return _md5(s)
119 return _md5(s)
119
120
120 DIGESTS = {
121 DIGESTS = {
121 'md5': md5,
122 'md5': md5,
122 'sha1': sha1,
123 'sha1': sha1,
123 }
124 }
124 # List of digest types from strongest to weakest
125 # List of digest types from strongest to weakest
125 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
126 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
126
127
127 try:
128 try:
128 import hashlib
129 import hashlib
129 DIGESTS.update({
130 DIGESTS.update({
130 'sha512': hashlib.sha512,
131 'sha512': hashlib.sha512,
131 })
132 })
132 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
133 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
133 except ImportError:
134 except ImportError:
134 pass
135 pass
135
136
136 for k in DIGESTS_BY_STRENGTH:
137 for k in DIGESTS_BY_STRENGTH:
137 assert k in DIGESTS
138 assert k in DIGESTS
138
139
139 class digester(object):
140 class digester(object):
140 """helper to compute digests.
141 """helper to compute digests.
141
142
142 This helper can be used to compute one or more digests given their name.
143 This helper can be used to compute one or more digests given their name.
143
144
144 >>> d = digester(['md5', 'sha1'])
145 >>> d = digester(['md5', 'sha1'])
145 >>> d.update('foo')
146 >>> d.update('foo')
146 >>> [k for k in sorted(d)]
147 >>> [k for k in sorted(d)]
147 ['md5', 'sha1']
148 ['md5', 'sha1']
148 >>> d['md5']
149 >>> d['md5']
149 'acbd18db4cc2f85cedef654fccc4a4d8'
150 'acbd18db4cc2f85cedef654fccc4a4d8'
150 >>> d['sha1']
151 >>> d['sha1']
151 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
152 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
152 >>> digester.preferred(['md5', 'sha1'])
153 >>> digester.preferred(['md5', 'sha1'])
153 'sha1'
154 'sha1'
154 """
155 """
155
156
156 def __init__(self, digests, s=''):
157 def __init__(self, digests, s=''):
157 self._hashes = {}
158 self._hashes = {}
158 for k in digests:
159 for k in digests:
159 if k not in DIGESTS:
160 if k not in DIGESTS:
160 raise Abort(_('unknown digest type: %s') % k)
161 raise Abort(_('unknown digest type: %s') % k)
161 self._hashes[k] = DIGESTS[k]()
162 self._hashes[k] = DIGESTS[k]()
162 if s:
163 if s:
163 self.update(s)
164 self.update(s)
164
165
165 def update(self, data):
166 def update(self, data):
166 for h in self._hashes.values():
167 for h in self._hashes.values():
167 h.update(data)
168 h.update(data)
168
169
169 def __getitem__(self, key):
170 def __getitem__(self, key):
170 if key not in DIGESTS:
171 if key not in DIGESTS:
171 raise Abort(_('unknown digest type: %s') % k)
172 raise Abort(_('unknown digest type: %s') % k)
172 return self._hashes[key].hexdigest()
173 return self._hashes[key].hexdigest()
173
174
174 def __iter__(self):
175 def __iter__(self):
175 return iter(self._hashes)
176 return iter(self._hashes)
176
177
177 @staticmethod
178 @staticmethod
178 def preferred(supported):
179 def preferred(supported):
179 """returns the strongest digest type in both supported and DIGESTS."""
180 """returns the strongest digest type in both supported and DIGESTS."""
180
181
181 for k in DIGESTS_BY_STRENGTH:
182 for k in DIGESTS_BY_STRENGTH:
182 if k in supported:
183 if k in supported:
183 return k
184 return k
184 return None
185 return None
185
186
186 class digestchecker(object):
187 class digestchecker(object):
187 """file handle wrapper that additionally checks content against a given
188 """file handle wrapper that additionally checks content against a given
188 size and digests.
189 size and digests.
189
190
190 d = digestchecker(fh, size, {'md5': '...'})
191 d = digestchecker(fh, size, {'md5': '...'})
191
192
192 When multiple digests are given, all of them are validated.
193 When multiple digests are given, all of them are validated.
193 """
194 """
194
195
195 def __init__(self, fh, size, digests):
196 def __init__(self, fh, size, digests):
196 self._fh = fh
197 self._fh = fh
197 self._size = size
198 self._size = size
198 self._got = 0
199 self._got = 0
199 self._digests = dict(digests)
200 self._digests = dict(digests)
200 self._digester = digester(self._digests.keys())
201 self._digester = digester(self._digests.keys())
201
202
202 def read(self, length=-1):
203 def read(self, length=-1):
203 content = self._fh.read(length)
204 content = self._fh.read(length)
204 self._digester.update(content)
205 self._digester.update(content)
205 self._got += len(content)
206 self._got += len(content)
206 return content
207 return content
207
208
208 def validate(self):
209 def validate(self):
209 if self._size != self._got:
210 if self._size != self._got:
210 raise Abort(_('size mismatch: expected %d, got %d') %
211 raise Abort(_('size mismatch: expected %d, got %d') %
211 (self._size, self._got))
212 (self._size, self._got))
212 for k, v in self._digests.items():
213 for k, v in self._digests.items():
213 if v != self._digester[k]:
214 if v != self._digester[k]:
214 # i18n: first parameter is a digest name
215 # i18n: first parameter is a digest name
215 raise Abort(_('%s mismatch: expected %s, got %s') %
216 raise Abort(_('%s mismatch: expected %s, got %s') %
216 (k, v, self._digester[k]))
217 (k, v, self._digester[k]))
217
218
218 try:
219 try:
219 buffer = buffer
220 buffer = buffer
220 except NameError:
221 except NameError:
221 if sys.version_info[0] < 3:
222 if sys.version_info[0] < 3:
222 def buffer(sliceable, offset=0):
223 def buffer(sliceable, offset=0):
223 return sliceable[offset:]
224 return sliceable[offset:]
224 else:
225 else:
225 def buffer(sliceable, offset=0):
226 def buffer(sliceable, offset=0):
226 return memoryview(sliceable)[offset:]
227 return memoryview(sliceable)[offset:]
227
228
228 import subprocess
229 import subprocess
229 closefds = os.name == 'posix'
230 closefds = os.name == 'posix'
230
231
231 def popen2(cmd, env=None, newlines=False):
232 def popen2(cmd, env=None, newlines=False):
232 # Setting bufsize to -1 lets the system decide the buffer size.
233 # Setting bufsize to -1 lets the system decide the buffer size.
233 # The default for bufsize is 0, meaning unbuffered. This leads to
234 # The default for bufsize is 0, meaning unbuffered. This leads to
234 # poor performance on Mac OS X: http://bugs.python.org/issue4194
235 # poor performance on Mac OS X: http://bugs.python.org/issue4194
235 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
236 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
236 close_fds=closefds,
237 close_fds=closefds,
237 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
238 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
238 universal_newlines=newlines,
239 universal_newlines=newlines,
239 env=env)
240 env=env)
240 return p.stdin, p.stdout
241 return p.stdin, p.stdout
241
242
242 def popen3(cmd, env=None, newlines=False):
243 def popen3(cmd, env=None, newlines=False):
243 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
244 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
244 return stdin, stdout, stderr
245 return stdin, stdout, stderr
245
246
246 def popen4(cmd, env=None, newlines=False):
247 def popen4(cmd, env=None, newlines=False):
247 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
248 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
248 close_fds=closefds,
249 close_fds=closefds,
249 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
250 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
250 stderr=subprocess.PIPE,
251 stderr=subprocess.PIPE,
251 universal_newlines=newlines,
252 universal_newlines=newlines,
252 env=env)
253 env=env)
253 return p.stdin, p.stdout, p.stderr, p
254 return p.stdin, p.stdout, p.stderr, p
254
255
255 def version():
256 def version():
256 """Return version information if available."""
257 """Return version information if available."""
257 try:
258 try:
258 import __version__
259 import __version__
259 return __version__.version
260 return __version__.version
260 except ImportError:
261 except ImportError:
261 return 'unknown'
262 return 'unknown'
262
263
263 # used by parsedate
264 # used by parsedate
264 defaultdateformats = (
265 defaultdateformats = (
265 '%Y-%m-%d %H:%M:%S',
266 '%Y-%m-%d %H:%M:%S',
266 '%Y-%m-%d %I:%M:%S%p',
267 '%Y-%m-%d %I:%M:%S%p',
267 '%Y-%m-%d %H:%M',
268 '%Y-%m-%d %H:%M',
268 '%Y-%m-%d %I:%M%p',
269 '%Y-%m-%d %I:%M%p',
269 '%Y-%m-%d',
270 '%Y-%m-%d',
270 '%m-%d',
271 '%m-%d',
271 '%m/%d',
272 '%m/%d',
272 '%m/%d/%y',
273 '%m/%d/%y',
273 '%m/%d/%Y',
274 '%m/%d/%Y',
274 '%a %b %d %H:%M:%S %Y',
275 '%a %b %d %H:%M:%S %Y',
275 '%a %b %d %I:%M:%S%p %Y',
276 '%a %b %d %I:%M:%S%p %Y',
276 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
277 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
277 '%b %d %H:%M:%S %Y',
278 '%b %d %H:%M:%S %Y',
278 '%b %d %I:%M:%S%p %Y',
279 '%b %d %I:%M:%S%p %Y',
279 '%b %d %H:%M:%S',
280 '%b %d %H:%M:%S',
280 '%b %d %I:%M:%S%p',
281 '%b %d %I:%M:%S%p',
281 '%b %d %H:%M',
282 '%b %d %H:%M',
282 '%b %d %I:%M%p',
283 '%b %d %I:%M%p',
283 '%b %d %Y',
284 '%b %d %Y',
284 '%b %d',
285 '%b %d',
285 '%H:%M:%S',
286 '%H:%M:%S',
286 '%I:%M:%S%p',
287 '%I:%M:%S%p',
287 '%H:%M',
288 '%H:%M',
288 '%I:%M%p',
289 '%I:%M%p',
289 )
290 )
290
291
291 extendeddateformats = defaultdateformats + (
292 extendeddateformats = defaultdateformats + (
292 "%Y",
293 "%Y",
293 "%Y-%m",
294 "%Y-%m",
294 "%b",
295 "%b",
295 "%b %Y",
296 "%b %Y",
296 )
297 )
297
298
298 def cachefunc(func):
299 def cachefunc(func):
299 '''cache the result of function calls'''
300 '''cache the result of function calls'''
300 # XXX doesn't handle keyword args
301 # XXX doesn't handle keyword args
301 if func.func_code.co_argcount == 0:
302 if func.func_code.co_argcount == 0:
302 cache = []
303 cache = []
303 def f():
304 def f():
304 if len(cache) == 0:
305 if len(cache) == 0:
305 cache.append(func())
306 cache.append(func())
306 return cache[0]
307 return cache[0]
307 return f
308 return f
308 cache = {}
309 cache = {}
309 if func.func_code.co_argcount == 1:
310 if func.func_code.co_argcount == 1:
310 # we gain a small amount of time because
311 # we gain a small amount of time because
311 # we don't need to pack/unpack the list
312 # we don't need to pack/unpack the list
312 def f(arg):
313 def f(arg):
313 if arg not in cache:
314 if arg not in cache:
314 cache[arg] = func(arg)
315 cache[arg] = func(arg)
315 return cache[arg]
316 return cache[arg]
316 else:
317 else:
317 def f(*args):
318 def f(*args):
318 if args not in cache:
319 if args not in cache:
319 cache[args] = func(*args)
320 cache[args] = func(*args)
320 return cache[args]
321 return cache[args]
321
322
322 return f
323 return f
323
324
324 try:
325 try:
325 collections.deque.remove
326 collections.deque.remove
326 deque = collections.deque
327 deque = collections.deque
327 except AttributeError:
328 except AttributeError:
328 # python 2.4 lacks deque.remove
329 # python 2.4 lacks deque.remove
329 class deque(collections.deque):
330 class deque(collections.deque):
330 def remove(self, val):
331 def remove(self, val):
331 for i, v in enumerate(self):
332 for i, v in enumerate(self):
332 if v == val:
333 if v == val:
333 del self[i]
334 del self[i]
334 break
335 break
335
336
336 class sortdict(dict):
337 class sortdict(dict):
337 '''a simple sorted dictionary'''
338 '''a simple sorted dictionary'''
338 def __init__(self, data=None):
339 def __init__(self, data=None):
339 self._list = []
340 self._list = []
340 if data:
341 if data:
341 self.update(data)
342 self.update(data)
342 def copy(self):
343 def copy(self):
343 return sortdict(self)
344 return sortdict(self)
344 def __setitem__(self, key, val):
345 def __setitem__(self, key, val):
345 if key in self:
346 if key in self:
346 self._list.remove(key)
347 self._list.remove(key)
347 self._list.append(key)
348 self._list.append(key)
348 dict.__setitem__(self, key, val)
349 dict.__setitem__(self, key, val)
349 def __iter__(self):
350 def __iter__(self):
350 return self._list.__iter__()
351 return self._list.__iter__()
351 def update(self, src):
352 def update(self, src):
352 for k in src:
353 for k in src:
353 self[k] = src[k]
354 self[k] = src[k]
354 def clear(self):
355 def clear(self):
355 dict.clear(self)
356 dict.clear(self)
356 self._list = []
357 self._list = []
357 def items(self):
358 def items(self):
358 return [(k, self[k]) for k in self._list]
359 return [(k, self[k]) for k in self._list]
359 def __delitem__(self, key):
360 def __delitem__(self, key):
360 dict.__delitem__(self, key)
361 dict.__delitem__(self, key)
361 self._list.remove(key)
362 self._list.remove(key)
362 def pop(self, key, *args, **kwargs):
363 def pop(self, key, *args, **kwargs):
363 dict.pop(self, key, *args, **kwargs)
364 dict.pop(self, key, *args, **kwargs)
364 try:
365 try:
365 self._list.remove(key)
366 self._list.remove(key)
366 except ValueError:
367 except ValueError:
367 pass
368 pass
368 def keys(self):
369 def keys(self):
369 return self._list
370 return self._list
370 def iterkeys(self):
371 def iterkeys(self):
371 return self._list.__iter__()
372 return self._list.__iter__()
372 def iteritems(self):
373 def iteritems(self):
373 for k in self._list:
374 for k in self._list:
374 yield k, self[k]
375 yield k, self[k]
375 def insert(self, index, key, val):
376 def insert(self, index, key, val):
376 self._list.insert(index, key)
377 self._list.insert(index, key)
377 dict.__setitem__(self, key, val)
378 dict.__setitem__(self, key, val)
378
379
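# --- illustrative sketch, not part of util.py or this changeset ---
# sortdict preserves insertion order; re-assigning an existing key moves it
# to the end, and keys()/iteration follow that order.
_d = sortdict()
_d['b'] = 1
_d['a'] = 2
assert _d.keys() == ['b', 'a']
_d['b'] = 3                    # existing key is moved to the end
assert _d.keys() == ['a', 'b']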
379 class lrucachedict(object):
380 class lrucachedict(object):
380 '''cache most recent gets from or sets to this dictionary'''
381 '''cache most recent gets from or sets to this dictionary'''
381 def __init__(self, maxsize):
382 def __init__(self, maxsize):
382 self._cache = {}
383 self._cache = {}
383 self._maxsize = maxsize
384 self._maxsize = maxsize
384 self._order = deque()
385 self._order = deque()
385
386
386 def __getitem__(self, key):
387 def __getitem__(self, key):
387 value = self._cache[key]
388 value = self._cache[key]
388 self._order.remove(key)
389 self._order.remove(key)
389 self._order.append(key)
390 self._order.append(key)
390 return value
391 return value
391
392
392 def __setitem__(self, key, value):
393 def __setitem__(self, key, value):
393 if key not in self._cache:
394 if key not in self._cache:
394 if len(self._cache) >= self._maxsize:
395 if len(self._cache) >= self._maxsize:
395 del self._cache[self._order.popleft()]
396 del self._cache[self._order.popleft()]
396 else:
397 else:
397 self._order.remove(key)
398 self._order.remove(key)
398 self._cache[key] = value
399 self._cache[key] = value
399 self._order.append(key)
400 self._order.append(key)
400
401
401 def __contains__(self, key):
402 def __contains__(self, key):
402 return key in self._cache
403 return key in self._cache
403
404
404 def clear(self):
405 def clear(self):
405 self._cache.clear()
406 self._cache.clear()
406 self._order = deque()
407 self._order = deque()
407
408
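# --- illustrative sketch, not part of util.py or this changeset ---
# lrucachedict keeps at most maxsize entries; inserting a new key beyond the
# limit evicts the least recently used one, and reads count as "uses".
_lru = lrucachedict(2)
_lru['a'] = 1
_lru['b'] = 2
_lru['a']                      # touch 'a' so 'b' becomes the oldest entry
_lru['c'] = 3                  # evicts 'b'
assert 'b' not in _lru and 'a' in _lru and 'c' in _lru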
408 def lrucachefunc(func):
409 def lrucachefunc(func):
409 '''cache most recent results of function calls'''
410 '''cache most recent results of function calls'''
410 cache = {}
411 cache = {}
411 order = deque()
412 order = deque()
412 if func.func_code.co_argcount == 1:
413 if func.func_code.co_argcount == 1:
413 def f(arg):
414 def f(arg):
414 if arg not in cache:
415 if arg not in cache:
415 if len(cache) > 20:
416 if len(cache) > 20:
416 del cache[order.popleft()]
417 del cache[order.popleft()]
417 cache[arg] = func(arg)
418 cache[arg] = func(arg)
418 else:
419 else:
419 order.remove(arg)
420 order.remove(arg)
420 order.append(arg)
421 order.append(arg)
421 return cache[arg]
422 return cache[arg]
422 else:
423 else:
423 def f(*args):
424 def f(*args):
424 if args not in cache:
425 if args not in cache:
425 if len(cache) > 20:
426 if len(cache) > 20:
426 del cache[order.popleft()]
427 del cache[order.popleft()]
427 cache[args] = func(*args)
428 cache[args] = func(*args)
428 else:
429 else:
429 order.remove(args)
430 order.remove(args)
430 order.append(args)
431 order.append(args)
431 return cache[args]
432 return cache[args]
432
433
433 return f
434 return f
434
435
435 class propertycache(object):
436 class propertycache(object):
436 def __init__(self, func):
437 def __init__(self, func):
437 self.func = func
438 self.func = func
438 self.name = func.__name__
439 self.name = func.__name__
439 def __get__(self, obj, type=None):
440 def __get__(self, obj, type=None):
440 result = self.func(obj)
441 result = self.func(obj)
441 self.cachevalue(obj, result)
442 self.cachevalue(obj, result)
442 return result
443 return result
443
444
444 def cachevalue(self, obj, value):
445 def cachevalue(self, obj, value):
445 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
446 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
446 obj.__dict__[self.name] = value
447 obj.__dict__[self.name] = value
447
448
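# --- illustrative sketch, not part of util.py or this changeset ---
# propertycache computes an attribute once, then stores the result directly
# in the instance __dict__ so later lookups bypass the descriptor entirely.
# _example is a hypothetical class used only for illustration.
class _example(object):
    @propertycache
    def expensive(self):
        return sum(xrange(100))

_obj = _example()
assert _obj.expensive == 4950          # computed on first access
assert 'expensive' in _obj.__dict__    # cached; descriptor no longer consulted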
448 def pipefilter(s, cmd):
449 def pipefilter(s, cmd):
449 '''filter string S through command CMD, returning its output'''
450 '''filter string S through command CMD, returning its output'''
450 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
451 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
451 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
452 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
452 pout, perr = p.communicate(s)
453 pout, perr = p.communicate(s)
453 return pout
454 return pout
454
455
455 def tempfilter(s, cmd):
456 def tempfilter(s, cmd):
456 '''filter string S through a pair of temporary files with CMD.
457 '''filter string S through a pair of temporary files with CMD.
457 CMD is used as a template to create the real command to be run,
458 CMD is used as a template to create the real command to be run,
458 with the strings INFILE and OUTFILE replaced by the real names of
459 with the strings INFILE and OUTFILE replaced by the real names of
459 the temporary files generated.'''
460 the temporary files generated.'''
460 inname, outname = None, None
461 inname, outname = None, None
461 try:
462 try:
462 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
463 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
463 fp = os.fdopen(infd, 'wb')
464 fp = os.fdopen(infd, 'wb')
464 fp.write(s)
465 fp.write(s)
465 fp.close()
466 fp.close()
466 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
467 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
467 os.close(outfd)
468 os.close(outfd)
468 cmd = cmd.replace('INFILE', inname)
469 cmd = cmd.replace('INFILE', inname)
469 cmd = cmd.replace('OUTFILE', outname)
470 cmd = cmd.replace('OUTFILE', outname)
470 code = os.system(cmd)
471 code = os.system(cmd)
471 if sys.platform == 'OpenVMS' and code & 1:
472 if sys.platform == 'OpenVMS' and code & 1:
472 code = 0
473 code = 0
473 if code:
474 if code:
474 raise Abort(_("command '%s' failed: %s") %
475 raise Abort(_("command '%s' failed: %s") %
475 (cmd, explainexit(code)))
476 (cmd, explainexit(code)))
476 fp = open(outname, 'rb')
477 fp = open(outname, 'rb')
477 r = fp.read()
478 r = fp.read()
478 fp.close()
479 fp.close()
479 return r
480 return r
480 finally:
481 finally:
481 try:
482 try:
482 if inname:
483 if inname:
483 os.unlink(inname)
484 os.unlink(inname)
484 except OSError:
485 except OSError:
485 pass
486 pass
486 try:
487 try:
487 if outname:
488 if outname:
488 os.unlink(outname)
489 os.unlink(outname)
489 except OSError:
490 except OSError:
490 pass
491 pass
491
492
492 filtertable = {
493 filtertable = {
493 'tempfile:': tempfilter,
494 'tempfile:': tempfilter,
494 'pipe:': pipefilter,
495 'pipe:': pipefilter,
495 }
496 }
496
497
497 def filter(s, cmd):
498 def filter(s, cmd):
498 "filter a string through a command that transforms its input to its output"
499 "filter a string through a command that transforms its input to its output"
499 for name, fn in filtertable.iteritems():
500 for name, fn in filtertable.iteritems():
500 if cmd.startswith(name):
501 if cmd.startswith(name):
501 return fn(s, cmd[len(name):].lstrip())
502 return fn(s, cmd[len(name):].lstrip())
502 return pipefilter(s, cmd)
503 return pipefilter(s, cmd)
503
504
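# --- illustrative sketch, not part of util.py or this changeset ---
# filter() dispatches on the command prefix: 'pipe:' streams the string
# through the command's stdin/stdout, 'tempfile:' runs the command against
# temporary INFILE/OUTFILE paths. Assumes a POSIX 'tr' command is available.
assert filter('foo\n', 'pipe: tr a-z A-Z') == 'FOO\n'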
504 def binary(s):
505 def binary(s):
505 """return true if a string is binary data"""
506 """return true if a string is binary data"""
506 return bool(s and '\0' in s)
507 return bool(s and '\0' in s)
507
508
508 def increasingchunks(source, min=1024, max=65536):
509 def increasingchunks(source, min=1024, max=65536):
509 '''return no less than min bytes per chunk while data remains,
510 '''return no less than min bytes per chunk while data remains,
510 doubling min after each chunk until it reaches max'''
511 doubling min after each chunk until it reaches max'''
511 def log2(x):
512 def log2(x):
512 if not x:
513 if not x:
513 return 0
514 return 0
514 i = 0
515 i = 0
515 while x:
516 while x:
516 x >>= 1
517 x >>= 1
517 i += 1
518 i += 1
518 return i - 1
519 return i - 1
519
520
520 buf = []
521 buf = []
521 blen = 0
522 blen = 0
522 for chunk in source:
523 for chunk in source:
523 buf.append(chunk)
524 buf.append(chunk)
524 blen += len(chunk)
525 blen += len(chunk)
525 if blen >= min:
526 if blen >= min:
526 if min < max:
527 if min < max:
527 min = min << 1
528 min = min << 1
528 nmin = 1 << log2(blen)
529 nmin = 1 << log2(blen)
529 if nmin > min:
530 if nmin > min:
530 min = nmin
531 min = nmin
531 if min > max:
532 if min > max:
532 min = max
533 min = max
533 yield ''.join(buf)
534 yield ''.join(buf)
534 blen = 0
535 blen = 0
535 buf = []
536 buf = []
536 if buf:
537 if buf:
537 yield ''.join(buf)
538 yield ''.join(buf)
538
539
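# --- illustrative sketch, not part of util.py or this changeset ---
# increasingchunks regroups an iterable of strings into chunks of at least
# 'min' bytes, doubling the threshold after each emitted chunk up to 'max'.
_parts = ['a' * 512] * 8                       # 4096 bytes in small pieces
_chunks = list(increasingchunks(_parts, min=1024, max=4096))
assert sum(len(c) for c in _chunks) == 4096    # nothing lost
assert len(_chunks[0]) >= 1024                 # first chunk meets the minimum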
539 Abort = error.Abort
540 Abort = error.Abort
540
541
541 def always(fn):
542 def always(fn):
542 return True
543 return True
543
544
544 def never(fn):
545 def never(fn):
545 return False
546 return False
546
547
548 def nogc(func):
549 """disable garbage collector
550
551 Python's garbage collector triggers a GC each time a certain number of
552 container objects (the number being defined by gc.get_threshold()) are
553 allocated even when marked not to be tracked by the collector. Tracking has
554 no effect on when GCs are triggered, only on what objects the GC looks
555 into. As a workaround, disable GC while building complex (huge)
556 containers.
557
558 This garbage collector issue has been fixed in Python 2.7.
559 """
560 def wrapper(*args, **kwargs):
561 gcenabled = gc.isenabled()
562 gc.disable()
563 try:
564 return func(*args, **kwargs)
565 finally:
566 if gcenabled:
567 gc.enable()
568 return wrapper
569
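# --- illustrative sketch, not part of util.py or this changeset ---
# How a caller might apply the new nogc decorator: the wrapped function runs
# with the cyclic garbage collector disabled, and the previous GC state is
# restored afterwards. _buildhugemap is a hypothetical example function.
@nogc
def _buildhugemap(n):
    # allocating many container objects here no longer triggers repeated
    # collection passes based purely on the allocation count
    return dict((i, (i, i)) for i in xrange(n))

assert len(_buildhugemap(100000)) == 100000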
547 def pathto(root, n1, n2):
570 def pathto(root, n1, n2):
548 '''return the relative path from one place to another.
571 '''return the relative path from one place to another.
549 root should use os.sep to separate directories
572 root should use os.sep to separate directories
550 n1 should use os.sep to separate directories
573 n1 should use os.sep to separate directories
551 n2 should use "/" to separate directories
574 n2 should use "/" to separate directories
552 returns an os.sep-separated path.
575 returns an os.sep-separated path.
553
576
554 If n1 is a relative path, it's assumed it's
577 If n1 is a relative path, it's assumed it's
555 relative to root.
578 relative to root.
556 n2 should always be relative to root.
579 n2 should always be relative to root.
557 '''
580 '''
558 if not n1:
581 if not n1:
559 return localpath(n2)
582 return localpath(n2)
560 if os.path.isabs(n1):
583 if os.path.isabs(n1):
561 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
584 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
562 return os.path.join(root, localpath(n2))
585 return os.path.join(root, localpath(n2))
563 n2 = '/'.join((pconvert(root), n2))
586 n2 = '/'.join((pconvert(root), n2))
564 a, b = splitpath(n1), n2.split('/')
587 a, b = splitpath(n1), n2.split('/')
565 a.reverse()
588 a.reverse()
566 b.reverse()
589 b.reverse()
567 while a and b and a[-1] == b[-1]:
590 while a and b and a[-1] == b[-1]:
568 a.pop()
591 a.pop()
569 b.pop()
592 b.pop()
570 b.reverse()
593 b.reverse()
571 return os.sep.join((['..'] * len(a)) + b) or '.'
594 return os.sep.join((['..'] * len(a)) + b) or '.'
572
595
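# --- illustrative sketch, not part of util.py or this changeset ---
# pathto returns the relative path from n1 to n2, both interpreted relative
# to root. Assuming a POSIX os.sep ('/'), the hypothetical paths below give:
assert pathto('/repo', 'a/b', 'a/c/d') == '../c/d'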
573 def mainfrozen():
596 def mainfrozen():
574 """return True if we are a frozen executable.
597 """return True if we are a frozen executable.
575
598
576 The code supports py2exe (most common, Windows only) and tools/freeze
599 The code supports py2exe (most common, Windows only) and tools/freeze
577 (portable, not much used).
600 (portable, not much used).
578 """
601 """
579 return (safehasattr(sys, "frozen") or # new py2exe
602 return (safehasattr(sys, "frozen") or # new py2exe
580 safehasattr(sys, "importers") or # old py2exe
603 safehasattr(sys, "importers") or # old py2exe
581 imp.is_frozen("__main__")) # tools/freeze
604 imp.is_frozen("__main__")) # tools/freeze
582
605
583 # the location of data files matching the source code
606 # the location of data files matching the source code
584 if mainfrozen():
607 if mainfrozen():
585 # executable version (py2exe) doesn't support __file__
608 # executable version (py2exe) doesn't support __file__
586 datapath = os.path.dirname(sys.executable)
609 datapath = os.path.dirname(sys.executable)
587 else:
610 else:
588 datapath = os.path.dirname(__file__)
611 datapath = os.path.dirname(__file__)
589
612
590 i18n.setdatapath(datapath)
613 i18n.setdatapath(datapath)
591
614
592 _hgexecutable = None
615 _hgexecutable = None
593
616
594 def hgexecutable():
617 def hgexecutable():
595 """return location of the 'hg' executable.
618 """return location of the 'hg' executable.
596
619
597 Defaults to $HG or 'hg' in the search path.
620 Defaults to $HG or 'hg' in the search path.
598 """
621 """
599 if _hgexecutable is None:
622 if _hgexecutable is None:
600 hg = os.environ.get('HG')
623 hg = os.environ.get('HG')
601 mainmod = sys.modules['__main__']
624 mainmod = sys.modules['__main__']
602 if hg:
625 if hg:
603 _sethgexecutable(hg)
626 _sethgexecutable(hg)
604 elif mainfrozen():
627 elif mainfrozen():
605 _sethgexecutable(sys.executable)
628 _sethgexecutable(sys.executable)
606 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
629 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
607 _sethgexecutable(mainmod.__file__)
630 _sethgexecutable(mainmod.__file__)
608 else:
631 else:
609 exe = findexe('hg') or os.path.basename(sys.argv[0])
632 exe = findexe('hg') or os.path.basename(sys.argv[0])
610 _sethgexecutable(exe)
633 _sethgexecutable(exe)
611 return _hgexecutable
634 return _hgexecutable
612
635
613 def _sethgexecutable(path):
636 def _sethgexecutable(path):
614 """set location of the 'hg' executable"""
637 """set location of the 'hg' executable"""
615 global _hgexecutable
638 global _hgexecutable
616 _hgexecutable = path
639 _hgexecutable = path
617
640
618 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
641 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
619 '''enhanced shell command execution.
642 '''enhanced shell command execution.
620 run with environment maybe modified, maybe in different dir.
643 run with environment maybe modified, maybe in different dir.
621
644
622 if command fails and onerr is None, return status, else raise onerr
645 if command fails and onerr is None, return status, else raise onerr
623 object as exception.
646 object as exception.
624
647
625 if out is specified, it is assumed to be a file-like object that has a
648 if out is specified, it is assumed to be a file-like object that has a
626 write() method. stdout and stderr will be redirected to out.'''
649 write() method. stdout and stderr will be redirected to out.'''
627 try:
650 try:
628 sys.stdout.flush()
651 sys.stdout.flush()
629 except Exception:
652 except Exception:
630 pass
653 pass
631 def py2shell(val):
654 def py2shell(val):
632 'convert python object into string that is useful to shell'
655 'convert python object into string that is useful to shell'
633 if val is None or val is False:
656 if val is None or val is False:
634 return '0'
657 return '0'
635 if val is True:
658 if val is True:
636 return '1'
659 return '1'
637 return str(val)
660 return str(val)
638 origcmd = cmd
661 origcmd = cmd
639 cmd = quotecommand(cmd)
662 cmd = quotecommand(cmd)
640 if sys.platform == 'plan9' and (sys.version_info[0] == 2
663 if sys.platform == 'plan9' and (sys.version_info[0] == 2
641 and sys.version_info[1] < 7):
664 and sys.version_info[1] < 7):
642 # subprocess kludge to work around issues in half-baked Python
665 # subprocess kludge to work around issues in half-baked Python
643 # ports, notably bichued/python:
666 # ports, notably bichued/python:
644 if not cwd is None:
667 if not cwd is None:
645 os.chdir(cwd)
668 os.chdir(cwd)
646 rc = os.system(cmd)
669 rc = os.system(cmd)
647 else:
670 else:
648 env = dict(os.environ)
671 env = dict(os.environ)
649 env.update((k, py2shell(v)) for k, v in environ.iteritems())
672 env.update((k, py2shell(v)) for k, v in environ.iteritems())
650 env['HG'] = hgexecutable()
673 env['HG'] = hgexecutable()
651 if out is None or out == sys.__stdout__:
674 if out is None or out == sys.__stdout__:
652 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
675 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
653 env=env, cwd=cwd)
676 env=env, cwd=cwd)
654 else:
677 else:
655 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
678 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
656 env=env, cwd=cwd, stdout=subprocess.PIPE,
679 env=env, cwd=cwd, stdout=subprocess.PIPE,
657 stderr=subprocess.STDOUT)
680 stderr=subprocess.STDOUT)
658 while True:
681 while True:
659 line = proc.stdout.readline()
682 line = proc.stdout.readline()
660 if not line:
683 if not line:
661 break
684 break
662 out.write(line)
685 out.write(line)
663 proc.wait()
686 proc.wait()
664 rc = proc.returncode
687 rc = proc.returncode
665 if sys.platform == 'OpenVMS' and rc & 1:
688 if sys.platform == 'OpenVMS' and rc & 1:
666 rc = 0
689 rc = 0
667 if rc and onerr:
690 if rc and onerr:
668 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
691 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
669 explainexit(rc)[0])
692 explainexit(rc)[0])
670 if errprefix:
693 if errprefix:
671 errmsg = '%s: %s' % (errprefix, errmsg)
694 errmsg = '%s: %s' % (errprefix, errmsg)
672 raise onerr(errmsg)
695 raise onerr(errmsg)
673 return rc
696 return rc
674
697
675 def checksignature(func):
698 def checksignature(func):
676 '''wrap a function with code to check for calling errors'''
699 '''wrap a function with code to check for calling errors'''
677 def check(*args, **kwargs):
700 def check(*args, **kwargs):
678 try:
701 try:
679 return func(*args, **kwargs)
702 return func(*args, **kwargs)
680 except TypeError:
703 except TypeError:
681 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
704 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
682 raise error.SignatureError
705 raise error.SignatureError
683 raise
706 raise
684
707
685 return check
708 return check
686
709
687 def copyfile(src, dest):
710 def copyfile(src, dest):
688 "copy a file, preserving mode and atime/mtime"
711 "copy a file, preserving mode and atime/mtime"
689 if os.path.lexists(dest):
712 if os.path.lexists(dest):
690 unlink(dest)
713 unlink(dest)
691 if os.path.islink(src):
714 if os.path.islink(src):
692 os.symlink(os.readlink(src), dest)
715 os.symlink(os.readlink(src), dest)
693 else:
716 else:
694 try:
717 try:
695 shutil.copyfile(src, dest)
718 shutil.copyfile(src, dest)
696 shutil.copymode(src, dest)
719 shutil.copymode(src, dest)
697 except shutil.Error, inst:
720 except shutil.Error, inst:
698 raise Abort(str(inst))
721 raise Abort(str(inst))
699
722
700 def copyfiles(src, dst, hardlink=None):
723 def copyfiles(src, dst, hardlink=None):
701 """Copy a directory tree using hardlinks if possible"""
724 """Copy a directory tree using hardlinks if possible"""
702
725
703 if hardlink is None:
726 if hardlink is None:
704 hardlink = (os.stat(src).st_dev ==
727 hardlink = (os.stat(src).st_dev ==
705 os.stat(os.path.dirname(dst)).st_dev)
728 os.stat(os.path.dirname(dst)).st_dev)
706
729
707 num = 0
730 num = 0
708 if os.path.isdir(src):
731 if os.path.isdir(src):
709 os.mkdir(dst)
732 os.mkdir(dst)
710 for name, kind in osutil.listdir(src):
733 for name, kind in osutil.listdir(src):
711 srcname = os.path.join(src, name)
734 srcname = os.path.join(src, name)
712 dstname = os.path.join(dst, name)
735 dstname = os.path.join(dst, name)
713 hardlink, n = copyfiles(srcname, dstname, hardlink)
736 hardlink, n = copyfiles(srcname, dstname, hardlink)
714 num += n
737 num += n
715 else:
738 else:
716 if hardlink:
739 if hardlink:
717 try:
740 try:
718 oslink(src, dst)
741 oslink(src, dst)
719 except (IOError, OSError):
742 except (IOError, OSError):
720 hardlink = False
743 hardlink = False
721 shutil.copy(src, dst)
744 shutil.copy(src, dst)
722 else:
745 else:
723 shutil.copy(src, dst)
746 shutil.copy(src, dst)
724 num += 1
747 num += 1
725
748
726 return hardlink, num
749 return hardlink, num
727
750
728 _winreservednames = '''con prn aux nul
751 _winreservednames = '''con prn aux nul
729 com1 com2 com3 com4 com5 com6 com7 com8 com9
752 com1 com2 com3 com4 com5 com6 com7 com8 com9
730 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
753 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
731 _winreservedchars = ':*?"<>|'
754 _winreservedchars = ':*?"<>|'
732 def checkwinfilename(path):
755 def checkwinfilename(path):
733 r'''Check that the base-relative path is a valid filename on Windows.
756 r'''Check that the base-relative path is a valid filename on Windows.
734 Returns None if the path is ok, or a UI string describing the problem.
757 Returns None if the path is ok, or a UI string describing the problem.
735
758
736 >>> checkwinfilename("just/a/normal/path")
759 >>> checkwinfilename("just/a/normal/path")
737 >>> checkwinfilename("foo/bar/con.xml")
760 >>> checkwinfilename("foo/bar/con.xml")
738 "filename contains 'con', which is reserved on Windows"
761 "filename contains 'con', which is reserved on Windows"
739 >>> checkwinfilename("foo/con.xml/bar")
762 >>> checkwinfilename("foo/con.xml/bar")
740 "filename contains 'con', which is reserved on Windows"
763 "filename contains 'con', which is reserved on Windows"
741 >>> checkwinfilename("foo/bar/xml.con")
764 >>> checkwinfilename("foo/bar/xml.con")
742 >>> checkwinfilename("foo/bar/AUX/bla.txt")
765 >>> checkwinfilename("foo/bar/AUX/bla.txt")
743 "filename contains 'AUX', which is reserved on Windows"
766 "filename contains 'AUX', which is reserved on Windows"
744 >>> checkwinfilename("foo/bar/bla:.txt")
767 >>> checkwinfilename("foo/bar/bla:.txt")
745 "filename contains ':', which is reserved on Windows"
768 "filename contains ':', which is reserved on Windows"
746 >>> checkwinfilename("foo/bar/b\07la.txt")
769 >>> checkwinfilename("foo/bar/b\07la.txt")
747 "filename contains '\\x07', which is invalid on Windows"
770 "filename contains '\\x07', which is invalid on Windows"
748 >>> checkwinfilename("foo/bar/bla ")
771 >>> checkwinfilename("foo/bar/bla ")
749 "filename ends with ' ', which is not allowed on Windows"
772 "filename ends with ' ', which is not allowed on Windows"
750 >>> checkwinfilename("../bar")
773 >>> checkwinfilename("../bar")
751 >>> checkwinfilename("foo\\")
774 >>> checkwinfilename("foo\\")
752 "filename ends with '\\', which is invalid on Windows"
775 "filename ends with '\\', which is invalid on Windows"
753 >>> checkwinfilename("foo\\/bar")
776 >>> checkwinfilename("foo\\/bar")
754 "directory name ends with '\\', which is invalid on Windows"
777 "directory name ends with '\\', which is invalid on Windows"
755 '''
778 '''
756 if path.endswith('\\'):
779 if path.endswith('\\'):
757 return _("filename ends with '\\', which is invalid on Windows")
780 return _("filename ends with '\\', which is invalid on Windows")
758 if '\\/' in path:
781 if '\\/' in path:
759 return _("directory name ends with '\\', which is invalid on Windows")
782 return _("directory name ends with '\\', which is invalid on Windows")
760 for n in path.replace('\\', '/').split('/'):
783 for n in path.replace('\\', '/').split('/'):
761 if not n:
784 if not n:
762 continue
785 continue
763 for c in n:
786 for c in n:
764 if c in _winreservedchars:
787 if c in _winreservedchars:
765 return _("filename contains '%s', which is reserved "
788 return _("filename contains '%s', which is reserved "
766 "on Windows") % c
789 "on Windows") % c
767 if ord(c) <= 31:
790 if ord(c) <= 31:
768 return _("filename contains %r, which is invalid "
791 return _("filename contains %r, which is invalid "
769 "on Windows") % c
792 "on Windows") % c
770 base = n.split('.')[0]
793 base = n.split('.')[0]
771 if base and base.lower() in _winreservednames:
794 if base and base.lower() in _winreservednames:
772 return _("filename contains '%s', which is reserved "
795 return _("filename contains '%s', which is reserved "
773 "on Windows") % base
796 "on Windows") % base
774 t = n[-1]
797 t = n[-1]
775 if t in '. ' and n not in '..':
798 if t in '. ' and n not in '..':
776 return _("filename ends with '%s', which is not allowed "
799 return _("filename ends with '%s', which is not allowed "
777 "on Windows") % t
800 "on Windows") % t
778
801
779 if os.name == 'nt':
802 if os.name == 'nt':
780 checkosfilename = checkwinfilename
803 checkosfilename = checkwinfilename
781 else:
804 else:
782 checkosfilename = platform.checkosfilename
805 checkosfilename = platform.checkosfilename
783
806
784 def makelock(info, pathname):
807 def makelock(info, pathname):
785 try:
808 try:
786 return os.symlink(info, pathname)
809 return os.symlink(info, pathname)
787 except OSError, why:
810 except OSError, why:
788 if why.errno == errno.EEXIST:
811 if why.errno == errno.EEXIST:
789 raise
812 raise
790 except AttributeError: # no symlink in os
813 except AttributeError: # no symlink in os
791 pass
814 pass
792
815
793 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
816 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
794 os.write(ld, info)
817 os.write(ld, info)
795 os.close(ld)
818 os.close(ld)
796
819
797 def readlock(pathname):
820 def readlock(pathname):
798 try:
821 try:
799 return os.readlink(pathname)
822 return os.readlink(pathname)
800 except OSError, why:
823 except OSError, why:
801 if why.errno not in (errno.EINVAL, errno.ENOSYS):
824 if why.errno not in (errno.EINVAL, errno.ENOSYS):
802 raise
825 raise
803 except AttributeError: # no symlink in os
826 except AttributeError: # no symlink in os
804 pass
827 pass
805 fp = posixfile(pathname)
828 fp = posixfile(pathname)
806 r = fp.read()
829 r = fp.read()
807 fp.close()
830 fp.close()
808 return r
831 return r
809
832
810 def fstat(fp):
833 def fstat(fp):
811 '''stat file object that may not have fileno method.'''
834 '''stat file object that may not have fileno method.'''
812 try:
835 try:
813 return os.fstat(fp.fileno())
836 return os.fstat(fp.fileno())
814 except AttributeError:
837 except AttributeError:
815 return os.stat(fp.name)
838 return os.stat(fp.name)
816
839
817 # File system features
840 # File system features
818
841
819 def checkcase(path):
842 def checkcase(path):
820 """
843 """
821 Return true if the given path is on a case-sensitive filesystem
844 Return true if the given path is on a case-sensitive filesystem
822
845
823 Requires a path (like /foo/.hg) ending with a foldable final
846 Requires a path (like /foo/.hg) ending with a foldable final
824 directory component.
847 directory component.
825 """
848 """
826 s1 = os.stat(path)
849 s1 = os.stat(path)
827 d, b = os.path.split(path)
850 d, b = os.path.split(path)
828 b2 = b.upper()
851 b2 = b.upper()
829 if b == b2:
852 if b == b2:
830 b2 = b.lower()
853 b2 = b.lower()
831 if b == b2:
854 if b == b2:
832 return True # no evidence against case sensitivity
855 return True # no evidence against case sensitivity
833 p2 = os.path.join(d, b2)
856 p2 = os.path.join(d, b2)
834 try:
857 try:
835 s2 = os.stat(p2)
858 s2 = os.stat(p2)
836 if s2 == s1:
859 if s2 == s1:
837 return False
860 return False
838 return True
861 return True
839 except OSError:
862 except OSError:
840 return True
863 return True
841
864
842 try:
865 try:
843 import re2
866 import re2
844 _re2 = None
867 _re2 = None
845 except ImportError:
868 except ImportError:
846 _re2 = False
869 _re2 = False
847
870
848 class _re(object):
871 class _re(object):
849 def _checkre2(self):
872 def _checkre2(self):
850 global _re2
873 global _re2
851 try:
874 try:
852 # check if match works, see issue3964
875 # check if match works, see issue3964
853 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
876 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
854 except ImportError:
877 except ImportError:
855 _re2 = False
878 _re2 = False
856
879
857 def compile(self, pat, flags=0):
880 def compile(self, pat, flags=0):
858 '''Compile a regular expression, using re2 if possible
881 '''Compile a regular expression, using re2 if possible
859
882
860 For best performance, use only re2-compatible regexp features. The
883 For best performance, use only re2-compatible regexp features. The
861 only flags from the re module that are re2-compatible are
884 only flags from the re module that are re2-compatible are
862 IGNORECASE and MULTILINE.'''
885 IGNORECASE and MULTILINE.'''
863 if _re2 is None:
886 if _re2 is None:
864 self._checkre2()
887 self._checkre2()
865 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
888 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
866 if flags & remod.IGNORECASE:
889 if flags & remod.IGNORECASE:
867 pat = '(?i)' + pat
890 pat = '(?i)' + pat
868 if flags & remod.MULTILINE:
891 if flags & remod.MULTILINE:
869 pat = '(?m)' + pat
892 pat = '(?m)' + pat
870 try:
893 try:
871 return re2.compile(pat)
894 return re2.compile(pat)
872 except re2.error:
895 except re2.error:
873 pass
896 pass
874 return remod.compile(pat, flags)
897 return remod.compile(pat, flags)
875
898
876 @propertycache
899 @propertycache
877 def escape(self):
900 def escape(self):
878 '''Return the version of escape corresponding to self.compile.
901 '''Return the version of escape corresponding to self.compile.
879
902
880 This is imperfect because whether re2 or re is used for a particular
903 This is imperfect because whether re2 or re is used for a particular
881 function depends on the flags, etc, but it's the best we can do.
904 function depends on the flags, etc, but it's the best we can do.
882 '''
905 '''
883 global _re2
906 global _re2
884 if _re2 is None:
907 if _re2 is None:
885 self._checkre2()
908 self._checkre2()
886 if _re2:
909 if _re2:
887 return re2.escape
910 return re2.escape
888 else:
911 else:
889 return remod.escape
912 return remod.escape
890
913
891 re = _re()
914 re = _re()
892
915
893 _fspathcache = {}
916 _fspathcache = {}
894 def fspath(name, root):
917 def fspath(name, root):
895 '''Get name in the case stored in the filesystem
918 '''Get name in the case stored in the filesystem
896
919
897 The name should be relative to root, and be normcase-ed for efficiency.
920 The name should be relative to root, and be normcase-ed for efficiency.
898
921
899 Note that this function is unnecessary, and should not be
922 Note that this function is unnecessary, and should not be
900 called, for case-sensitive filesystems (simply because it's expensive).
923 called, for case-sensitive filesystems (simply because it's expensive).
901
924
902 The root should be normcase-ed, too.
925 The root should be normcase-ed, too.
903 '''
926 '''
904 def _makefspathcacheentry(dir):
927 def _makefspathcacheentry(dir):
905 return dict((normcase(n), n) for n in os.listdir(dir))
928 return dict((normcase(n), n) for n in os.listdir(dir))
906
929
907 seps = os.sep
930 seps = os.sep
908 if os.altsep:
931 if os.altsep:
909 seps = seps + os.altsep
932 seps = seps + os.altsep
910 # Protect backslashes. This gets silly very quickly.
933 # Protect backslashes. This gets silly very quickly.
911 seps.replace('\\','\\\\')
934 seps.replace('\\','\\\\')
912 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
935 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
913 dir = os.path.normpath(root)
936 dir = os.path.normpath(root)
914 result = []
937 result = []
915 for part, sep in pattern.findall(name):
938 for part, sep in pattern.findall(name):
916 if sep:
939 if sep:
917 result.append(sep)
940 result.append(sep)
918 continue
941 continue
919
942
920 if dir not in _fspathcache:
943 if dir not in _fspathcache:
921 _fspathcache[dir] = _makefspathcacheentry(dir)
944 _fspathcache[dir] = _makefspathcacheentry(dir)
922 contents = _fspathcache[dir]
945 contents = _fspathcache[dir]
923
946
924 found = contents.get(part)
947 found = contents.get(part)
925 if not found:
948 if not found:
926 # retry "once per directory" per "dirstate.walk" which
949 # retry "once per directory" per "dirstate.walk" which
927 # may take place for each patch of "hg qpush", for example
950 # may take place for each patch of "hg qpush", for example
928 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
951 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
929 found = contents.get(part)
952 found = contents.get(part)
930
953
931 result.append(found or part)
954 result.append(found or part)
932 dir = os.path.join(dir, part)
955 dir = os.path.join(dir, part)
933
956
934 return ''.join(result)
957 return ''.join(result)
935
958
936 def checknlink(testfile):
959 def checknlink(testfile):
937 '''check whether hardlink count reporting works properly'''
960 '''check whether hardlink count reporting works properly'''
938
961
939 # testfile may be open, so we need a separate file for checking to
962 # testfile may be open, so we need a separate file for checking to
940 # work around issue2543 (or testfile may get lost on Samba shares)
963 # work around issue2543 (or testfile may get lost on Samba shares)
941 f1 = testfile + ".hgtmp1"
964 f1 = testfile + ".hgtmp1"
942 if os.path.lexists(f1):
965 if os.path.lexists(f1):
943 return False
966 return False
944 try:
967 try:
945 posixfile(f1, 'w').close()
968 posixfile(f1, 'w').close()
946 except IOError:
969 except IOError:
947 return False
970 return False
948
971
949 f2 = testfile + ".hgtmp2"
972 f2 = testfile + ".hgtmp2"
950 fd = None
973 fd = None
951 try:
974 try:
952 try:
975 try:
953 oslink(f1, f2)
976 oslink(f1, f2)
954 except OSError:
977 except OSError:
955 return False
978 return False
956
979
957 # nlinks() may behave differently for files on Windows shares if
980 # nlinks() may behave differently for files on Windows shares if
958 # the file is open.
981 # the file is open.
959 fd = posixfile(f2)
982 fd = posixfile(f2)
960 return nlinks(f2) > 1
983 return nlinks(f2) > 1
961 finally:
984 finally:
962 if fd is not None:
985 if fd is not None:
963 fd.close()
986 fd.close()
964 for f in (f1, f2):
987 for f in (f1, f2):
965 try:
988 try:
966 os.unlink(f)
989 os.unlink(f)
967 except OSError:
990 except OSError:
968 pass
991 pass
969
992
970 def endswithsep(path):
993 def endswithsep(path):
971 '''Check path ends with os.sep or os.altsep.'''
994 '''Check path ends with os.sep or os.altsep.'''
972 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
995 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
973
996
974 def splitpath(path):
997 def splitpath(path):
975 '''Split path by os.sep.
998 '''Split path by os.sep.
976 Note that this function does not use os.altsep because this is
999 Note that this function does not use os.altsep because this is
977 an alternative of simple "xxx.split(os.sep)".
1000 an alternative of simple "xxx.split(os.sep)".
978 It is recommended to use os.path.normpath() before using this
1001 It is recommended to use os.path.normpath() before using this
979 function if needed.'''
1002 function if needed.'''
980 return path.split(os.sep)
1003 return path.split(os.sep)
981
1004
982 def gui():
1005 def gui():
983 '''Are we running in a GUI?'''
1006 '''Are we running in a GUI?'''
984 if sys.platform == 'darwin':
1007 if sys.platform == 'darwin':
985 if 'SSH_CONNECTION' in os.environ:
1008 if 'SSH_CONNECTION' in os.environ:
986 # handle SSH access to a box where the user is logged in
1009 # handle SSH access to a box where the user is logged in
987 return False
1010 return False
988 elif getattr(osutil, 'isgui', None):
1011 elif getattr(osutil, 'isgui', None):
989 # check if a CoreGraphics session is available
1012 # check if a CoreGraphics session is available
990 return osutil.isgui()
1013 return osutil.isgui()
991 else:
1014 else:
992 # pure build; use a safe default
1015 # pure build; use a safe default
993 return True
1016 return True
994 else:
1017 else:
995 return os.name == "nt" or os.environ.get("DISPLAY")
1018 return os.name == "nt" or os.environ.get("DISPLAY")
996
1019
997 def mktempcopy(name, emptyok=False, createmode=None):
1020 def mktempcopy(name, emptyok=False, createmode=None):
998 """Create a temporary file with the same contents from name
1021 """Create a temporary file with the same contents from name
999
1022
1000 The permission bits are copied from the original file.
1023 The permission bits are copied from the original file.
1001
1024
1002 If the temporary file is going to be truncated immediately, you
1025 If the temporary file is going to be truncated immediately, you
1003 can use emptyok=True as an optimization.
1026 can use emptyok=True as an optimization.
1004
1027
1005 Returns the name of the temporary file.
1028 Returns the name of the temporary file.
1006 """
1029 """
1007 d, fn = os.path.split(name)
1030 d, fn = os.path.split(name)
1008 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1031 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1009 os.close(fd)
1032 os.close(fd)
1010 # Temporary files are created with mode 0600, which is usually not
1033 # Temporary files are created with mode 0600, which is usually not
1011 # what we want. If the original file already exists, just copy
1034 # what we want. If the original file already exists, just copy
1012 # its mode. Otherwise, manually obey umask.
1035 # its mode. Otherwise, manually obey umask.
1013 copymode(name, temp, createmode)
1036 copymode(name, temp, createmode)
1014 if emptyok:
1037 if emptyok:
1015 return temp
1038 return temp
1016 try:
1039 try:
1017 try:
1040 try:
1018 ifp = posixfile(name, "rb")
1041 ifp = posixfile(name, "rb")
1019 except IOError, inst:
1042 except IOError, inst:
1020 if inst.errno == errno.ENOENT:
1043 if inst.errno == errno.ENOENT:
1021 return temp
1044 return temp
1022 if not getattr(inst, 'filename', None):
1045 if not getattr(inst, 'filename', None):
1023 inst.filename = name
1046 inst.filename = name
1024 raise
1047 raise
1025 ofp = posixfile(temp, "wb")
1048 ofp = posixfile(temp, "wb")
1026 for chunk in filechunkiter(ifp):
1049 for chunk in filechunkiter(ifp):
1027 ofp.write(chunk)
1050 ofp.write(chunk)
1028 ifp.close()
1051 ifp.close()
1029 ofp.close()
1052 ofp.close()
1030 except: # re-raises
1053 except: # re-raises
1031 try: os.unlink(temp)
1054 try: os.unlink(temp)
1032 except OSError: pass
1055 except OSError: pass
1033 raise
1056 raise
1034 return temp
1057 return temp
1035
1058
1036 class atomictempfile(object):
1059 class atomictempfile(object):
1037 '''writable file object that atomically updates a file
1060 '''writable file object that atomically updates a file
1038
1061
1039 All writes will go to a temporary copy of the original file. Call
1062 All writes will go to a temporary copy of the original file. Call
1040 close() when you are done writing, and atomictempfile will rename
1063 close() when you are done writing, and atomictempfile will rename
1041 the temporary copy to the original name, making the changes
1064 the temporary copy to the original name, making the changes
1042 visible. If the object is destroyed without being closed, all your
1065 visible. If the object is destroyed without being closed, all your
1043 writes are discarded.
1066 writes are discarded.
1044 '''
1067 '''
1045 def __init__(self, name, mode='w+b', createmode=None):
1068 def __init__(self, name, mode='w+b', createmode=None):
1046 self.__name = name # permanent name
1069 self.__name = name # permanent name
1047 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1070 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1048 createmode=createmode)
1071 createmode=createmode)
1049 self._fp = posixfile(self._tempname, mode)
1072 self._fp = posixfile(self._tempname, mode)
1050
1073
1051 # delegated methods
1074 # delegated methods
1052 self.write = self._fp.write
1075 self.write = self._fp.write
1053 self.seek = self._fp.seek
1076 self.seek = self._fp.seek
1054 self.tell = self._fp.tell
1077 self.tell = self._fp.tell
1055 self.fileno = self._fp.fileno
1078 self.fileno = self._fp.fileno
1056
1079
1057 def close(self):
1080 def close(self):
1058 if not self._fp.closed:
1081 if not self._fp.closed:
1059 self._fp.close()
1082 self._fp.close()
1060 rename(self._tempname, localpath(self.__name))
1083 rename(self._tempname, localpath(self.__name))
1061
1084
1062 def discard(self):
1085 def discard(self):
1063 if not self._fp.closed:
1086 if not self._fp.closed:
1064 try:
1087 try:
1065 os.unlink(self._tempname)
1088 os.unlink(self._tempname)
1066 except OSError:
1089 except OSError:
1067 pass
1090 pass
1068 self._fp.close()
1091 self._fp.close()
1069
1092
1070 def __del__(self):
1093 def __del__(self):
1071 if safehasattr(self, '_fp'): # constructor actually did something
1094 if safehasattr(self, '_fp'): # constructor actually did something
1072 self.discard()
1095 self.discard()
1073
1096
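A minimal sketch of the intended atomictempfile workflow (illustrative only; it assumes the module is importable as mercurial.util and 'data.txt' is a made-up target file). Readers never see a partial 'data.txt': either close() renames the finished copy into place, or the temporary copy is discarded.

    from mercurial import util

    f = util.atomictempfile('data.txt')
    f.write('all or nothing\n')
    f.close()    # rename the temporary copy over 'data.txt'
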
1074 def makedirs(name, mode=None, notindexed=False):
1097 def makedirs(name, mode=None, notindexed=False):
1075 """recursive directory creation with parent mode inheritance"""
1098 """recursive directory creation with parent mode inheritance"""
1076 try:
1099 try:
1077 makedir(name, notindexed)
1100 makedir(name, notindexed)
1078 except OSError, err:
1101 except OSError, err:
1079 if err.errno == errno.EEXIST:
1102 if err.errno == errno.EEXIST:
1080 return
1103 return
1081 if err.errno != errno.ENOENT or not name:
1104 if err.errno != errno.ENOENT or not name:
1082 raise
1105 raise
1083 parent = os.path.dirname(os.path.abspath(name))
1106 parent = os.path.dirname(os.path.abspath(name))
1084 if parent == name:
1107 if parent == name:
1085 raise
1108 raise
1086 makedirs(parent, mode, notindexed)
1109 makedirs(parent, mode, notindexed)
1087 makedir(name, notindexed)
1110 makedir(name, notindexed)
1088 if mode is not None:
1111 if mode is not None:
1089 os.chmod(name, mode)
1112 os.chmod(name, mode)
1090
1113
1091 def ensuredirs(name, mode=None, notindexed=False):
1114 def ensuredirs(name, mode=None, notindexed=False):
1092 """race-safe recursive directory creation
1115 """race-safe recursive directory creation
1093
1116
1094 Newly created directories are marked as "not to be indexed by
1117 Newly created directories are marked as "not to be indexed by
1095 the content indexing service", if ``notindexed`` is specified
1118 the content indexing service", if ``notindexed`` is specified
1096 for "write" mode access.
1119 for "write" mode access.
1097 """
1120 """
1098 if os.path.isdir(name):
1121 if os.path.isdir(name):
1099 return
1122 return
1100 parent = os.path.dirname(os.path.abspath(name))
1123 parent = os.path.dirname(os.path.abspath(name))
1101 if parent != name:
1124 if parent != name:
1102 ensuredirs(parent, mode, notindexed)
1125 ensuredirs(parent, mode, notindexed)
1103 try:
1126 try:
1104 makedir(name, notindexed)
1127 makedir(name, notindexed)
1105 except OSError, err:
1128 except OSError, err:
1106 if err.errno == errno.EEXIST and os.path.isdir(name):
1129 if err.errno == errno.EEXIST and os.path.isdir(name):
1107 # someone else seems to have won a directory creation race
1130 # someone else seems to have won a directory creation race
1108 return
1131 return
1109 raise
1132 raise
1110 if mode is not None:
1133 if mode is not None:
1111 os.chmod(name, mode)
1134 os.chmod(name, mode)
1112
1135
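An illustrative comparison of the two directory helpers above (assuming mercurial.util is importable; the 'cache/objects' path is hypothetical). Both tolerate the directory already existing; ensuredirs additionally tolerates another process creating it mid-call.

    from mercurial import util

    util.makedirs('cache/objects', mode=0755)   # creates parents, ok if present
    util.ensuredirs('cache/objects')            # safe under concurrent creation
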
1113 def readfile(path):
1136 def readfile(path):
1114 fp = open(path, 'rb')
1137 fp = open(path, 'rb')
1115 try:
1138 try:
1116 return fp.read()
1139 return fp.read()
1117 finally:
1140 finally:
1118 fp.close()
1141 fp.close()
1119
1142
1120 def writefile(path, text):
1143 def writefile(path, text):
1121 fp = open(path, 'wb')
1144 fp = open(path, 'wb')
1122 try:
1145 try:
1123 fp.write(text)
1146 fp.write(text)
1124 finally:
1147 finally:
1125 fp.close()
1148 fp.close()
1126
1149
1127 def appendfile(path, text):
1150 def appendfile(path, text):
1128 fp = open(path, 'ab')
1151 fp = open(path, 'ab')
1129 try:
1152 try:
1130 fp.write(text)
1153 fp.write(text)
1131 finally:
1154 finally:
1132 fp.close()
1155 fp.close()
1133
1156
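A small sketch of the three whole-file helpers above (illustrative; assumes mercurial.util is importable and 'notes.txt' is a made-up path):

    from mercurial import util

    util.writefile('notes.txt', 'first\n')
    util.appendfile('notes.txt', 'second\n')
    util.readfile('notes.txt')    # 'first\nsecond\n'
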
1134 class chunkbuffer(object):
1157 class chunkbuffer(object):
1135 """Allow arbitrary sized chunks of data to be efficiently read from an
1158 """Allow arbitrary sized chunks of data to be efficiently read from an
1136 iterator over chunks of arbitrary size."""
1159 iterator over chunks of arbitrary size."""
1137
1160
1138 def __init__(self, in_iter):
1161 def __init__(self, in_iter):
1139 """in_iter is the iterator that's iterating over the input chunks.
1162 """in_iter is the iterator that's iterating over the input chunks.
1140 targetsize is how big a buffer to try to maintain."""
1163 targetsize is how big a buffer to try to maintain."""
1141 def splitbig(chunks):
1164 def splitbig(chunks):
1142 for chunk in chunks:
1165 for chunk in chunks:
1143 if len(chunk) > 2**20:
1166 if len(chunk) > 2**20:
1144 pos = 0
1167 pos = 0
1145 while pos < len(chunk):
1168 while pos < len(chunk):
1146 end = pos + 2 ** 18
1169 end = pos + 2 ** 18
1147 yield chunk[pos:end]
1170 yield chunk[pos:end]
1148 pos = end
1171 pos = end
1149 else:
1172 else:
1150 yield chunk
1173 yield chunk
1151 self.iter = splitbig(in_iter)
1174 self.iter = splitbig(in_iter)
1152 self._queue = deque()
1175 self._queue = deque()
1153
1176
1154 def read(self, l=None):
1177 def read(self, l=None):
1155 """Read L bytes of data from the iterator of chunks of data.
1178 """Read L bytes of data from the iterator of chunks of data.
1156 Returns less than L bytes if the iterator runs dry.
1179 Returns less than L bytes if the iterator runs dry.
1157
1180
1158 If size parameter is omitted, read everything"""
1181 If size parameter is omitted, read everything"""
1159 left = l
1182 left = l
1160 buf = []
1183 buf = []
1161 queue = self._queue
1184 queue = self._queue
1162 while left is None or left > 0:
1185 while left is None or left > 0:
1163 # refill the queue
1186 # refill the queue
1164 if not queue:
1187 if not queue:
1165 target = 2**18
1188 target = 2**18
1166 for chunk in self.iter:
1189 for chunk in self.iter:
1167 queue.append(chunk)
1190 queue.append(chunk)
1168 target -= len(chunk)
1191 target -= len(chunk)
1169 if target <= 0:
1192 if target <= 0:
1170 break
1193 break
1171 if not queue:
1194 if not queue:
1172 break
1195 break
1173
1196
1174 chunk = queue.popleft()
1197 chunk = queue.popleft()
1175 if left is not None:
1198 if left is not None:
1176 left -= len(chunk)
1199 left -= len(chunk)
1177 if left is not None and left < 0:
1200 if left is not None and left < 0:
1178 queue.appendleft(chunk[left:])
1201 queue.appendleft(chunk[left:])
1179 buf.append(chunk[:left])
1202 buf.append(chunk[:left])
1180 else:
1203 else:
1181 buf.append(chunk)
1204 buf.append(chunk)
1182
1205
1183 return ''.join(buf)
1206 return ''.join(buf)
1184
1207
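A sketch of reading fixed-size pieces through chunkbuffer regardless of how the underlying iterator slices its data (illustrative only; assumes mercurial.util is importable):

    from mercurial import util

    def chunks():
        yield 'abc'
        yield 'defgh'
        yield 'ij'

    buf = util.chunkbuffer(chunks())
    buf.read(4)    # 'abcd', reassembled across chunk boundaries
    buf.read()     # 'efghij', everything that is left
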
1185 def filechunkiter(f, size=65536, limit=None):
1208 def filechunkiter(f, size=65536, limit=None):
1186 """Create a generator that produces the data in the file size
1209 """Create a generator that produces the data in the file size
1187 (default 65536) bytes at a time, up to optional limit (default is
1210 (default 65536) bytes at a time, up to optional limit (default is
1188 to read all data). Chunks may be less than size bytes if the
1211 to read all data). Chunks may be less than size bytes if the
1189 chunk is the last chunk in the file, or the file is a socket or
1212 chunk is the last chunk in the file, or the file is a socket or
1190 some other type of file that sometimes reads less data than is
1213 some other type of file that sometimes reads less data than is
1191 requested."""
1214 requested."""
1192 assert size >= 0
1215 assert size >= 0
1193 assert limit is None or limit >= 0
1216 assert limit is None or limit >= 0
1194 while True:
1217 while True:
1195 if limit is None:
1218 if limit is None:
1196 nbytes = size
1219 nbytes = size
1197 else:
1220 else:
1198 nbytes = min(limit, size)
1221 nbytes = min(limit, size)
1199 s = nbytes and f.read(nbytes)
1222 s = nbytes and f.read(nbytes)
1200 if not s:
1223 if not s:
1201 break
1224 break
1202 if limit:
1225 if limit:
1203 limit -= len(s)
1226 limit -= len(s)
1204 yield s
1227 yield s
1205
1228
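For example, a file can be streamed without loading it whole (a sketch; assumes mercurial.util is importable and 'big.bin' is a made-up file):

    from mercurial import util

    fp = open('big.bin', 'rb')
    try:
        copied = 0
        for chunk in util.filechunkiter(fp, size=8192):
            copied += len(chunk)
    finally:
        fp.close()
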
1206 def makedate(timestamp=None):
1229 def makedate(timestamp=None):
1207 '''Return a unix timestamp (or the current time) as a (unixtime,
1230 '''Return a unix timestamp (or the current time) as a (unixtime,
1208 offset) tuple based on the local timezone.'''
1231 offset) tuple based on the local timezone.'''
1209 if timestamp is None:
1232 if timestamp is None:
1210 timestamp = time.time()
1233 timestamp = time.time()
1211 if timestamp < 0:
1234 if timestamp < 0:
1212 hint = _("check your clock")
1235 hint = _("check your clock")
1213 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1236 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1214 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1237 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1215 datetime.datetime.fromtimestamp(timestamp))
1238 datetime.datetime.fromtimestamp(timestamp))
1216 tz = delta.days * 86400 + delta.seconds
1239 tz = delta.days * 86400 + delta.seconds
1217 return timestamp, tz
1240 return timestamp, tz
1218
1241
1219 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1242 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1220 """represent a (unixtime, offset) tuple as a localized time.
1243 """represent a (unixtime, offset) tuple as a localized time.
1221 unixtime is seconds since the epoch, and offset is the time zone's
1244 unixtime is seconds since the epoch, and offset is the time zone's
1222 number of seconds away from UTC. The time zone is appended to the
1245 number of seconds away from UTC. The time zone is appended to the
1223 string only if the format asks for it with %1, %2 or %z."""
1246 string only if the format asks for it with %1, %2 or %z."""
1224 t, tz = date or makedate()
1247 t, tz = date or makedate()
1225 if t < 0:
1248 if t < 0:
1226 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1249 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1227 tz = 0
1250 tz = 0
1228 if "%1" in format or "%2" in format or "%z" in format:
1251 if "%1" in format or "%2" in format or "%z" in format:
1229 sign = (tz > 0) and "-" or "+"
1252 sign = (tz > 0) and "-" or "+"
1230 minutes = abs(tz) // 60
1253 minutes = abs(tz) // 60
1231 format = format.replace("%z", "%1%2")
1254 format = format.replace("%z", "%1%2")
1232 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1255 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1233 format = format.replace("%2", "%02d" % (minutes % 60))
1256 format = format.replace("%2", "%02d" % (minutes % 60))
1234 try:
1257 try:
1235 t = time.gmtime(float(t) - tz)
1258 t = time.gmtime(float(t) - tz)
1236 except ValueError:
1259 except ValueError:
1237 # time was out of range
1260 # time was out of range
1238 t = time.gmtime(sys.maxint)
1261 t = time.gmtime(sys.maxint)
1239 s = time.strftime(format, t)
1262 s = time.strftime(format, t)
1240 return s
1263 return s
1241
1264
1242 def shortdate(date=None):
1265 def shortdate(date=None):
1243 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1266 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1244 return datestr(date, format='%Y-%m-%d')
1267 return datestr(date, format='%Y-%m-%d')
1245
1268
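A short sketch tying the three date helpers together (illustrative; the exact output depends on the local clock and time zone, and mercurial.util is assumed importable):

    from mercurial import util

    when = util.makedate()    # (unixtime, offset) for "now"
    util.datestr(when)        # e.g. 'Fri Dec 19 15:40:00 2014 +0100'
    util.shortdate(when)      # e.g. '2014-12-19'
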
1246 def strdate(string, format, defaults=[]):
1269 def strdate(string, format, defaults=[]):
1247 """parse a localized time string and return a (unixtime, offset) tuple.
1270 """parse a localized time string and return a (unixtime, offset) tuple.
1248 if the string cannot be parsed, ValueError is raised."""
1271 if the string cannot be parsed, ValueError is raised."""
1249 def timezone(string):
1272 def timezone(string):
1250 tz = string.split()[-1]
1273 tz = string.split()[-1]
1251 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1274 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1252 sign = (tz[0] == "+") and 1 or -1
1275 sign = (tz[0] == "+") and 1 or -1
1253 hours = int(tz[1:3])
1276 hours = int(tz[1:3])
1254 minutes = int(tz[3:5])
1277 minutes = int(tz[3:5])
1255 return -sign * (hours * 60 + minutes) * 60
1278 return -sign * (hours * 60 + minutes) * 60
1256 if tz == "GMT" or tz == "UTC":
1279 if tz == "GMT" or tz == "UTC":
1257 return 0
1280 return 0
1258 return None
1281 return None
1259
1282
1260 # NOTE: unixtime = localunixtime + offset
1283 # NOTE: unixtime = localunixtime + offset
1261 offset, date = timezone(string), string
1284 offset, date = timezone(string), string
1262 if offset is not None:
1285 if offset is not None:
1263 date = " ".join(string.split()[:-1])
1286 date = " ".join(string.split()[:-1])
1264
1287
1265 # add missing elements from defaults
1288 # add missing elements from defaults
1266 usenow = False # default to using biased defaults
1289 usenow = False # default to using biased defaults
1267 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1290 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1268 found = [True for p in part if ("%"+p) in format]
1291 found = [True for p in part if ("%"+p) in format]
1269 if not found:
1292 if not found:
1270 date += "@" + defaults[part][usenow]
1293 date += "@" + defaults[part][usenow]
1271 format += "@%" + part[0]
1294 format += "@%" + part[0]
1272 else:
1295 else:
1273 # We've found a specific time element, less specific time
1296 # We've found a specific time element, less specific time
1274 # elements are relative to today
1297 # elements are relative to today
1275 usenow = True
1298 usenow = True
1276
1299
1277 timetuple = time.strptime(date, format)
1300 timetuple = time.strptime(date, format)
1278 localunixtime = int(calendar.timegm(timetuple))
1301 localunixtime = int(calendar.timegm(timetuple))
1279 if offset is None:
1302 if offset is None:
1280 # local timezone
1303 # local timezone
1281 unixtime = int(time.mktime(timetuple))
1304 unixtime = int(time.mktime(timetuple))
1282 offset = unixtime - localunixtime
1305 offset = unixtime - localunixtime
1283 else:
1306 else:
1284 unixtime = localunixtime + offset
1307 unixtime = localunixtime + offset
1285 return unixtime, offset
1308 return unixtime, offset
1286
1309
1287 def parsedate(date, formats=None, bias={}):
1310 def parsedate(date, formats=None, bias={}):
1288 """parse a localized date/time and return a (unixtime, offset) tuple.
1311 """parse a localized date/time and return a (unixtime, offset) tuple.
1289
1312
1290 The date may be a "unixtime offset" string or in one of the specified
1313 The date may be a "unixtime offset" string or in one of the specified
1291 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1314 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1292
1315
1293 >>> parsedate(' today ') == parsedate(\
1316 >>> parsedate(' today ') == parsedate(\
1294 datetime.date.today().strftime('%b %d'))
1317 datetime.date.today().strftime('%b %d'))
1295 True
1318 True
1296 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1319 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1297 datetime.timedelta(days=1)\
1320 datetime.timedelta(days=1)\
1298 ).strftime('%b %d'))
1321 ).strftime('%b %d'))
1299 True
1322 True
1300 >>> now, tz = makedate()
1323 >>> now, tz = makedate()
1301 >>> strnow, strtz = parsedate('now')
1324 >>> strnow, strtz = parsedate('now')
1302 >>> (strnow - now) < 1
1325 >>> (strnow - now) < 1
1303 True
1326 True
1304 >>> tz == strtz
1327 >>> tz == strtz
1305 True
1328 True
1306 """
1329 """
1307 if not date:
1330 if not date:
1308 return 0, 0
1331 return 0, 0
1309 if isinstance(date, tuple) and len(date) == 2:
1332 if isinstance(date, tuple) and len(date) == 2:
1310 return date
1333 return date
1311 if not formats:
1334 if not formats:
1312 formats = defaultdateformats
1335 formats = defaultdateformats
1313 date = date.strip()
1336 date = date.strip()
1314
1337
1315 if date == _('now'):
1338 if date == _('now'):
1316 return makedate()
1339 return makedate()
1317 if date == _('today'):
1340 if date == _('today'):
1318 date = datetime.date.today().strftime('%b %d')
1341 date = datetime.date.today().strftime('%b %d')
1319 elif date == _('yesterday'):
1342 elif date == _('yesterday'):
1320 date = (datetime.date.today() -
1343 date = (datetime.date.today() -
1321 datetime.timedelta(days=1)).strftime('%b %d')
1344 datetime.timedelta(days=1)).strftime('%b %d')
1322
1345
1323 try:
1346 try:
1324 when, offset = map(int, date.split(' '))
1347 when, offset = map(int, date.split(' '))
1325 except ValueError:
1348 except ValueError:
1326 # fill out defaults
1349 # fill out defaults
1327 now = makedate()
1350 now = makedate()
1328 defaults = {}
1351 defaults = {}
1329 for part in ("d", "mb", "yY", "HI", "M", "S"):
1352 for part in ("d", "mb", "yY", "HI", "M", "S"):
1330 # this piece is for rounding the specific end of unknowns
1353 # this piece is for rounding the specific end of unknowns
1331 b = bias.get(part)
1354 b = bias.get(part)
1332 if b is None:
1355 if b is None:
1333 if part[0] in "HMS":
1356 if part[0] in "HMS":
1334 b = "00"
1357 b = "00"
1335 else:
1358 else:
1336 b = "0"
1359 b = "0"
1337
1360
1338 # this piece is for matching the generic end to today's date
1361 # this piece is for matching the generic end to today's date
1339 n = datestr(now, "%" + part[0])
1362 n = datestr(now, "%" + part[0])
1340
1363
1341 defaults[part] = (b, n)
1364 defaults[part] = (b, n)
1342
1365
1343 for format in formats:
1366 for format in formats:
1344 try:
1367 try:
1345 when, offset = strdate(date, format, defaults)
1368 when, offset = strdate(date, format, defaults)
1346 except (ValueError, OverflowError):
1369 except (ValueError, OverflowError):
1347 pass
1370 pass
1348 else:
1371 else:
1349 break
1372 break
1350 else:
1373 else:
1351 raise Abort(_('invalid date: %r') % date)
1374 raise Abort(_('invalid date: %r') % date)
1352 # validate explicit (probably user-specified) date and
1375 # validate explicit (probably user-specified) date and
1353 # time zone offset. values must fit in signed 32 bits for
1376 # time zone offset. values must fit in signed 32 bits for
1354 # current 32-bit linux runtimes. timezones go from UTC-12
1377 # current 32-bit linux runtimes. timezones go from UTC-12
1355 # to UTC+14
1378 # to UTC+14
1356 if abs(when) > 0x7fffffff:
1379 if abs(when) > 0x7fffffff:
1357 raise Abort(_('date exceeds 32 bits: %d') % when)
1380 raise Abort(_('date exceeds 32 bits: %d') % when)
1358 if when < 0:
1381 if when < 0:
1359 raise Abort(_('negative date value: %d') % when)
1382 raise Abort(_('negative date value: %d') % when)
1360 if offset < -50400 or offset > 43200:
1383 if offset < -50400 or offset > 43200:
1361 raise Abort(_('impossible time zone offset: %d') % offset)
1384 raise Abort(_('impossible time zone offset: %d') % offset)
1362 return when, offset
1385 return when, offset
1363
1386
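Two more illustrative inputs for parsedate (a sketch; the bare '<unixtime> <offset>' form is handled before any format matching, and mercurial.util is assumed importable):

    from mercurial import util

    util.parsedate('1165432709 18000')   # (1165432709, 18000)
    util.parsedate('10:30:00')           # a time today, via defaultdateformats
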
1364 def matchdate(date):
1387 def matchdate(date):
1365 """Return a function that matches a given date match specifier
1388 """Return a function that matches a given date match specifier
1366
1389
1367 Formats include:
1390 Formats include:
1368
1391
1369 '{date}' match a given date to the accuracy provided
1392 '{date}' match a given date to the accuracy provided
1370
1393
1371 '<{date}' on or before a given date
1394 '<{date}' on or before a given date
1372
1395
1373 '>{date}' on or after a given date
1396 '>{date}' on or after a given date
1374
1397
1375 >>> p1 = parsedate("10:29:59")
1398 >>> p1 = parsedate("10:29:59")
1376 >>> p2 = parsedate("10:30:00")
1399 >>> p2 = parsedate("10:30:00")
1377 >>> p3 = parsedate("10:30:59")
1400 >>> p3 = parsedate("10:30:59")
1378 >>> p4 = parsedate("10:31:00")
1401 >>> p4 = parsedate("10:31:00")
1379 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1402 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1380 >>> f = matchdate("10:30")
1403 >>> f = matchdate("10:30")
1381 >>> f(p1[0])
1404 >>> f(p1[0])
1382 False
1405 False
1383 >>> f(p2[0])
1406 >>> f(p2[0])
1384 True
1407 True
1385 >>> f(p3[0])
1408 >>> f(p3[0])
1386 True
1409 True
1387 >>> f(p4[0])
1410 >>> f(p4[0])
1388 False
1411 False
1389 >>> f(p5[0])
1412 >>> f(p5[0])
1390 False
1413 False
1391 """
1414 """
1392
1415
1393 def lower(date):
1416 def lower(date):
1394 d = {'mb': "1", 'd': "1"}
1417 d = {'mb': "1", 'd': "1"}
1395 return parsedate(date, extendeddateformats, d)[0]
1418 return parsedate(date, extendeddateformats, d)[0]
1396
1419
1397 def upper(date):
1420 def upper(date):
1398 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1421 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1399 for days in ("31", "30", "29"):
1422 for days in ("31", "30", "29"):
1400 try:
1423 try:
1401 d["d"] = days
1424 d["d"] = days
1402 return parsedate(date, extendeddateformats, d)[0]
1425 return parsedate(date, extendeddateformats, d)[0]
1403 except Abort:
1426 except Abort:
1404 pass
1427 pass
1405 d["d"] = "28"
1428 d["d"] = "28"
1406 return parsedate(date, extendeddateformats, d)[0]
1429 return parsedate(date, extendeddateformats, d)[0]
1407
1430
1408 date = date.strip()
1431 date = date.strip()
1409
1432
1410 if not date:
1433 if not date:
1411 raise Abort(_("dates cannot consist entirely of whitespace"))
1434 raise Abort(_("dates cannot consist entirely of whitespace"))
1412 elif date[0] == "<":
1435 elif date[0] == "<":
1413 if not date[1:]:
1436 if not date[1:]:
1414 raise Abort(_("invalid day spec, use '<DATE'"))
1437 raise Abort(_("invalid day spec, use '<DATE'"))
1415 when = upper(date[1:])
1438 when = upper(date[1:])
1416 return lambda x: x <= when
1439 return lambda x: x <= when
1417 elif date[0] == ">":
1440 elif date[0] == ">":
1418 if not date[1:]:
1441 if not date[1:]:
1419 raise Abort(_("invalid day spec, use '>DATE'"))
1442 raise Abort(_("invalid day spec, use '>DATE'"))
1420 when = lower(date[1:])
1443 when = lower(date[1:])
1421 return lambda x: x >= when
1444 return lambda x: x >= when
1422 elif date[0] == "-":
1445 elif date[0] == "-":
1423 try:
1446 try:
1424 days = int(date[1:])
1447 days = int(date[1:])
1425 except ValueError:
1448 except ValueError:
1426 raise Abort(_("invalid day spec: %s") % date[1:])
1449 raise Abort(_("invalid day spec: %s") % date[1:])
1427 if days < 0:
1450 if days < 0:
1428 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1451 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1429 % date[1:])
1452 % date[1:])
1430 when = makedate()[0] - days * 3600 * 24
1453 when = makedate()[0] - days * 3600 * 24
1431 return lambda x: x >= when
1454 return lambda x: x >= when
1432 elif " to " in date:
1455 elif " to " in date:
1433 a, b = date.split(" to ")
1456 a, b = date.split(" to ")
1434 start, stop = lower(a), upper(b)
1457 start, stop = lower(a), upper(b)
1435 return lambda x: x >= start and x <= stop
1458 return lambda x: x >= start and x <= stop
1436 else:
1459 else:
1437 start, stop = lower(date), upper(date)
1460 start, stop = lower(date), upper(date)
1438 return lambda x: x >= start and x <= stop
1461 return lambda x: x >= start and x <= stop
1439
1462
1440 def shortuser(user):
1463 def shortuser(user):
1441 """Return a short representation of a user name or email address."""
1464 """Return a short representation of a user name or email address."""
1442 f = user.find('@')
1465 f = user.find('@')
1443 if f >= 0:
1466 if f >= 0:
1444 user = user[:f]
1467 user = user[:f]
1445 f = user.find('<')
1468 f = user.find('<')
1446 if f >= 0:
1469 if f >= 0:
1447 user = user[f + 1:]
1470 user = user[f + 1:]
1448 f = user.find(' ')
1471 f = user.find(' ')
1449 if f >= 0:
1472 if f >= 0:
1450 user = user[:f]
1473 user = user[:f]
1451 f = user.find('.')
1474 f = user.find('.')
1452 if f >= 0:
1475 if f >= 0:
1453 user = user[:f]
1476 user = user[:f]
1454 return user
1477 return user
1455
1478
1456 def emailuser(user):
1479 def emailuser(user):
1457 """Return the user portion of an email address."""
1480 """Return the user portion of an email address."""
1458 f = user.find('@')
1481 f = user.find('@')
1459 if f >= 0:
1482 if f >= 0:
1460 user = user[:f]
1483 user = user[:f]
1461 f = user.find('<')
1484 f = user.find('<')
1462 if f >= 0:
1485 if f >= 0:
1463 user = user[f + 1:]
1486 user = user[f + 1:]
1464 return user
1487 return user
1465
1488
1466 def email(author):
1489 def email(author):
1467 '''get email of author.'''
1490 '''get email of author.'''
1468 r = author.find('>')
1491 r = author.find('>')
1469 if r == -1:
1492 if r == -1:
1470 r = None
1493 r = None
1471 return author[author.find('<') + 1:r]
1494 return author[author.find('<') + 1:r]
1472
1495
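How the three author helpers above carve up a typical author string (a sketch; the author value is made up and mercurial.util is assumed importable):

    from mercurial import util

    author = 'Joe User <joe.user@example.com>'
    util.email(author)        # 'joe.user@example.com'
    util.emailuser(author)    # 'joe.user'
    util.shortuser(author)    # 'joe'
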
1473 def ellipsis(text, maxlength=400):
1496 def ellipsis(text, maxlength=400):
1474 """Trim string to at most maxlength (default: 400) columns in display."""
1497 """Trim string to at most maxlength (default: 400) columns in display."""
1475 return encoding.trim(text, maxlength, ellipsis='...')
1498 return encoding.trim(text, maxlength, ellipsis='...')
1476
1499
1477 def unitcountfn(*unittable):
1500 def unitcountfn(*unittable):
1478 '''return a function that renders a readable count of some quantity'''
1501 '''return a function that renders a readable count of some quantity'''
1479
1502
1480 def go(count):
1503 def go(count):
1481 for multiplier, divisor, format in unittable:
1504 for multiplier, divisor, format in unittable:
1482 if count >= divisor * multiplier:
1505 if count >= divisor * multiplier:
1483 return format % (count / float(divisor))
1506 return format % (count / float(divisor))
1484 return unittable[-1][2] % count
1507 return unittable[-1][2] % count
1485
1508
1486 return go
1509 return go
1487
1510
1488 bytecount = unitcountfn(
1511 bytecount = unitcountfn(
1489 (100, 1 << 30, _('%.0f GB')),
1512 (100, 1 << 30, _('%.0f GB')),
1490 (10, 1 << 30, _('%.1f GB')),
1513 (10, 1 << 30, _('%.1f GB')),
1491 (1, 1 << 30, _('%.2f GB')),
1514 (1, 1 << 30, _('%.2f GB')),
1492 (100, 1 << 20, _('%.0f MB')),
1515 (100, 1 << 20, _('%.0f MB')),
1493 (10, 1 << 20, _('%.1f MB')),
1516 (10, 1 << 20, _('%.1f MB')),
1494 (1, 1 << 20, _('%.2f MB')),
1517 (1, 1 << 20, _('%.2f MB')),
1495 (100, 1 << 10, _('%.0f KB')),
1518 (100, 1 << 10, _('%.0f KB')),
1496 (10, 1 << 10, _('%.1f KB')),
1519 (10, 1 << 10, _('%.1f KB')),
1497 (1, 1 << 10, _('%.2f KB')),
1520 (1, 1 << 10, _('%.2f KB')),
1498 (1, 1, _('%.0f bytes')),
1521 (1, 1, _('%.0f bytes')),
1499 )
1522 )
1500
1523
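bytecount picks the first row of the table whose threshold the value reaches, so the precision shrinks as the number grows. A sketch, assuming mercurial.util is importable:

    from mercurial import util

    util.bytecount(512)           # '512 bytes'
    util.bytecount(100 * 1024)    # '100 KB'
    util.bytecount(3 * 2**20)     # '3.00 MB'
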
1501 def uirepr(s):
1524 def uirepr(s):
1502 # Avoid double backslash in Windows path repr()
1525 # Avoid double backslash in Windows path repr()
1503 return repr(s).replace('\\\\', '\\')
1526 return repr(s).replace('\\\\', '\\')
1504
1527
1505 # delay import of textwrap
1528 # delay import of textwrap
1506 def MBTextWrapper(**kwargs):
1529 def MBTextWrapper(**kwargs):
1507 class tw(textwrap.TextWrapper):
1530 class tw(textwrap.TextWrapper):
1508 """
1531 """
1509 Extend TextWrapper for width-awareness.
1532 Extend TextWrapper for width-awareness.
1510
1533
1511 Neither the number of 'bytes' in any encoding nor the number of
1534 Neither the number of 'bytes' in any encoding nor the number of
1512 'characters' is appropriate for computing the terminal columns of a string.
1535 'characters' is appropriate for computing the terminal columns of a string.
1513
1536
1514 The original TextWrapper implementation uses the built-in 'len()'
1537 The original TextWrapper implementation uses the built-in 'len()'
1515 directly, so overriding is needed to use the width of each character.
1538 directly, so overriding is needed to use the width of each character.
1516
1539
1517 In addition, characters classified as 'ambiguous' width are treated
1540 In addition, characters classified as 'ambiguous' width are treated
1518 as wide in East Asian locales, but as narrow elsewhere.
1541 as wide in East Asian locales, but as narrow elsewhere.
1519
1542
1520 This requires a user decision to determine the width of such characters.
1543 This requires a user decision to determine the width of such characters.
1521 """
1544 """
1522 def __init__(self, **kwargs):
1545 def __init__(self, **kwargs):
1523 textwrap.TextWrapper.__init__(self, **kwargs)
1546 textwrap.TextWrapper.__init__(self, **kwargs)
1524
1547
1525 # for compatibility between 2.4 and 2.6
1548 # for compatibility between 2.4 and 2.6
1526 if getattr(self, 'drop_whitespace', None) is None:
1549 if getattr(self, 'drop_whitespace', None) is None:
1527 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1550 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1528
1551
1529 def _cutdown(self, ucstr, space_left):
1552 def _cutdown(self, ucstr, space_left):
1530 l = 0
1553 l = 0
1531 colwidth = encoding.ucolwidth
1554 colwidth = encoding.ucolwidth
1532 for i in xrange(len(ucstr)):
1555 for i in xrange(len(ucstr)):
1533 l += colwidth(ucstr[i])
1556 l += colwidth(ucstr[i])
1534 if space_left < l:
1557 if space_left < l:
1535 return (ucstr[:i], ucstr[i:])
1558 return (ucstr[:i], ucstr[i:])
1536 return ucstr, ''
1559 return ucstr, ''
1537
1560
1538 # overriding of base class
1561 # overriding of base class
1539 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1562 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1540 space_left = max(width - cur_len, 1)
1563 space_left = max(width - cur_len, 1)
1541
1564
1542 if self.break_long_words:
1565 if self.break_long_words:
1543 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1566 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1544 cur_line.append(cut)
1567 cur_line.append(cut)
1545 reversed_chunks[-1] = res
1568 reversed_chunks[-1] = res
1546 elif not cur_line:
1569 elif not cur_line:
1547 cur_line.append(reversed_chunks.pop())
1570 cur_line.append(reversed_chunks.pop())
1548
1571
1549 # this overriding code is imported from TextWrapper of python 2.6
1572 # this overriding code is imported from TextWrapper of python 2.6
1550 # to calculate columns of string by 'encoding.ucolwidth()'
1573 # to calculate columns of string by 'encoding.ucolwidth()'
1551 def _wrap_chunks(self, chunks):
1574 def _wrap_chunks(self, chunks):
1552 colwidth = encoding.ucolwidth
1575 colwidth = encoding.ucolwidth
1553
1576
1554 lines = []
1577 lines = []
1555 if self.width <= 0:
1578 if self.width <= 0:
1556 raise ValueError("invalid width %r (must be > 0)" % self.width)
1579 raise ValueError("invalid width %r (must be > 0)" % self.width)
1557
1580
1558 # Arrange in reverse order so items can be efficiently popped
1581 # Arrange in reverse order so items can be efficiently popped
1559 # from a stack of chunks.
1582 # from a stack of chunks.
1560 chunks.reverse()
1583 chunks.reverse()
1561
1584
1562 while chunks:
1585 while chunks:
1563
1586
1564 # Start the list of chunks that will make up the current line.
1587 # Start the list of chunks that will make up the current line.
1565 # cur_len is just the length of all the chunks in cur_line.
1588 # cur_len is just the length of all the chunks in cur_line.
1566 cur_line = []
1589 cur_line = []
1567 cur_len = 0
1590 cur_len = 0
1568
1591
1569 # Figure out which static string will prefix this line.
1592 # Figure out which static string will prefix this line.
1570 if lines:
1593 if lines:
1571 indent = self.subsequent_indent
1594 indent = self.subsequent_indent
1572 else:
1595 else:
1573 indent = self.initial_indent
1596 indent = self.initial_indent
1574
1597
1575 # Maximum width for this line.
1598 # Maximum width for this line.
1576 width = self.width - len(indent)
1599 width = self.width - len(indent)
1577
1600
1578 # First chunk on line is whitespace -- drop it, unless this
1601 # First chunk on line is whitespace -- drop it, unless this
1579 # is the very beginning of the text (i.e. no lines started yet).
1602 # is the very beginning of the text (i.e. no lines started yet).
1580 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1603 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1581 del chunks[-1]
1604 del chunks[-1]
1582
1605
1583 while chunks:
1606 while chunks:
1584 l = colwidth(chunks[-1])
1607 l = colwidth(chunks[-1])
1585
1608
1586 # Can at least squeeze this chunk onto the current line.
1609 # Can at least squeeze this chunk onto the current line.
1587 if cur_len + l <= width:
1610 if cur_len + l <= width:
1588 cur_line.append(chunks.pop())
1611 cur_line.append(chunks.pop())
1589 cur_len += l
1612 cur_len += l
1590
1613
1591 # Nope, this line is full.
1614 # Nope, this line is full.
1592 else:
1615 else:
1593 break
1616 break
1594
1617
1595 # The current line is full, and the next chunk is too big to
1618 # The current line is full, and the next chunk is too big to
1596 # fit on *any* line (not just this one).
1619 # fit on *any* line (not just this one).
1597 if chunks and colwidth(chunks[-1]) > width:
1620 if chunks and colwidth(chunks[-1]) > width:
1598 self._handle_long_word(chunks, cur_line, cur_len, width)
1621 self._handle_long_word(chunks, cur_line, cur_len, width)
1599
1622
1600 # If the last chunk on this line is all whitespace, drop it.
1623 # If the last chunk on this line is all whitespace, drop it.
1601 if (self.drop_whitespace and
1624 if (self.drop_whitespace and
1602 cur_line and cur_line[-1].strip() == ''):
1625 cur_line and cur_line[-1].strip() == ''):
1603 del cur_line[-1]
1626 del cur_line[-1]
1604
1627
1605 # Convert current line back to a string and store it in list
1628 # Convert current line back to a string and store it in list
1606 # of all lines (return value).
1629 # of all lines (return value).
1607 if cur_line:
1630 if cur_line:
1608 lines.append(indent + ''.join(cur_line))
1631 lines.append(indent + ''.join(cur_line))
1609
1632
1610 return lines
1633 return lines
1611
1634
1612 global MBTextWrapper
1635 global MBTextWrapper
1613 MBTextWrapper = tw
1636 MBTextWrapper = tw
1614 return tw(**kwargs)
1637 return tw(**kwargs)
1615
1638
1616 def wrap(line, width, initindent='', hangindent=''):
1639 def wrap(line, width, initindent='', hangindent=''):
1617 maxindent = max(len(hangindent), len(initindent))
1640 maxindent = max(len(hangindent), len(initindent))
1618 if width <= maxindent:
1641 if width <= maxindent:
1619 # adjust for weird terminal size
1642 # adjust for weird terminal size
1620 width = max(78, maxindent + 1)
1643 width = max(78, maxindent + 1)
1621 line = line.decode(encoding.encoding, encoding.encodingmode)
1644 line = line.decode(encoding.encoding, encoding.encodingmode)
1622 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1645 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1623 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1646 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1624 wrapper = MBTextWrapper(width=width,
1647 wrapper = MBTextWrapper(width=width,
1625 initial_indent=initindent,
1648 initial_indent=initindent,
1626 subsequent_indent=hangindent)
1649 subsequent_indent=hangindent)
1627 return wrapper.fill(line).encode(encoding.encoding)
1650 return wrapper.fill(line).encode(encoding.encoding)
1628
1651
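A sketch of wrap() producing width-aware, indented output (illustrative; the text is made up and the result depends on the configured encoding):

    from mercurial import util

    text = 'a fairly long help paragraph that should be folded to the width'
    util.wrap(text, 30, initindent='  ', hangindent='      ')
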
1629 def iterlines(iterator):
1652 def iterlines(iterator):
1630 for chunk in iterator:
1653 for chunk in iterator:
1631 for line in chunk.splitlines():
1654 for line in chunk.splitlines():
1632 yield line
1655 yield line
1633
1656
1634 def expandpath(path):
1657 def expandpath(path):
1635 return os.path.expanduser(os.path.expandvars(path))
1658 return os.path.expanduser(os.path.expandvars(path))
1636
1659
1637 def hgcmd():
1660 def hgcmd():
1638 """Return the command used to execute current hg
1661 """Return the command used to execute current hg
1639
1662
1640 This is different from hgexecutable() because on Windows we want
1663 This is different from hgexecutable() because on Windows we want
1641 to avoid things like batch files that open new shell windows, so we
1664 to avoid things like batch files that open new shell windows, so we
1642 return either the python call or the current executable.
1665 return either the python call or the current executable.
1643 """
1666 """
1644 if mainfrozen():
1667 if mainfrozen():
1645 return [sys.executable]
1668 return [sys.executable]
1646 return gethgcmd()
1669 return gethgcmd()
1647
1670
1648 def rundetached(args, condfn):
1671 def rundetached(args, condfn):
1649 """Execute the argument list in a detached process.
1672 """Execute the argument list in a detached process.
1650
1673
1651 condfn is a callable which is called repeatedly and should return
1674 condfn is a callable which is called repeatedly and should return
1652 True once the child process is known to have started successfully.
1675 True once the child process is known to have started successfully.
1653 At this point, the child process PID is returned. If the child
1676 At this point, the child process PID is returned. If the child
1654 process fails to start or finishes before condfn() evaluates to
1677 process fails to start or finishes before condfn() evaluates to
1655 True, return -1.
1678 True, return -1.
1656 """
1679 """
1657 # Windows case is easier because the child process is either
1680 # Windows case is easier because the child process is either
1658 # successfully starting and validating the condition or exiting
1681 # successfully starting and validating the condition or exiting
1659 # on failure. We just poll on its PID. On Unix, if the child
1682 # on failure. We just poll on its PID. On Unix, if the child
1660 # process fails to start, it will be left in a zombie state until
1683 # process fails to start, it will be left in a zombie state until
1661 # the parent waits on it, which we cannot do since we expect a long
1684 # the parent waits on it, which we cannot do since we expect a long
1662 # running process on success. Instead we listen for SIGCHLD telling
1685 # running process on success. Instead we listen for SIGCHLD telling
1663 # us our child process terminated.
1686 # us our child process terminated.
1664 terminated = set()
1687 terminated = set()
1665 def handler(signum, frame):
1688 def handler(signum, frame):
1666 terminated.add(os.wait())
1689 terminated.add(os.wait())
1667 prevhandler = None
1690 prevhandler = None
1668 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1691 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1669 if SIGCHLD is not None:
1692 if SIGCHLD is not None:
1670 prevhandler = signal.signal(SIGCHLD, handler)
1693 prevhandler = signal.signal(SIGCHLD, handler)
1671 try:
1694 try:
1672 pid = spawndetached(args)
1695 pid = spawndetached(args)
1673 while not condfn():
1696 while not condfn():
1674 if ((pid in terminated or not testpid(pid))
1697 if ((pid in terminated or not testpid(pid))
1675 and not condfn()):
1698 and not condfn()):
1676 return -1
1699 return -1
1677 time.sleep(0.1)
1700 time.sleep(0.1)
1678 return pid
1701 return pid
1679 finally:
1702 finally:
1680 if prevhandler is not None:
1703 if prevhandler is not None:
1681 signal.signal(signal.SIGCHLD, prevhandler)
1704 signal.signal(signal.SIGCHLD, prevhandler)
1682
1705
1683 try:
1706 try:
1684 any, all = any, all
1707 any, all = any, all
1685 except NameError:
1708 except NameError:
1686 def any(iterable):
1709 def any(iterable):
1687 for i in iterable:
1710 for i in iterable:
1688 if i:
1711 if i:
1689 return True
1712 return True
1690 return False
1713 return False
1691
1714
1692 def all(iterable):
1715 def all(iterable):
1693 for i in iterable:
1716 for i in iterable:
1694 if not i:
1717 if not i:
1695 return False
1718 return False
1696 return True
1719 return True
1697
1720
1698 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1721 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1699 """Return the result of interpolating items in the mapping into string s.
1722 """Return the result of interpolating items in the mapping into string s.
1700
1723
1701 prefix is a single character string, or a two character string with
1724 prefix is a single character string, or a two character string with
1702 a backslash as the first character if the prefix needs to be escaped in
1725 a backslash as the first character if the prefix needs to be escaped in
1703 a regular expression.
1726 a regular expression.
1704
1727
1705 fn is an optional function that will be applied to the replacement text
1728 fn is an optional function that will be applied to the replacement text
1706 just before replacement.
1729 just before replacement.
1707
1730
1708 escape_prefix is an optional flag that allows a doubled prefix to be
1731 escape_prefix is an optional flag that allows a doubled prefix to be
1709 used as an escape for the prefix character itself.
1732 used as an escape for the prefix character itself.
1710 """
1733 """
1711 fn = fn or (lambda s: s)
1734 fn = fn or (lambda s: s)
1712 patterns = '|'.join(mapping.keys())
1735 patterns = '|'.join(mapping.keys())
1713 if escape_prefix:
1736 if escape_prefix:
1714 patterns += '|' + prefix
1737 patterns += '|' + prefix
1715 if len(prefix) > 1:
1738 if len(prefix) > 1:
1716 prefix_char = prefix[1:]
1739 prefix_char = prefix[1:]
1717 else:
1740 else:
1718 prefix_char = prefix
1741 prefix_char = prefix
1719 mapping[prefix_char] = prefix_char
1742 mapping[prefix_char] = prefix_char
1720 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1743 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1721 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1744 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1722
1745
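A sketch of interpolate() with a plain '%' prefix (the mapping keys and sample string are made up; mercurial.util is assumed importable):

    from mercurial import util

    mapping = {'user': 'alice', 'repo': 'wiki'}
    util.interpolate('%', mapping, 'pushed to %repo by %user')
    # -> 'pushed to wiki by alice'
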
1723 def getport(port):
1746 def getport(port):
1724 """Return the port for a given network service.
1747 """Return the port for a given network service.
1725
1748
1726 If port is an integer, it's returned as is. If it's a string, it's
1749 If port is an integer, it's returned as is. If it's a string, it's
1727 looked up using socket.getservbyname(). If there's no matching
1750 looked up using socket.getservbyname(). If there's no matching
1728 service, util.Abort is raised.
1751 service, util.Abort is raised.
1729 """
1752 """
1730 try:
1753 try:
1731 return int(port)
1754 return int(port)
1732 except ValueError:
1755 except ValueError:
1733 pass
1756 pass
1734
1757
1735 try:
1758 try:
1736 return socket.getservbyname(port)
1759 return socket.getservbyname(port)
1737 except socket.error:
1760 except socket.error:
1738 raise Abort(_("no port number associated with service '%s'") % port)
1761 raise Abort(_("no port number associated with service '%s'") % port)
1739
1762
1740 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1763 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1741 '0': False, 'no': False, 'false': False, 'off': False,
1764 '0': False, 'no': False, 'false': False, 'off': False,
1742 'never': False}
1765 'never': False}
1743
1766
1744 def parsebool(s):
1767 def parsebool(s):
1745 """Parse s into a boolean.
1768 """Parse s into a boolean.
1746
1769
1747 If s is not a valid boolean, returns None.
1770 If s is not a valid boolean, returns None.
1748 """
1771 """
1749 return _booleans.get(s.lower(), None)
1772 return _booleans.get(s.lower(), None)
1750
1773
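Quick illustrative calls for the two small parsers above (the 'http' lookup goes through socket.getservbyname, so its result depends on the local services database; mercurial.util is assumed importable):

    from mercurial import util

    util.getport(8000)          # 8000
    util.getport('http')        # 80 on most systems
    util.parsebool('on')        # True
    util.parsebool('never')     # False
    util.parsebool('maybe')     # None
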
1751 _hexdig = '0123456789ABCDEFabcdef'
1774 _hexdig = '0123456789ABCDEFabcdef'
1752 _hextochr = dict((a + b, chr(int(a + b, 16)))
1775 _hextochr = dict((a + b, chr(int(a + b, 16)))
1753 for a in _hexdig for b in _hexdig)
1776 for a in _hexdig for b in _hexdig)
1754
1777
1755 def _urlunquote(s):
1778 def _urlunquote(s):
1756 """Decode HTTP/HTML % encoding.
1779 """Decode HTTP/HTML % encoding.
1757
1780
1758 >>> _urlunquote('abc%20def')
1781 >>> _urlunquote('abc%20def')
1759 'abc def'
1782 'abc def'
1760 """
1783 """
1761 res = s.split('%')
1784 res = s.split('%')
1762 # fastpath
1785 # fastpath
1763 if len(res) == 1:
1786 if len(res) == 1:
1764 return s
1787 return s
1765 s = res[0]
1788 s = res[0]
1766 for item in res[1:]:
1789 for item in res[1:]:
1767 try:
1790 try:
1768 s += _hextochr[item[:2]] + item[2:]
1791 s += _hextochr[item[:2]] + item[2:]
1769 except KeyError:
1792 except KeyError:
1770 s += '%' + item
1793 s += '%' + item
1771 except UnicodeDecodeError:
1794 except UnicodeDecodeError:
1772 s += unichr(int(item[:2], 16)) + item[2:]
1795 s += unichr(int(item[:2], 16)) + item[2:]
1773 return s
1796 return s
1774
1797
1775 class url(object):
1798 class url(object):
1776 r"""Reliable URL parser.
1799 r"""Reliable URL parser.
1777
1800
1778 This parses URLs and provides attributes for the following
1801 This parses URLs and provides attributes for the following
1779 components:
1802 components:
1780
1803
1781 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1804 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1782
1805
1783 Missing components are set to None. The only exception is
1806 Missing components are set to None. The only exception is
1784 fragment, which is set to '' if present but empty.
1807 fragment, which is set to '' if present but empty.
1785
1808
1786 If parsefragment is False, fragment is included in query. If
1809 If parsefragment is False, fragment is included in query. If
1787 parsequery is False, query is included in path. If both are
1810 parsequery is False, query is included in path. If both are
1788 False, both fragment and query are included in path.
1811 False, both fragment and query are included in path.
1789
1812
1790 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1813 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1791
1814
1792 Note that for backward compatibility reasons, bundle URLs do not
1815 Note that for backward compatibility reasons, bundle URLs do not
1793 take host names. That means 'bundle://../' has a path of '../'.
1816 take host names. That means 'bundle://../' has a path of '../'.
1794
1817
1795 Examples:
1818 Examples:
1796
1819
1797 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1820 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1798 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1821 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1799 >>> url('ssh://[::1]:2200//home/joe/repo')
1822 >>> url('ssh://[::1]:2200//home/joe/repo')
1800 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1823 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1801 >>> url('file:///home/joe/repo')
1824 >>> url('file:///home/joe/repo')
1802 <url scheme: 'file', path: '/home/joe/repo'>
1825 <url scheme: 'file', path: '/home/joe/repo'>
1803 >>> url('file:///c:/temp/foo/')
1826 >>> url('file:///c:/temp/foo/')
1804 <url scheme: 'file', path: 'c:/temp/foo/'>
1827 <url scheme: 'file', path: 'c:/temp/foo/'>
1805 >>> url('bundle:foo')
1828 >>> url('bundle:foo')
1806 <url scheme: 'bundle', path: 'foo'>
1829 <url scheme: 'bundle', path: 'foo'>
1807 >>> url('bundle://../foo')
1830 >>> url('bundle://../foo')
1808 <url scheme: 'bundle', path: '../foo'>
1831 <url scheme: 'bundle', path: '../foo'>
1809 >>> url(r'c:\foo\bar')
1832 >>> url(r'c:\foo\bar')
1810 <url path: 'c:\\foo\\bar'>
1833 <url path: 'c:\\foo\\bar'>
1811 >>> url(r'\\blah\blah\blah')
1834 >>> url(r'\\blah\blah\blah')
1812 <url path: '\\\\blah\\blah\\blah'>
1835 <url path: '\\\\blah\\blah\\blah'>
1813 >>> url(r'\\blah\blah\blah#baz')
1836 >>> url(r'\\blah\blah\blah#baz')
1814 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1837 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1815 >>> url(r'file:///C:\users\me')
1838 >>> url(r'file:///C:\users\me')
1816 <url scheme: 'file', path: 'C:\\users\\me'>
1839 <url scheme: 'file', path: 'C:\\users\\me'>
1817
1840
1818 Authentication credentials:
1841 Authentication credentials:
1819
1842
1820 >>> url('ssh://joe:xyz@x/repo')
1843 >>> url('ssh://joe:xyz@x/repo')
1821 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1844 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1822 >>> url('ssh://joe@x/repo')
1845 >>> url('ssh://joe@x/repo')
1823 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1846 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1824
1847
1825 Query strings and fragments:
1848 Query strings and fragments:
1826
1849
1827 >>> url('http://host/a?b#c')
1850 >>> url('http://host/a?b#c')
1828 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1851 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1829 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1852 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1830 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1853 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1831 """
1854 """
1832
1855
1833 _safechars = "!~*'()+"
1856 _safechars = "!~*'()+"
1834 _safepchars = "/!~*'()+:\\"
1857 _safepchars = "/!~*'()+:\\"
1835 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1858 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1836
1859
1837 def __init__(self, path, parsequery=True, parsefragment=True):
1860 def __init__(self, path, parsequery=True, parsefragment=True):
1838 # We slowly chomp away at path until we have only the path left
1861 # We slowly chomp away at path until we have only the path left
1839 self.scheme = self.user = self.passwd = self.host = None
1862 self.scheme = self.user = self.passwd = self.host = None
1840 self.port = self.path = self.query = self.fragment = None
1863 self.port = self.path = self.query = self.fragment = None
1841 self._localpath = True
1864 self._localpath = True
1842 self._hostport = ''
1865 self._hostport = ''
1843 self._origpath = path
1866 self._origpath = path
1844
1867
1845 if parsefragment and '#' in path:
1868 if parsefragment and '#' in path:
1846 path, self.fragment = path.split('#', 1)
1869 path, self.fragment = path.split('#', 1)
1847 if not path:
1870 if not path:
1848 path = None
1871 path = None
1849
1872
1850 # special case for Windows drive letters and UNC paths
1873 # special case for Windows drive letters and UNC paths
1851 if hasdriveletter(path) or path.startswith(r'\\'):
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLs
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

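    # A minimal sketch of how the parsing above decomposes a URL, assuming the
    # logic reads as written; the URL and its components here are hypothetical
    # and only illustrate the expected shape of the result:
    #
    #   >>> url('ssh://joe:secret@example.com:2222/repo')
    #   <url scheme: 'ssh', user: 'joe', passwd: 'secret', host: 'example.com', port: '2222', path: 'repo'>
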
    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

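    # A minimal sketch of what callers get back from authinfo(), assuming the
    # logic above: the credentials are stripped from the URL string and handed
    # back separately. The URL below is hypothetical:
    #
    #   >>> url('http://joe:xyzzy@example.com/repo').authinfo()
    #   ('http://example.com/repo',
    #    (None, ('http://example.com/repo', 'example.com'), 'joe', 'xyzzy'))
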
    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

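    # A minimal sketch of isabs() on a few hypothetical inputs: anything
    # remote, drive-lettered, UNC, or rooted at '/' counts as absolute.
    #
    #   >>> url('ssh://example.com/repo').isabs()
    #   True
    #   >>> url(r'c:\data\hg').isabs()
    #   True
    #   >>> url('relative/path').isabs()
    #   False
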
    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

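    # A minimal sketch of localpath(), assuming the logic above: file:// and
    # bundle: URLs yield a filesystem path, everything else falls back to the
    # original string. The inputs are hypothetical:
    #
    #   >>> url('file:///tmp/repo').localpath()
    #   '/tmp/repo'
    #   >>> url('file:///c:/tmp/repo').localpath()
    #   'c:/tmp/repo'
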
    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')

def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()

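# A minimal sketch of the two helpers above on hypothetical inputs:
#
#   >>> hasscheme('http://example.com/repo')
#   True
#   >>> hasscheme('/tmp/repo')
#   False
#   >>> hasdriveletter('c:/tmp/repo')
#   True
#   >>> hasdriveletter('/tmp/repo')
#   False
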
def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return str(u)

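# A minimal sketch of hidepassword() on a hypothetical URL: only the password
# component is masked, so the URL stays recognizable in logs and messages.
#
#   >>> hidepassword('http://joe:xyzzy@example.com/repo')
#   'http://joe:***@example.com/repo'
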
def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)

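# A minimal sketch of removeauth() on the same hypothetical URL: unlike
# hidepassword(), both the user name and the password are dropped.
#
#   >>> removeauth('http://joe:xyzzy@example.com/repo')
#   'http://example.com/repo'
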
def isatty(fd):
    try:
        return fd.isatty()
    except AttributeError:
        return False

timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

_timenesting = [0]

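# A rough sketch of what timecount() produces: it picks the most precise of
# the templates above for a given duration in seconds (the exact choice and
# rounding depend on unitcountfn, defined earlier in this file), so values
# come out roughly like this:
#
#   timecount(2.5)     # -> something like '2.500 s'
#   timecount(0.0042)  # -> something like '4.200 ms'
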
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper

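# A rough sketch of the nesting behaviour: because wrapper() tracks depth in
# _timenesting, timed functions that call each other produce an indented
# report on stderr. Names and timings below are hypothetical:
#
#   @timed
#   def inner():
#       pass
#
#   @timed
#   def outer():
#       inner()
#
#   # calling outer() would print something like:
#   #   inner: 12.00 us
#   # outer: 30.00 us
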
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behaviour. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results

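# A minimal sketch of how a hooks collection might be used: several sources
# register callables for one extension point, and calls return the results
# ordered by source name. All names below are hypothetical:
#
#   updatehooks = hooks()
#   updatehooks.add('ext-b', lambda repo: 'b saw %s' % repo)
#   updatehooks.add('ext-a', lambda repo: 'a saw %s' % repo)
#   updatehooks('myrepo')   # -> ['a saw myrepo', 'b saw myrepo']
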
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require a ui object.
    It is not meant for production code, but it is very convenient while
    developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    entries = [('%s:%s' % (fn, ln), func)
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (fnmax, fnln, func))
    f.flush()

# convenient shortcut
dst = debugstacktrace
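
# A rough sketch of debugstacktrace() in use: dropping a one-off call (or its
# dst alias) into a function under investigation prints the call chain that
# reached it. The call site, paths and line numbers below are hypothetical:
#
#   def somefunc(repo):
#       debugstacktrace('who calls somefunc?')
#
# would write something like this to stderr:
#
#   who calls somefunc? at:
#    some/caller.py:42 in callingfunc
#    some/module.py:10 in somefunc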