util: add progress callback support to copyfiles
Augie Fackler
r24439:2ddfac2f default
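For context, a minimal sketch of how a caller might drive the new progress parameter. This wiring is illustrative only, not part of this changeset: the ui object, srcpath/dstpath names, and the ui.progress hookup are assumptions about typical usage.

    # hypothetical caller code, assuming a Mercurial ui object is in scope
    from mercurial import util

    def prog(topic, pos):
        # copyfiles reports a running file count while it works and calls
        # progress(topic, None) when finished, which ui.progress treats as
        # "clear the progress bar"
        ui.progress(topic, pos, unit='files')

    hardlink, num = util.copyfiles(srcpath, dstpath, hardlink=None, progress=prog)
    ui.status('transferred %d files\n' % num)

The default progress=lambda t, pos: None keeps existing callers working unchanged.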
@@ -1,2233 +1,2242 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding
18 import error, osutil, encoding
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib, struct
22 import imp, socket, urllib, struct
23 import gc
23 import gc
24
24
25 if os.name == 'nt':
25 if os.name == 'nt':
26 import windows as platform
26 import windows as platform
27 else:
27 else:
28 import posix as platform
28 import posix as platform
29
29
30 cachestat = platform.cachestat
30 cachestat = platform.cachestat
31 checkexec = platform.checkexec
31 checkexec = platform.checkexec
32 checklink = platform.checklink
32 checklink = platform.checklink
33 copymode = platform.copymode
33 copymode = platform.copymode
34 executablepath = platform.executablepath
34 executablepath = platform.executablepath
35 expandglobs = platform.expandglobs
35 expandglobs = platform.expandglobs
36 explainexit = platform.explainexit
36 explainexit = platform.explainexit
37 findexe = platform.findexe
37 findexe = platform.findexe
38 gethgcmd = platform.gethgcmd
38 gethgcmd = platform.gethgcmd
39 getuser = platform.getuser
39 getuser = platform.getuser
40 groupmembers = platform.groupmembers
40 groupmembers = platform.groupmembers
41 groupname = platform.groupname
41 groupname = platform.groupname
42 hidewindow = platform.hidewindow
42 hidewindow = platform.hidewindow
43 isexec = platform.isexec
43 isexec = platform.isexec
44 isowner = platform.isowner
44 isowner = platform.isowner
45 localpath = platform.localpath
45 localpath = platform.localpath
46 lookupreg = platform.lookupreg
46 lookupreg = platform.lookupreg
47 makedir = platform.makedir
47 makedir = platform.makedir
48 nlinks = platform.nlinks
48 nlinks = platform.nlinks
49 normpath = platform.normpath
49 normpath = platform.normpath
50 normcase = platform.normcase
50 normcase = platform.normcase
51 openhardlinks = platform.openhardlinks
51 openhardlinks = platform.openhardlinks
52 oslink = platform.oslink
52 oslink = platform.oslink
53 parsepatchoutput = platform.parsepatchoutput
53 parsepatchoutput = platform.parsepatchoutput
54 pconvert = platform.pconvert
54 pconvert = platform.pconvert
55 popen = platform.popen
55 popen = platform.popen
56 posixfile = platform.posixfile
56 posixfile = platform.posixfile
57 quotecommand = platform.quotecommand
57 quotecommand = platform.quotecommand
58 readpipe = platform.readpipe
58 readpipe = platform.readpipe
59 rename = platform.rename
59 rename = platform.rename
60 samedevice = platform.samedevice
60 samedevice = platform.samedevice
61 samefile = platform.samefile
61 samefile = platform.samefile
62 samestat = platform.samestat
62 samestat = platform.samestat
63 setbinary = platform.setbinary
63 setbinary = platform.setbinary
64 setflags = platform.setflags
64 setflags = platform.setflags
65 setsignalhandler = platform.setsignalhandler
65 setsignalhandler = platform.setsignalhandler
66 shellquote = platform.shellquote
66 shellquote = platform.shellquote
67 spawndetached = platform.spawndetached
67 spawndetached = platform.spawndetached
68 split = platform.split
68 split = platform.split
69 sshargs = platform.sshargs
69 sshargs = platform.sshargs
70 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
70 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
71 statisexec = platform.statisexec
71 statisexec = platform.statisexec
72 statislink = platform.statislink
72 statislink = platform.statislink
73 termwidth = platform.termwidth
73 termwidth = platform.termwidth
74 testpid = platform.testpid
74 testpid = platform.testpid
75 umask = platform.umask
75 umask = platform.umask
76 unlink = platform.unlink
76 unlink = platform.unlink
77 unlinkpath = platform.unlinkpath
77 unlinkpath = platform.unlinkpath
78 username = platform.username
78 username = platform.username
79
79
80 # Python compatibility
80 # Python compatibility
81
81
82 _notset = object()
82 _notset = object()
83
83
84 def safehasattr(thing, attr):
84 def safehasattr(thing, attr):
85 return getattr(thing, attr, _notset) is not _notset
85 return getattr(thing, attr, _notset) is not _notset
86
86
87 def sha1(s=''):
87 def sha1(s=''):
88 '''
88 '''
89 Low-overhead wrapper around Python's SHA support
89 Low-overhead wrapper around Python's SHA support
90
90
91 >>> f = _fastsha1
91 >>> f = _fastsha1
92 >>> a = sha1()
92 >>> a = sha1()
93 >>> a = f()
93 >>> a = f()
94 >>> a.hexdigest()
94 >>> a.hexdigest()
95 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
95 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
96 '''
96 '''
97
97
98 return _fastsha1(s)
98 return _fastsha1(s)
99
99
100 def _fastsha1(s=''):
100 def _fastsha1(s=''):
101 # This function will import sha1 from hashlib or sha (whichever is
101 # This function will import sha1 from hashlib or sha (whichever is
102 # available) and overwrite itself with it on the first call.
102 # available) and overwrite itself with it on the first call.
103 # Subsequent calls will go directly to the imported function.
103 # Subsequent calls will go directly to the imported function.
104 if sys.version_info >= (2, 5):
104 if sys.version_info >= (2, 5):
105 from hashlib import sha1 as _sha1
105 from hashlib import sha1 as _sha1
106 else:
106 else:
107 from sha import sha as _sha1
107 from sha import sha as _sha1
108 global _fastsha1, sha1
108 global _fastsha1, sha1
109 _fastsha1 = sha1 = _sha1
109 _fastsha1 = sha1 = _sha1
110 return _sha1(s)
110 return _sha1(s)
111
111
112 def md5(s=''):
112 def md5(s=''):
113 try:
113 try:
114 from hashlib import md5 as _md5
114 from hashlib import md5 as _md5
115 except ImportError:
115 except ImportError:
116 from md5 import md5 as _md5
116 from md5 import md5 as _md5
117 global md5
117 global md5
118 md5 = _md5
118 md5 = _md5
119 return _md5(s)
119 return _md5(s)
120
120
121 DIGESTS = {
121 DIGESTS = {
122 'md5': md5,
122 'md5': md5,
123 'sha1': sha1,
123 'sha1': sha1,
124 }
124 }
125 # List of digest types from strongest to weakest
125 # List of digest types from strongest to weakest
126 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
126 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
127
127
128 try:
128 try:
129 import hashlib
129 import hashlib
130 DIGESTS.update({
130 DIGESTS.update({
131 'sha512': hashlib.sha512,
131 'sha512': hashlib.sha512,
132 })
132 })
133 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
133 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
134 except ImportError:
134 except ImportError:
135 pass
135 pass
136
136
137 for k in DIGESTS_BY_STRENGTH:
137 for k in DIGESTS_BY_STRENGTH:
138 assert k in DIGESTS
138 assert k in DIGESTS
139
139
140 class digester(object):
140 class digester(object):
141 """helper to compute digests.
141 """helper to compute digests.
142
142
143 This helper can be used to compute one or more digests given their name.
143 This helper can be used to compute one or more digests given their name.
144
144
145 >>> d = digester(['md5', 'sha1'])
145 >>> d = digester(['md5', 'sha1'])
146 >>> d.update('foo')
146 >>> d.update('foo')
147 >>> [k for k in sorted(d)]
147 >>> [k for k in sorted(d)]
148 ['md5', 'sha1']
148 ['md5', 'sha1']
149 >>> d['md5']
149 >>> d['md5']
150 'acbd18db4cc2f85cedef654fccc4a4d8'
150 'acbd18db4cc2f85cedef654fccc4a4d8'
151 >>> d['sha1']
151 >>> d['sha1']
152 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
152 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
153 >>> digester.preferred(['md5', 'sha1'])
153 >>> digester.preferred(['md5', 'sha1'])
154 'sha1'
154 'sha1'
155 """
155 """
156
156
157 def __init__(self, digests, s=''):
157 def __init__(self, digests, s=''):
158 self._hashes = {}
158 self._hashes = {}
159 for k in digests:
159 for k in digests:
160 if k not in DIGESTS:
160 if k not in DIGESTS:
161 raise Abort(_('unknown digest type: %s') % k)
161 raise Abort(_('unknown digest type: %s') % k)
162 self._hashes[k] = DIGESTS[k]()
162 self._hashes[k] = DIGESTS[k]()
163 if s:
163 if s:
164 self.update(s)
164 self.update(s)
165
165
166 def update(self, data):
166 def update(self, data):
167 for h in self._hashes.values():
167 for h in self._hashes.values():
168 h.update(data)
168 h.update(data)
169
169
170 def __getitem__(self, key):
170 def __getitem__(self, key):
171 if key not in DIGESTS:
171 if key not in DIGESTS:
172 raise Abort(_('unknown digest type: %s') % key)
172 raise Abort(_('unknown digest type: %s') % key)
173 return self._hashes[key].hexdigest()
173 return self._hashes[key].hexdigest()
174
174
175 def __iter__(self):
175 def __iter__(self):
176 return iter(self._hashes)
176 return iter(self._hashes)
177
177
178 @staticmethod
178 @staticmethod
179 def preferred(supported):
179 def preferred(supported):
180 """returns the strongest digest type in both supported and DIGESTS."""
180 """returns the strongest digest type in both supported and DIGESTS."""
181
181
182 for k in DIGESTS_BY_STRENGTH:
182 for k in DIGESTS_BY_STRENGTH:
183 if k in supported:
183 if k in supported:
184 return k
184 return k
185 return None
185 return None
186
186
187 class digestchecker(object):
187 class digestchecker(object):
188 """file handle wrapper that additionally checks content against a given
188 """file handle wrapper that additionally checks content against a given
189 size and digests.
189 size and digests.
190
190
191 d = digestchecker(fh, size, {'md5': '...'})
191 d = digestchecker(fh, size, {'md5': '...'})
192
192
193 When multiple digests are given, all of them are validated.
193 When multiple digests are given, all of them are validated.
194 """
194 """
195
195
196 def __init__(self, fh, size, digests):
196 def __init__(self, fh, size, digests):
197 self._fh = fh
197 self._fh = fh
198 self._size = size
198 self._size = size
199 self._got = 0
199 self._got = 0
200 self._digests = dict(digests)
200 self._digests = dict(digests)
201 self._digester = digester(self._digests.keys())
201 self._digester = digester(self._digests.keys())
202
202
203 def read(self, length=-1):
203 def read(self, length=-1):
204 content = self._fh.read(length)
204 content = self._fh.read(length)
205 self._digester.update(content)
205 self._digester.update(content)
206 self._got += len(content)
206 self._got += len(content)
207 return content
207 return content
208
208
209 def validate(self):
209 def validate(self):
210 if self._size != self._got:
210 if self._size != self._got:
211 raise Abort(_('size mismatch: expected %d, got %d') %
211 raise Abort(_('size mismatch: expected %d, got %d') %
212 (self._size, self._got))
212 (self._size, self._got))
213 for k, v in self._digests.items():
213 for k, v in self._digests.items():
214 if v != self._digester[k]:
214 if v != self._digester[k]:
215 # i18n: first parameter is a digest name
215 # i18n: first parameter is a digest name
216 raise Abort(_('%s mismatch: expected %s, got %s') %
216 raise Abort(_('%s mismatch: expected %s, got %s') %
217 (k, v, self._digester[k]))
217 (k, v, self._digester[k]))
218
218
219 try:
219 try:
220 buffer = buffer
220 buffer = buffer
221 except NameError:
221 except NameError:
222 if sys.version_info[0] < 3:
222 if sys.version_info[0] < 3:
223 def buffer(sliceable, offset=0):
223 def buffer(sliceable, offset=0):
224 return sliceable[offset:]
224 return sliceable[offset:]
225 else:
225 else:
226 def buffer(sliceable, offset=0):
226 def buffer(sliceable, offset=0):
227 return memoryview(sliceable)[offset:]
227 return memoryview(sliceable)[offset:]
228
228
229 import subprocess
229 import subprocess
230 closefds = os.name == 'posix'
230 closefds = os.name == 'posix'
231
231
232 def unpacker(fmt):
232 def unpacker(fmt):
233 """create a struct unpacker for the specified format"""
233 """create a struct unpacker for the specified format"""
234 try:
234 try:
235 # 2.5+
235 # 2.5+
236 return struct.Struct(fmt).unpack
236 return struct.Struct(fmt).unpack
237 except AttributeError:
237 except AttributeError:
238 # 2.4
238 # 2.4
239 return lambda buf: struct.unpack(fmt, buf)
239 return lambda buf: struct.unpack(fmt, buf)
240
240
241 def popen2(cmd, env=None, newlines=False):
241 def popen2(cmd, env=None, newlines=False):
242 # Setting bufsize to -1 lets the system decide the buffer size.
242 # Setting bufsize to -1 lets the system decide the buffer size.
243 # The default for bufsize is 0, meaning unbuffered. This leads to
243 # The default for bufsize is 0, meaning unbuffered. This leads to
244 # poor performance on Mac OS X: http://bugs.python.org/issue4194
244 # poor performance on Mac OS X: http://bugs.python.org/issue4194
245 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
245 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
246 close_fds=closefds,
246 close_fds=closefds,
247 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
247 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
248 universal_newlines=newlines,
248 universal_newlines=newlines,
249 env=env)
249 env=env)
250 return p.stdin, p.stdout
250 return p.stdin, p.stdout
251
251
252 def popen3(cmd, env=None, newlines=False):
252 def popen3(cmd, env=None, newlines=False):
253 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
253 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
254 return stdin, stdout, stderr
254 return stdin, stdout, stderr
255
255
256 def popen4(cmd, env=None, newlines=False):
256 def popen4(cmd, env=None, newlines=False):
257 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
257 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
258 close_fds=closefds,
258 close_fds=closefds,
259 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
259 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
260 stderr=subprocess.PIPE,
260 stderr=subprocess.PIPE,
261 universal_newlines=newlines,
261 universal_newlines=newlines,
262 env=env)
262 env=env)
263 return p.stdin, p.stdout, p.stderr, p
263 return p.stdin, p.stdout, p.stderr, p
264
264
265 def version():
265 def version():
266 """Return version information if available."""
266 """Return version information if available."""
267 try:
267 try:
268 import __version__
268 import __version__
269 return __version__.version
269 return __version__.version
270 except ImportError:
270 except ImportError:
271 return 'unknown'
271 return 'unknown'
272
272
273 # used by parsedate
273 # used by parsedate
274 defaultdateformats = (
274 defaultdateformats = (
275 '%Y-%m-%d %H:%M:%S',
275 '%Y-%m-%d %H:%M:%S',
276 '%Y-%m-%d %I:%M:%S%p',
276 '%Y-%m-%d %I:%M:%S%p',
277 '%Y-%m-%d %H:%M',
277 '%Y-%m-%d %H:%M',
278 '%Y-%m-%d %I:%M%p',
278 '%Y-%m-%d %I:%M%p',
279 '%Y-%m-%d',
279 '%Y-%m-%d',
280 '%m-%d',
280 '%m-%d',
281 '%m/%d',
281 '%m/%d',
282 '%m/%d/%y',
282 '%m/%d/%y',
283 '%m/%d/%Y',
283 '%m/%d/%Y',
284 '%a %b %d %H:%M:%S %Y',
284 '%a %b %d %H:%M:%S %Y',
285 '%a %b %d %I:%M:%S%p %Y',
285 '%a %b %d %I:%M:%S%p %Y',
286 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
286 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
287 '%b %d %H:%M:%S %Y',
287 '%b %d %H:%M:%S %Y',
288 '%b %d %I:%M:%S%p %Y',
288 '%b %d %I:%M:%S%p %Y',
289 '%b %d %H:%M:%S',
289 '%b %d %H:%M:%S',
290 '%b %d %I:%M:%S%p',
290 '%b %d %I:%M:%S%p',
291 '%b %d %H:%M',
291 '%b %d %H:%M',
292 '%b %d %I:%M%p',
292 '%b %d %I:%M%p',
293 '%b %d %Y',
293 '%b %d %Y',
294 '%b %d',
294 '%b %d',
295 '%H:%M:%S',
295 '%H:%M:%S',
296 '%I:%M:%S%p',
296 '%I:%M:%S%p',
297 '%H:%M',
297 '%H:%M',
298 '%I:%M%p',
298 '%I:%M%p',
299 )
299 )
300
300
301 extendeddateformats = defaultdateformats + (
301 extendeddateformats = defaultdateformats + (
302 "%Y",
302 "%Y",
303 "%Y-%m",
303 "%Y-%m",
304 "%b",
304 "%b",
305 "%b %Y",
305 "%b %Y",
306 )
306 )
307
307
308 def cachefunc(func):
308 def cachefunc(func):
309 '''cache the result of function calls'''
309 '''cache the result of function calls'''
310 # XXX doesn't handle keyword args
310 # XXX doesn't handle keyword args
311 if func.func_code.co_argcount == 0:
311 if func.func_code.co_argcount == 0:
312 cache = []
312 cache = []
313 def f():
313 def f():
314 if len(cache) == 0:
314 if len(cache) == 0:
315 cache.append(func())
315 cache.append(func())
316 return cache[0]
316 return cache[0]
317 return f
317 return f
318 cache = {}
318 cache = {}
319 if func.func_code.co_argcount == 1:
319 if func.func_code.co_argcount == 1:
320 # we gain a small amount of time because
320 # we gain a small amount of time because
321 # we don't need to pack/unpack the list
321 # we don't need to pack/unpack the list
322 def f(arg):
322 def f(arg):
323 if arg not in cache:
323 if arg not in cache:
324 cache[arg] = func(arg)
324 cache[arg] = func(arg)
325 return cache[arg]
325 return cache[arg]
326 else:
326 else:
327 def f(*args):
327 def f(*args):
328 if args not in cache:
328 if args not in cache:
329 cache[args] = func(*args)
329 cache[args] = func(*args)
330 return cache[args]
330 return cache[args]
331
331
332 return f
332 return f
333
333
334 try:
334 try:
335 collections.deque.remove
335 collections.deque.remove
336 deque = collections.deque
336 deque = collections.deque
337 except AttributeError:
337 except AttributeError:
338 # python 2.4 lacks deque.remove
338 # python 2.4 lacks deque.remove
339 class deque(collections.deque):
339 class deque(collections.deque):
340 def remove(self, val):
340 def remove(self, val):
341 for i, v in enumerate(self):
341 for i, v in enumerate(self):
342 if v == val:
342 if v == val:
343 del self[i]
343 del self[i]
344 break
344 break
345
345
346 class sortdict(dict):
346 class sortdict(dict):
347 '''a simple sorted dictionary'''
347 '''a simple sorted dictionary'''
348 def __init__(self, data=None):
348 def __init__(self, data=None):
349 self._list = []
349 self._list = []
350 if data:
350 if data:
351 self.update(data)
351 self.update(data)
352 def copy(self):
352 def copy(self):
353 return sortdict(self)
353 return sortdict(self)
354 def __setitem__(self, key, val):
354 def __setitem__(self, key, val):
355 if key in self:
355 if key in self:
356 self._list.remove(key)
356 self._list.remove(key)
357 self._list.append(key)
357 self._list.append(key)
358 dict.__setitem__(self, key, val)
358 dict.__setitem__(self, key, val)
359 def __iter__(self):
359 def __iter__(self):
360 return self._list.__iter__()
360 return self._list.__iter__()
361 def update(self, src):
361 def update(self, src):
362 if isinstance(src, dict):
362 if isinstance(src, dict):
363 src = src.iteritems()
363 src = src.iteritems()
364 for k, v in src:
364 for k, v in src:
365 self[k] = v
365 self[k] = v
366 def clear(self):
366 def clear(self):
367 dict.clear(self)
367 dict.clear(self)
368 self._list = []
368 self._list = []
369 def items(self):
369 def items(self):
370 return [(k, self[k]) for k in self._list]
370 return [(k, self[k]) for k in self._list]
371 def __delitem__(self, key):
371 def __delitem__(self, key):
372 dict.__delitem__(self, key)
372 dict.__delitem__(self, key)
373 self._list.remove(key)
373 self._list.remove(key)
374 def pop(self, key, *args, **kwargs):
374 def pop(self, key, *args, **kwargs):
375 dict.pop(self, key, *args, **kwargs)
375 dict.pop(self, key, *args, **kwargs)
376 try:
376 try:
377 self._list.remove(key)
377 self._list.remove(key)
378 except ValueError:
378 except ValueError:
379 pass
379 pass
380 def keys(self):
380 def keys(self):
381 return self._list
381 return self._list
382 def iterkeys(self):
382 def iterkeys(self):
383 return self._list.__iter__()
383 return self._list.__iter__()
384 def iteritems(self):
384 def iteritems(self):
385 for k in self._list:
385 for k in self._list:
386 yield k, self[k]
386 yield k, self[k]
387 def insert(self, index, key, val):
387 def insert(self, index, key, val):
388 self._list.insert(index, key)
388 self._list.insert(index, key)
389 dict.__setitem__(self, key, val)
389 dict.__setitem__(self, key, val)
390
390
391 class lrucachedict(object):
391 class lrucachedict(object):
392 '''cache most recent gets from or sets to this dictionary'''
392 '''cache most recent gets from or sets to this dictionary'''
393 def __init__(self, maxsize):
393 def __init__(self, maxsize):
394 self._cache = {}
394 self._cache = {}
395 self._maxsize = maxsize
395 self._maxsize = maxsize
396 self._order = deque()
396 self._order = deque()
397
397
398 def __getitem__(self, key):
398 def __getitem__(self, key):
399 value = self._cache[key]
399 value = self._cache[key]
400 self._order.remove(key)
400 self._order.remove(key)
401 self._order.append(key)
401 self._order.append(key)
402 return value
402 return value
403
403
404 def __setitem__(self, key, value):
404 def __setitem__(self, key, value):
405 if key not in self._cache:
405 if key not in self._cache:
406 if len(self._cache) >= self._maxsize:
406 if len(self._cache) >= self._maxsize:
407 del self._cache[self._order.popleft()]
407 del self._cache[self._order.popleft()]
408 else:
408 else:
409 self._order.remove(key)
409 self._order.remove(key)
410 self._cache[key] = value
410 self._cache[key] = value
411 self._order.append(key)
411 self._order.append(key)
412
412
413 def __contains__(self, key):
413 def __contains__(self, key):
414 return key in self._cache
414 return key in self._cache
415
415
416 def clear(self):
416 def clear(self):
417 self._cache.clear()
417 self._cache.clear()
418 self._order = deque()
418 self._order = deque()
419
419
420 def lrucachefunc(func):
420 def lrucachefunc(func):
421 '''cache most recent results of function calls'''
421 '''cache most recent results of function calls'''
422 cache = {}
422 cache = {}
423 order = deque()
423 order = deque()
424 if func.func_code.co_argcount == 1:
424 if func.func_code.co_argcount == 1:
425 def f(arg):
425 def f(arg):
426 if arg not in cache:
426 if arg not in cache:
427 if len(cache) > 20:
427 if len(cache) > 20:
428 del cache[order.popleft()]
428 del cache[order.popleft()]
429 cache[arg] = func(arg)
429 cache[arg] = func(arg)
430 else:
430 else:
431 order.remove(arg)
431 order.remove(arg)
432 order.append(arg)
432 order.append(arg)
433 return cache[arg]
433 return cache[arg]
434 else:
434 else:
435 def f(*args):
435 def f(*args):
436 if args not in cache:
436 if args not in cache:
437 if len(cache) > 20:
437 if len(cache) > 20:
438 del cache[order.popleft()]
438 del cache[order.popleft()]
439 cache[args] = func(*args)
439 cache[args] = func(*args)
440 else:
440 else:
441 order.remove(args)
441 order.remove(args)
442 order.append(args)
442 order.append(args)
443 return cache[args]
443 return cache[args]
444
444
445 return f
445 return f
446
446
447 class propertycache(object):
447 class propertycache(object):
448 def __init__(self, func):
448 def __init__(self, func):
449 self.func = func
449 self.func = func
450 self.name = func.__name__
450 self.name = func.__name__
451 def __get__(self, obj, type=None):
451 def __get__(self, obj, type=None):
452 result = self.func(obj)
452 result = self.func(obj)
453 self.cachevalue(obj, result)
453 self.cachevalue(obj, result)
454 return result
454 return result
455
455
456 def cachevalue(self, obj, value):
456 def cachevalue(self, obj, value):
457 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
457 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
458 obj.__dict__[self.name] = value
458 obj.__dict__[self.name] = value
459
459
460 def pipefilter(s, cmd):
460 def pipefilter(s, cmd):
461 '''filter string S through command CMD, returning its output'''
461 '''filter string S through command CMD, returning its output'''
462 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
462 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
463 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
463 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
464 pout, perr = p.communicate(s)
464 pout, perr = p.communicate(s)
465 return pout
465 return pout
466
466
467 def tempfilter(s, cmd):
467 def tempfilter(s, cmd):
468 '''filter string S through a pair of temporary files with CMD.
468 '''filter string S through a pair of temporary files with CMD.
469 CMD is used as a template to create the real command to be run,
469 CMD is used as a template to create the real command to be run,
470 with the strings INFILE and OUTFILE replaced by the real names of
470 with the strings INFILE and OUTFILE replaced by the real names of
471 the temporary files generated.'''
471 the temporary files generated.'''
472 inname, outname = None, None
472 inname, outname = None, None
473 try:
473 try:
474 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
474 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
475 fp = os.fdopen(infd, 'wb')
475 fp = os.fdopen(infd, 'wb')
476 fp.write(s)
476 fp.write(s)
477 fp.close()
477 fp.close()
478 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
478 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
479 os.close(outfd)
479 os.close(outfd)
480 cmd = cmd.replace('INFILE', inname)
480 cmd = cmd.replace('INFILE', inname)
481 cmd = cmd.replace('OUTFILE', outname)
481 cmd = cmd.replace('OUTFILE', outname)
482 code = os.system(cmd)
482 code = os.system(cmd)
483 if sys.platform == 'OpenVMS' and code & 1:
483 if sys.platform == 'OpenVMS' and code & 1:
484 code = 0
484 code = 0
485 if code:
485 if code:
486 raise Abort(_("command '%s' failed: %s") %
486 raise Abort(_("command '%s' failed: %s") %
487 (cmd, explainexit(code)))
487 (cmd, explainexit(code)))
488 fp = open(outname, 'rb')
488 fp = open(outname, 'rb')
489 r = fp.read()
489 r = fp.read()
490 fp.close()
490 fp.close()
491 return r
491 return r
492 finally:
492 finally:
493 try:
493 try:
494 if inname:
494 if inname:
495 os.unlink(inname)
495 os.unlink(inname)
496 except OSError:
496 except OSError:
497 pass
497 pass
498 try:
498 try:
499 if outname:
499 if outname:
500 os.unlink(outname)
500 os.unlink(outname)
501 except OSError:
501 except OSError:
502 pass
502 pass
503
503
504 filtertable = {
504 filtertable = {
505 'tempfile:': tempfilter,
505 'tempfile:': tempfilter,
506 'pipe:': pipefilter,
506 'pipe:': pipefilter,
507 }
507 }
508
508
509 def filter(s, cmd):
509 def filter(s, cmd):
510 "filter a string through a command that transforms its input to its output"
510 "filter a string through a command that transforms its input to its output"
511 for name, fn in filtertable.iteritems():
511 for name, fn in filtertable.iteritems():
512 if cmd.startswith(name):
512 if cmd.startswith(name):
513 return fn(s, cmd[len(name):].lstrip())
513 return fn(s, cmd[len(name):].lstrip())
514 return pipefilter(s, cmd)
514 return pipefilter(s, cmd)
515
515
516 def binary(s):
516 def binary(s):
517 """return true if a string is binary data"""
517 """return true if a string is binary data"""
518 return bool(s and '\0' in s)
518 return bool(s and '\0' in s)
519
519
520 def increasingchunks(source, min=1024, max=65536):
520 def increasingchunks(source, min=1024, max=65536):
521 '''return no less than min bytes per chunk while data remains,
521 '''return no less than min bytes per chunk while data remains,
522 doubling min after each chunk until it reaches max'''
522 doubling min after each chunk until it reaches max'''
523 def log2(x):
523 def log2(x):
524 if not x:
524 if not x:
525 return 0
525 return 0
526 i = 0
526 i = 0
527 while x:
527 while x:
528 x >>= 1
528 x >>= 1
529 i += 1
529 i += 1
530 return i - 1
530 return i - 1
531
531
532 buf = []
532 buf = []
533 blen = 0
533 blen = 0
534 for chunk in source:
534 for chunk in source:
535 buf.append(chunk)
535 buf.append(chunk)
536 blen += len(chunk)
536 blen += len(chunk)
537 if blen >= min:
537 if blen >= min:
538 if min < max:
538 if min < max:
539 min = min << 1
539 min = min << 1
540 nmin = 1 << log2(blen)
540 nmin = 1 << log2(blen)
541 if nmin > min:
541 if nmin > min:
542 min = nmin
542 min = nmin
543 if min > max:
543 if min > max:
544 min = max
544 min = max
545 yield ''.join(buf)
545 yield ''.join(buf)
546 blen = 0
546 blen = 0
547 buf = []
547 buf = []
548 if buf:
548 if buf:
549 yield ''.join(buf)
549 yield ''.join(buf)
550
550
551 Abort = error.Abort
551 Abort = error.Abort
552
552
553 def always(fn):
553 def always(fn):
554 return True
554 return True
555
555
556 def never(fn):
556 def never(fn):
557 return False
557 return False
558
558
559 def nogc(func):
559 def nogc(func):
560 """disable garbage collector
560 """disable garbage collector
561
561
562 Python's garbage collector triggers a GC each time a certain number of
562 Python's garbage collector triggers a GC each time a certain number of
563 container objects (the number being defined by gc.get_threshold()) are
563 container objects (the number being defined by gc.get_threshold()) are
564 allocated even when marked not to be tracked by the collector. Tracking has
564 allocated even when marked not to be tracked by the collector. Tracking has
565 no effect on when GCs are triggered, only on what objects the GC looks
565 no effect on when GCs are triggered, only on what objects the GC looks
566 into. As a workaround, disable GC while building complex (huge)
566 into. As a workaround, disable GC while building complex (huge)
567 containers.
567 containers.
568
568
569 This garbage collector issue has been fixed in 2.7.
569 This garbage collector issue has been fixed in 2.7.
570 """
570 """
571 def wrapper(*args, **kwargs):
571 def wrapper(*args, **kwargs):
572 gcenabled = gc.isenabled()
572 gcenabled = gc.isenabled()
573 gc.disable()
573 gc.disable()
574 try:
574 try:
575 return func(*args, **kwargs)
575 return func(*args, **kwargs)
576 finally:
576 finally:
577 if gcenabled:
577 if gcenabled:
578 gc.enable()
578 gc.enable()
579 return wrapper
579 return wrapper
580
580
581 def pathto(root, n1, n2):
581 def pathto(root, n1, n2):
582 '''return the relative path from one place to another.
582 '''return the relative path from one place to another.
583 root should use os.sep to separate directories
583 root should use os.sep to separate directories
584 n1 should use os.sep to separate directories
584 n1 should use os.sep to separate directories
585 n2 should use "/" to separate directories
585 n2 should use "/" to separate directories
586 returns an os.sep-separated path.
586 returns an os.sep-separated path.
587
587
588 If n1 is a relative path, it's assumed it's
588 If n1 is a relative path, it's assumed it's
589 relative to root.
589 relative to root.
590 n2 should always be relative to root.
590 n2 should always be relative to root.
591 '''
591 '''
592 if not n1:
592 if not n1:
593 return localpath(n2)
593 return localpath(n2)
594 if os.path.isabs(n1):
594 if os.path.isabs(n1):
595 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
595 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
596 return os.path.join(root, localpath(n2))
596 return os.path.join(root, localpath(n2))
597 n2 = '/'.join((pconvert(root), n2))
597 n2 = '/'.join((pconvert(root), n2))
598 a, b = splitpath(n1), n2.split('/')
598 a, b = splitpath(n1), n2.split('/')
599 a.reverse()
599 a.reverse()
600 b.reverse()
600 b.reverse()
601 while a and b and a[-1] == b[-1]:
601 while a and b and a[-1] == b[-1]:
602 a.pop()
602 a.pop()
603 b.pop()
603 b.pop()
604 b.reverse()
604 b.reverse()
605 return os.sep.join((['..'] * len(a)) + b) or '.'
605 return os.sep.join((['..'] * len(a)) + b) or '.'
606
606
607 def mainfrozen():
607 def mainfrozen():
608 """return True if we are a frozen executable.
608 """return True if we are a frozen executable.
609
609
610 The code supports py2exe (most common, Windows only) and tools/freeze
610 The code supports py2exe (most common, Windows only) and tools/freeze
611 (portable, not much used).
611 (portable, not much used).
612 """
612 """
613 return (safehasattr(sys, "frozen") or # new py2exe
613 return (safehasattr(sys, "frozen") or # new py2exe
614 safehasattr(sys, "importers") or # old py2exe
614 safehasattr(sys, "importers") or # old py2exe
615 imp.is_frozen("__main__")) # tools/freeze
615 imp.is_frozen("__main__")) # tools/freeze
616
616
617 # the location of data files matching the source code
617 # the location of data files matching the source code
618 if mainfrozen():
618 if mainfrozen():
619 # executable version (py2exe) doesn't support __file__
619 # executable version (py2exe) doesn't support __file__
620 datapath = os.path.dirname(sys.executable)
620 datapath = os.path.dirname(sys.executable)
621 else:
621 else:
622 datapath = os.path.dirname(__file__)
622 datapath = os.path.dirname(__file__)
623
623
624 i18n.setdatapath(datapath)
624 i18n.setdatapath(datapath)
625
625
626 _hgexecutable = None
626 _hgexecutable = None
627
627
628 def hgexecutable():
628 def hgexecutable():
629 """return location of the 'hg' executable.
629 """return location of the 'hg' executable.
630
630
631 Defaults to $HG or 'hg' in the search path.
631 Defaults to $HG or 'hg' in the search path.
632 """
632 """
633 if _hgexecutable is None:
633 if _hgexecutable is None:
634 hg = os.environ.get('HG')
634 hg = os.environ.get('HG')
635 mainmod = sys.modules['__main__']
635 mainmod = sys.modules['__main__']
636 if hg:
636 if hg:
637 _sethgexecutable(hg)
637 _sethgexecutable(hg)
638 elif mainfrozen():
638 elif mainfrozen():
639 _sethgexecutable(sys.executable)
639 _sethgexecutable(sys.executable)
640 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
640 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
641 _sethgexecutable(mainmod.__file__)
641 _sethgexecutable(mainmod.__file__)
642 else:
642 else:
643 exe = findexe('hg') or os.path.basename(sys.argv[0])
643 exe = findexe('hg') or os.path.basename(sys.argv[0])
644 _sethgexecutable(exe)
644 _sethgexecutable(exe)
645 return _hgexecutable
645 return _hgexecutable
646
646
647 def _sethgexecutable(path):
647 def _sethgexecutable(path):
648 """set location of the 'hg' executable"""
648 """set location of the 'hg' executable"""
649 global _hgexecutable
649 global _hgexecutable
650 _hgexecutable = path
650 _hgexecutable = path
651
651
652 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
652 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
653 '''enhanced shell command execution.
653 '''enhanced shell command execution.
654 run with environment maybe modified, maybe in different dir.
654 run with environment maybe modified, maybe in different dir.
655
655
656 if command fails and onerr is None, return status, else raise onerr
656 if command fails and onerr is None, return status, else raise onerr
657 object as exception.
657 object as exception.
658
658
659 if out is specified, it is assumed to be a file-like object that has a
659 if out is specified, it is assumed to be a file-like object that has a
660 write() method. stdout and stderr will be redirected to out.'''
660 write() method. stdout and stderr will be redirected to out.'''
661 try:
661 try:
662 sys.stdout.flush()
662 sys.stdout.flush()
663 except Exception:
663 except Exception:
664 pass
664 pass
665 def py2shell(val):
665 def py2shell(val):
666 'convert python object into string that is useful to shell'
666 'convert python object into string that is useful to shell'
667 if val is None or val is False:
667 if val is None or val is False:
668 return '0'
668 return '0'
669 if val is True:
669 if val is True:
670 return '1'
670 return '1'
671 return str(val)
671 return str(val)
672 origcmd = cmd
672 origcmd = cmd
673 cmd = quotecommand(cmd)
673 cmd = quotecommand(cmd)
674 if sys.platform == 'plan9' and (sys.version_info[0] == 2
674 if sys.platform == 'plan9' and (sys.version_info[0] == 2
675 and sys.version_info[1] < 7):
675 and sys.version_info[1] < 7):
676 # subprocess kludge to work around issues in half-baked Python
676 # subprocess kludge to work around issues in half-baked Python
677 # ports, notably bichued/python:
677 # ports, notably bichued/python:
678 if not cwd is None:
678 if not cwd is None:
679 os.chdir(cwd)
679 os.chdir(cwd)
680 rc = os.system(cmd)
680 rc = os.system(cmd)
681 else:
681 else:
682 env = dict(os.environ)
682 env = dict(os.environ)
683 env.update((k, py2shell(v)) for k, v in environ.iteritems())
683 env.update((k, py2shell(v)) for k, v in environ.iteritems())
684 env['HG'] = hgexecutable()
684 env['HG'] = hgexecutable()
685 if out is None or out == sys.__stdout__:
685 if out is None or out == sys.__stdout__:
686 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
686 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
687 env=env, cwd=cwd)
687 env=env, cwd=cwd)
688 else:
688 else:
689 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
689 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
690 env=env, cwd=cwd, stdout=subprocess.PIPE,
690 env=env, cwd=cwd, stdout=subprocess.PIPE,
691 stderr=subprocess.STDOUT)
691 stderr=subprocess.STDOUT)
692 while True:
692 while True:
693 line = proc.stdout.readline()
693 line = proc.stdout.readline()
694 if not line:
694 if not line:
695 break
695 break
696 out.write(line)
696 out.write(line)
697 proc.wait()
697 proc.wait()
698 rc = proc.returncode
698 rc = proc.returncode
699 if sys.platform == 'OpenVMS' and rc & 1:
699 if sys.platform == 'OpenVMS' and rc & 1:
700 rc = 0
700 rc = 0
701 if rc and onerr:
701 if rc and onerr:
702 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
702 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
703 explainexit(rc)[0])
703 explainexit(rc)[0])
704 if errprefix:
704 if errprefix:
705 errmsg = '%s: %s' % (errprefix, errmsg)
705 errmsg = '%s: %s' % (errprefix, errmsg)
706 raise onerr(errmsg)
706 raise onerr(errmsg)
707 return rc
707 return rc
708
708
709 def checksignature(func):
709 def checksignature(func):
710 '''wrap a function with code to check for calling errors'''
710 '''wrap a function with code to check for calling errors'''
711 def check(*args, **kwargs):
711 def check(*args, **kwargs):
712 try:
712 try:
713 return func(*args, **kwargs)
713 return func(*args, **kwargs)
714 except TypeError:
714 except TypeError:
715 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
715 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
716 raise error.SignatureError
716 raise error.SignatureError
717 raise
717 raise
718
718
719 return check
719 return check
720
720
721 def copyfile(src, dest, hardlink=False):
721 def copyfile(src, dest, hardlink=False):
722 "copy a file, preserving mode and atime/mtime"
722 "copy a file, preserving mode and atime/mtime"
723 if os.path.lexists(dest):
723 if os.path.lexists(dest):
724 unlink(dest)
724 unlink(dest)
725 # hardlinks are problematic on CIFS, quietly ignore this flag
725 # hardlinks are problematic on CIFS, quietly ignore this flag
726 # until we find a way to work around it cleanly (issue4546)
726 # until we find a way to work around it cleanly (issue4546)
727 if False and hardlink:
727 if False and hardlink:
728 try:
728 try:
729 oslink(src, dest)
729 oslink(src, dest)
730 return
730 return
731 except (IOError, OSError):
731 except (IOError, OSError):
732 pass # fall back to normal copy
732 pass # fall back to normal copy
733 if os.path.islink(src):
733 if os.path.islink(src):
734 os.symlink(os.readlink(src), dest)
734 os.symlink(os.readlink(src), dest)
735 else:
735 else:
736 try:
736 try:
737 shutil.copyfile(src, dest)
737 shutil.copyfile(src, dest)
738 shutil.copymode(src, dest)
738 shutil.copymode(src, dest)
739 except shutil.Error, inst:
739 except shutil.Error, inst:
740 raise Abort(str(inst))
740 raise Abort(str(inst))
741
741
742 def copyfiles(src, dst, hardlink=None):
742 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
743 """Copy a directory tree using hardlinks if possible"""
743 """Copy a directory tree using hardlinks if possible."""
744 num = 0
744
745
745 if hardlink is None:
746 if hardlink is None:
746 hardlink = (os.stat(src).st_dev ==
747 hardlink = (os.stat(src).st_dev ==
747 os.stat(os.path.dirname(dst)).st_dev)
748 os.stat(os.path.dirname(dst)).st_dev)
749 if hardlink:
750 topic = _('linking')
751 else:
752 topic = _('copying')
748
753
749 num = 0
750 if os.path.isdir(src):
754 if os.path.isdir(src):
751 os.mkdir(dst)
755 os.mkdir(dst)
752 for name, kind in osutil.listdir(src):
756 for name, kind in osutil.listdir(src):
753 srcname = os.path.join(src, name)
757 srcname = os.path.join(src, name)
754 dstname = os.path.join(dst, name)
758 dstname = os.path.join(dst, name)
755 hardlink, n = copyfiles(srcname, dstname, hardlink)
759 def nprog(t, pos):
760 if pos is not None:
761 return progress(t, pos + num)
762 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
756 num += n
763 num += n
757 else:
764 else:
758 if hardlink:
765 if hardlink:
759 try:
766 try:
760 oslink(src, dst)
767 oslink(src, dst)
761 except (IOError, OSError):
768 except (IOError, OSError):
762 hardlink = False
769 hardlink = False
763 shutil.copy(src, dst)
770 shutil.copy(src, dst)
764 else:
771 else:
765 shutil.copy(src, dst)
772 shutil.copy(src, dst)
766 num += 1
773 num += 1
774 progress(topic, num)
775 progress(topic, None)
767
776
768 return hardlink, num
777 return hardlink, num
769
778
770 _winreservednames = '''con prn aux nul
779 _winreservednames = '''con prn aux nul
771 com1 com2 com3 com4 com5 com6 com7 com8 com9
780 com1 com2 com3 com4 com5 com6 com7 com8 com9
772 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
781 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
773 _winreservedchars = ':*?"<>|'
782 _winreservedchars = ':*?"<>|'
774 def checkwinfilename(path):
783 def checkwinfilename(path):
775 r'''Check that the base-relative path is a valid filename on Windows.
784 r'''Check that the base-relative path is a valid filename on Windows.
776 Returns None if the path is ok, or a UI string describing the problem.
785 Returns None if the path is ok, or a UI string describing the problem.
777
786
778 >>> checkwinfilename("just/a/normal/path")
787 >>> checkwinfilename("just/a/normal/path")
779 >>> checkwinfilename("foo/bar/con.xml")
788 >>> checkwinfilename("foo/bar/con.xml")
780 "filename contains 'con', which is reserved on Windows"
789 "filename contains 'con', which is reserved on Windows"
781 >>> checkwinfilename("foo/con.xml/bar")
790 >>> checkwinfilename("foo/con.xml/bar")
782 "filename contains 'con', which is reserved on Windows"
791 "filename contains 'con', which is reserved on Windows"
783 >>> checkwinfilename("foo/bar/xml.con")
792 >>> checkwinfilename("foo/bar/xml.con")
784 >>> checkwinfilename("foo/bar/AUX/bla.txt")
793 >>> checkwinfilename("foo/bar/AUX/bla.txt")
785 "filename contains 'AUX', which is reserved on Windows"
794 "filename contains 'AUX', which is reserved on Windows"
786 >>> checkwinfilename("foo/bar/bla:.txt")
795 >>> checkwinfilename("foo/bar/bla:.txt")
787 "filename contains ':', which is reserved on Windows"
796 "filename contains ':', which is reserved on Windows"
788 >>> checkwinfilename("foo/bar/b\07la.txt")
797 >>> checkwinfilename("foo/bar/b\07la.txt")
789 "filename contains '\\x07', which is invalid on Windows"
798 "filename contains '\\x07', which is invalid on Windows"
790 >>> checkwinfilename("foo/bar/bla ")
799 >>> checkwinfilename("foo/bar/bla ")
791 "filename ends with ' ', which is not allowed on Windows"
800 "filename ends with ' ', which is not allowed on Windows"
792 >>> checkwinfilename("../bar")
801 >>> checkwinfilename("../bar")
793 >>> checkwinfilename("foo\\")
802 >>> checkwinfilename("foo\\")
794 "filename ends with '\\', which is invalid on Windows"
803 "filename ends with '\\', which is invalid on Windows"
795 >>> checkwinfilename("foo\\/bar")
804 >>> checkwinfilename("foo\\/bar")
796 "directory name ends with '\\', which is invalid on Windows"
805 "directory name ends with '\\', which is invalid on Windows"
797 '''
806 '''
798 if path.endswith('\\'):
807 if path.endswith('\\'):
799 return _("filename ends with '\\', which is invalid on Windows")
808 return _("filename ends with '\\', which is invalid on Windows")
800 if '\\/' in path:
809 if '\\/' in path:
801 return _("directory name ends with '\\', which is invalid on Windows")
810 return _("directory name ends with '\\', which is invalid on Windows")
802 for n in path.replace('\\', '/').split('/'):
811 for n in path.replace('\\', '/').split('/'):
803 if not n:
812 if not n:
804 continue
813 continue
805 for c in n:
814 for c in n:
806 if c in _winreservedchars:
815 if c in _winreservedchars:
807 return _("filename contains '%s', which is reserved "
816 return _("filename contains '%s', which is reserved "
808 "on Windows") % c
817 "on Windows") % c
809 if ord(c) <= 31:
818 if ord(c) <= 31:
810 return _("filename contains %r, which is invalid "
819 return _("filename contains %r, which is invalid "
811 "on Windows") % c
820 "on Windows") % c
812 base = n.split('.')[0]
821 base = n.split('.')[0]
813 if base and base.lower() in _winreservednames:
822 if base and base.lower() in _winreservednames:
814 return _("filename contains '%s', which is reserved "
823 return _("filename contains '%s', which is reserved "
815 "on Windows") % base
824 "on Windows") % base
816 t = n[-1]
825 t = n[-1]
817 if t in '. ' and n not in '..':
826 if t in '. ' and n not in '..':
818 return _("filename ends with '%s', which is not allowed "
827 return _("filename ends with '%s', which is not allowed "
819 "on Windows") % t
828 "on Windows") % t
820
829
821 if os.name == 'nt':
830 if os.name == 'nt':
822 checkosfilename = checkwinfilename
831 checkosfilename = checkwinfilename
823 else:
832 else:
824 checkosfilename = platform.checkosfilename
833 checkosfilename = platform.checkosfilename
825
834
826 def makelock(info, pathname):
835 def makelock(info, pathname):
827 try:
836 try:
828 return os.symlink(info, pathname)
837 return os.symlink(info, pathname)
829 except OSError, why:
838 except OSError, why:
830 if why.errno == errno.EEXIST:
839 if why.errno == errno.EEXIST:
831 raise
840 raise
832 except AttributeError: # no symlink in os
841 except AttributeError: # no symlink in os
833 pass
842 pass
834
843
835 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
844 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
836 os.write(ld, info)
845 os.write(ld, info)
837 os.close(ld)
846 os.close(ld)
838
847
839 def readlock(pathname):
848 def readlock(pathname):
840 try:
849 try:
841 return os.readlink(pathname)
850 return os.readlink(pathname)
842 except OSError, why:
851 except OSError, why:
843 if why.errno not in (errno.EINVAL, errno.ENOSYS):
852 if why.errno not in (errno.EINVAL, errno.ENOSYS):
844 raise
853 raise
845 except AttributeError: # no symlink in os
854 except AttributeError: # no symlink in os
846 pass
855 pass
847 fp = posixfile(pathname)
856 fp = posixfile(pathname)
848 r = fp.read()
857 r = fp.read()
849 fp.close()
858 fp.close()
850 return r
859 return r
851
860
852 def fstat(fp):
861 def fstat(fp):
853 '''stat file object that may not have fileno method.'''
862 '''stat file object that may not have fileno method.'''
854 try:
863 try:
855 return os.fstat(fp.fileno())
864 return os.fstat(fp.fileno())
856 except AttributeError:
865 except AttributeError:
857 return os.stat(fp.name)
866 return os.stat(fp.name)
858
867
859 # File system features
868 # File system features
860
869
861 def checkcase(path):
870 def checkcase(path):
862 """
871 """
863 Return true if the given path is on a case-sensitive filesystem
872 Return true if the given path is on a case-sensitive filesystem
864
873
865 Requires a path (like /foo/.hg) ending with a foldable final
874 Requires a path (like /foo/.hg) ending with a foldable final
866 directory component.
875 directory component.
867 """
876 """
868 s1 = os.stat(path)
877 s1 = os.stat(path)
869 d, b = os.path.split(path)
878 d, b = os.path.split(path)
870 b2 = b.upper()
879 b2 = b.upper()
871 if b == b2:
880 if b == b2:
872 b2 = b.lower()
881 b2 = b.lower()
873 if b == b2:
882 if b == b2:
874 return True # no evidence against case sensitivity
883 return True # no evidence against case sensitivity
875 p2 = os.path.join(d, b2)
884 p2 = os.path.join(d, b2)
876 try:
885 try:
877 s2 = os.stat(p2)
886 s2 = os.stat(p2)
878 if s2 == s1:
887 if s2 == s1:
879 return False
888 return False
880 return True
889 return True
881 except OSError:
890 except OSError:
882 return True
891 return True
883
892
884 try:
893 try:
885 import re2
894 import re2
886 _re2 = None
895 _re2 = None
887 except ImportError:
896 except ImportError:
888 _re2 = False
897 _re2 = False
889
898
890 class _re(object):
899 class _re(object):
891 def _checkre2(self):
900 def _checkre2(self):
892 global _re2
901 global _re2
893 try:
902 try:
894 # check if match works, see issue3964
903 # check if match works, see issue3964
895 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
904 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
896 except ImportError:
905 except ImportError:
897 _re2 = False
906 _re2 = False
898
907
899 def compile(self, pat, flags=0):
908 def compile(self, pat, flags=0):
900 '''Compile a regular expression, using re2 if possible
909 '''Compile a regular expression, using re2 if possible
901
910
902 For best performance, use only re2-compatible regexp features. The
911 For best performance, use only re2-compatible regexp features. The
903 only flags from the re module that are re2-compatible are
912 only flags from the re module that are re2-compatible are
904 IGNORECASE and MULTILINE.'''
913 IGNORECASE and MULTILINE.'''
905 if _re2 is None:
914 if _re2 is None:
906 self._checkre2()
915 self._checkre2()
907 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
916 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
908 if flags & remod.IGNORECASE:
917 if flags & remod.IGNORECASE:
909 pat = '(?i)' + pat
918 pat = '(?i)' + pat
910 if flags & remod.MULTILINE:
919 if flags & remod.MULTILINE:
911 pat = '(?m)' + pat
920 pat = '(?m)' + pat
912 try:
921 try:
913 return re2.compile(pat)
922 return re2.compile(pat)
914 except re2.error:
923 except re2.error:
915 pass
924 pass
916 return remod.compile(pat, flags)
925 return remod.compile(pat, flags)
917
926
918 @propertycache
927 @propertycache
919 def escape(self):
928 def escape(self):
920 '''Return the version of escape corresponding to self.compile.
929 '''Return the version of escape corresponding to self.compile.
921
930
922 This is imperfect because whether re2 or re is used for a particular
931 This is imperfect because whether re2 or re is used for a particular
923 function depends on the flags, etc, but it's the best we can do.
932 function depends on the flags, etc, but it's the best we can do.
924 '''
933 '''
925 global _re2
934 global _re2
926 if _re2 is None:
935 if _re2 is None:
927 self._checkre2()
936 self._checkre2()
928 if _re2:
937 if _re2:
929 return re2.escape
938 return re2.escape
930 else:
939 else:
931 return remod.escape
940 return remod.escape
932
941
933 re = _re()
942 re = _re()
934
943
935 _fspathcache = {}
944 _fspathcache = {}
936 def fspath(name, root):
945 def fspath(name, root):
937 '''Get name in the case stored in the filesystem
946 '''Get name in the case stored in the filesystem
938
947
939 The name should be relative to root, and be normcase-ed for efficiency.
948 The name should be relative to root, and be normcase-ed for efficiency.
940
949
941 Note that this function is unnecessary, and should not be
950 Note that this function is unnecessary, and should not be
942 called, for case-sensitive filesystems (simply because it's expensive).
951 called, for case-sensitive filesystems (simply because it's expensive).
943
952
944 The root should be normcase-ed, too.
953 The root should be normcase-ed, too.
945 '''
954 '''
946 def _makefspathcacheentry(dir):
955 def _makefspathcacheentry(dir):
947 return dict((normcase(n), n) for n in os.listdir(dir))
956 return dict((normcase(n), n) for n in os.listdir(dir))
948
957
949 seps = os.sep
958 seps = os.sep
950 if os.altsep:
959 if os.altsep:
951 seps = seps + os.altsep
960 seps = seps + os.altsep
952 # Protect backslashes. This gets silly very quickly.
961 # Protect backslashes. This gets silly very quickly.
953 seps.replace('\\','\\\\')
962 seps.replace('\\','\\\\')
954 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
963 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
955 dir = os.path.normpath(root)
964 dir = os.path.normpath(root)
956 result = []
965 result = []
957 for part, sep in pattern.findall(name):
966 for part, sep in pattern.findall(name):
958 if sep:
967 if sep:
959 result.append(sep)
968 result.append(sep)
960 continue
969 continue
961
970
962 if dir not in _fspathcache:
971 if dir not in _fspathcache:
963 _fspathcache[dir] = _makefspathcacheentry(dir)
972 _fspathcache[dir] = _makefspathcacheentry(dir)
964 contents = _fspathcache[dir]
973 contents = _fspathcache[dir]
965
974
966 found = contents.get(part)
975 found = contents.get(part)
967 if not found:
976 if not found:
968 # retry "once per directory" per "dirstate.walk" which
977 # retry "once per directory" per "dirstate.walk" which
969 # may take place for each patch of "hg qpush", for example
978 # may take place for each patch of "hg qpush", for example
970 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
979 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
971 found = contents.get(part)
980 found = contents.get(part)
972
981
973 result.append(found or part)
982 result.append(found or part)
974 dir = os.path.join(dir, part)
983 dir = os.path.join(dir, part)
975
984
976 return ''.join(result)
985 return ''.join(result)
977
986
978 def checknlink(testfile):
987 def checknlink(testfile):
979 '''check whether hardlink count reporting works properly'''
988 '''check whether hardlink count reporting works properly'''
980
989
981 # testfile may be open, so we need a separate file for checking to
990 # testfile may be open, so we need a separate file for checking to
982 # work around issue2543 (or testfile may get lost on Samba shares)
991 # work around issue2543 (or testfile may get lost on Samba shares)
983 f1 = testfile + ".hgtmp1"
992 f1 = testfile + ".hgtmp1"
984 if os.path.lexists(f1):
993 if os.path.lexists(f1):
985 return False
994 return False
986 try:
995 try:
987 posixfile(f1, 'w').close()
996 posixfile(f1, 'w').close()
988 except IOError:
997 except IOError:
989 return False
998 return False
990
999
991 f2 = testfile + ".hgtmp2"
1000 f2 = testfile + ".hgtmp2"
992 fd = None
1001 fd = None
993 try:
1002 try:
994 try:
1003 try:
995 oslink(f1, f2)
1004 oslink(f1, f2)
996 except OSError:
1005 except OSError:
997 return False
1006 return False
998
1007
999 # nlinks() may behave differently for files on Windows shares if
1008 # nlinks() may behave differently for files on Windows shares if
1000 # the file is open.
1009 # the file is open.
1001 fd = posixfile(f2)
1010 fd = posixfile(f2)
1002 return nlinks(f2) > 1
1011 return nlinks(f2) > 1
1003 finally:
1012 finally:
1004 if fd is not None:
1013 if fd is not None:
1005 fd.close()
1014 fd.close()
1006 for f in (f1, f2):
1015 for f in (f1, f2):
1007 try:
1016 try:
1008 os.unlink(f)
1017 os.unlink(f)
1009 except OSError:
1018 except OSError:
1010 pass
1019 pass
1011
1020
1012 def endswithsep(path):
1021 def endswithsep(path):
1013 '''Check path ends with os.sep or os.altsep.'''
1022 '''Check path ends with os.sep or os.altsep.'''
1014 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1023 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1015
1024
1016 def splitpath(path):
1025 def splitpath(path):
1017 '''Split path by os.sep.
1026 '''Split path by os.sep.
1018 Note that this function does not use os.altsep because this is
1027 Note that this function does not use os.altsep because this is
1019 an alternative to the simple "xxx.split(os.sep)".
1028 an alternative to the simple "xxx.split(os.sep)".
1020 It is recommended to use os.path.normpath() before using this
1029 It is recommended to use os.path.normpath() before using this
1021 function if needed.'''
1030 function if needed.'''
1022 return path.split(os.sep)
1031 return path.split(os.sep)
1023
1032
1024 def gui():
1033 def gui():
1025 '''Are we running in a GUI?'''
1034 '''Are we running in a GUI?'''
1026 if sys.platform == 'darwin':
1035 if sys.platform == 'darwin':
1027 if 'SSH_CONNECTION' in os.environ:
1036 if 'SSH_CONNECTION' in os.environ:
1028 # handle SSH access to a box where the user is logged in
1037 # handle SSH access to a box where the user is logged in
1029 return False
1038 return False
1030 elif getattr(osutil, 'isgui', None):
1039 elif getattr(osutil, 'isgui', None):
1031 # check if a CoreGraphics session is available
1040 # check if a CoreGraphics session is available
1032 return osutil.isgui()
1041 return osutil.isgui()
1033 else:
1042 else:
1034 # pure build; use a safe default
1043 # pure build; use a safe default
1035 return True
1044 return True
1036 else:
1045 else:
1037 return os.name == "nt" or os.environ.get("DISPLAY")
1046 return os.name == "nt" or os.environ.get("DISPLAY")
1038
1047
1039 def mktempcopy(name, emptyok=False, createmode=None):
1048 def mktempcopy(name, emptyok=False, createmode=None):
1040 """Create a temporary file with the same contents from name
1049 """Create a temporary file with the same contents from name
1041
1050
1042 The permission bits are copied from the original file.
1051 The permission bits are copied from the original file.
1043
1052
1044 If the temporary file is going to be truncated immediately, you
1053 If the temporary file is going to be truncated immediately, you
1045 can use emptyok=True as an optimization.
1054 can use emptyok=True as an optimization.
1046
1055
1047 Returns the name of the temporary file.
1056 Returns the name of the temporary file.
1048 """
1057 """
1049 d, fn = os.path.split(name)
1058 d, fn = os.path.split(name)
1050 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1059 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1051 os.close(fd)
1060 os.close(fd)
1052 # Temporary files are created with mode 0600, which is usually not
1061 # Temporary files are created with mode 0600, which is usually not
1053 # what we want. If the original file already exists, just copy
1062 # what we want. If the original file already exists, just copy
1054 # its mode. Otherwise, manually obey umask.
1063 # its mode. Otherwise, manually obey umask.
1055 copymode(name, temp, createmode)
1064 copymode(name, temp, createmode)
1056 if emptyok:
1065 if emptyok:
1057 return temp
1066 return temp
1058 try:
1067 try:
1059 try:
1068 try:
1060 ifp = posixfile(name, "rb")
1069 ifp = posixfile(name, "rb")
1061 except IOError, inst:
1070 except IOError, inst:
1062 if inst.errno == errno.ENOENT:
1071 if inst.errno == errno.ENOENT:
1063 return temp
1072 return temp
1064 if not getattr(inst, 'filename', None):
1073 if not getattr(inst, 'filename', None):
1065 inst.filename = name
1074 inst.filename = name
1066 raise
1075 raise
1067 ofp = posixfile(temp, "wb")
1076 ofp = posixfile(temp, "wb")
1068 for chunk in filechunkiter(ifp):
1077 for chunk in filechunkiter(ifp):
1069 ofp.write(chunk)
1078 ofp.write(chunk)
1070 ifp.close()
1079 ifp.close()
1071 ofp.close()
1080 ofp.close()
1072 except: # re-raises
1081 except: # re-raises
1073 try: os.unlink(temp)
1082 try: os.unlink(temp)
1074 except OSError: pass
1083 except OSError: pass
1075 raise
1084 raise
1076 return temp
1085 return temp
1077
1086
1078 class atomictempfile(object):
1087 class atomictempfile(object):
1079 '''writable file object that atomically updates a file
1088 '''writable file object that atomically updates a file
1080
1089
1081 All writes will go to a temporary copy of the original file. Call
1090 All writes will go to a temporary copy of the original file. Call
1082 close() when you are done writing, and atomictempfile will rename
1091 close() when you are done writing, and atomictempfile will rename
1083 the temporary copy to the original name, making the changes
1092 the temporary copy to the original name, making the changes
1084 visible. If the object is destroyed without being closed, all your
1093 visible. If the object is destroyed without being closed, all your
1085 writes are discarded.
1094 writes are discarded.
1086 '''
1095 '''
1087 def __init__(self, name, mode='w+b', createmode=None):
1096 def __init__(self, name, mode='w+b', createmode=None):
1088 self.__name = name # permanent name
1097 self.__name = name # permanent name
1089 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1098 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1090 createmode=createmode)
1099 createmode=createmode)
1091 self._fp = posixfile(self._tempname, mode)
1100 self._fp = posixfile(self._tempname, mode)
1092
1101
1093 # delegated methods
1102 # delegated methods
1094 self.write = self._fp.write
1103 self.write = self._fp.write
1095 self.seek = self._fp.seek
1104 self.seek = self._fp.seek
1096 self.tell = self._fp.tell
1105 self.tell = self._fp.tell
1097 self.fileno = self._fp.fileno
1106 self.fileno = self._fp.fileno
1098
1107
1099 def close(self):
1108 def close(self):
1100 if not self._fp.closed:
1109 if not self._fp.closed:
1101 self._fp.close()
1110 self._fp.close()
1102 rename(self._tempname, localpath(self.__name))
1111 rename(self._tempname, localpath(self.__name))
1103
1112
1104 def discard(self):
1113 def discard(self):
1105 if not self._fp.closed:
1114 if not self._fp.closed:
1106 try:
1115 try:
1107 os.unlink(self._tempname)
1116 os.unlink(self._tempname)
1108 except OSError:
1117 except OSError:
1109 pass
1118 pass
1110 self._fp.close()
1119 self._fp.close()
1111
1120
1112 def __del__(self):
1121 def __del__(self):
1113 if safehasattr(self, '_fp'): # constructor actually did something
1122 if safehasattr(self, '_fp'): # constructor actually did something
1114 self.discard()
1123 self.discard()
1115
1124
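A short sketch of the atomictempfile protocol described above (close() publishes, discard() throws the copy away); the filename is invented and mercurial.util is assumed importable:

    from mercurial import util

    f = util.atomictempfile('hints.txt')
    try:
        f.write('all or nothing\n')
        f.close()      # renames the temporary copy over hints.txt
    except Exception:
        f.discard()    # drop the temporary copy, leave hints.txt untouched
        raise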
1116 def makedirs(name, mode=None, notindexed=False):
1125 def makedirs(name, mode=None, notindexed=False):
1117 """recursive directory creation with parent mode inheritance"""
1126 """recursive directory creation with parent mode inheritance"""
1118 try:
1127 try:
1119 makedir(name, notindexed)
1128 makedir(name, notindexed)
1120 except OSError, err:
1129 except OSError, err:
1121 if err.errno == errno.EEXIST:
1130 if err.errno == errno.EEXIST:
1122 return
1131 return
1123 if err.errno != errno.ENOENT or not name:
1132 if err.errno != errno.ENOENT or not name:
1124 raise
1133 raise
1125 parent = os.path.dirname(os.path.abspath(name))
1134 parent = os.path.dirname(os.path.abspath(name))
1126 if parent == name:
1135 if parent == name:
1127 raise
1136 raise
1128 makedirs(parent, mode, notindexed)
1137 makedirs(parent, mode, notindexed)
1129 makedir(name, notindexed)
1138 makedir(name, notindexed)
1130 if mode is not None:
1139 if mode is not None:
1131 os.chmod(name, mode)
1140 os.chmod(name, mode)
1132
1141
1133 def ensuredirs(name, mode=None, notindexed=False):
1142 def ensuredirs(name, mode=None, notindexed=False):
1134 """race-safe recursive directory creation
1143 """race-safe recursive directory creation
1135
1144
1136 Newly created directories are marked as "not to be indexed by
1145 Newly created directories are marked as "not to be indexed by
1137 the content indexing service", if ``notindexed`` is specified
1146 the content indexing service", if ``notindexed`` is specified
1138 for "write" mode access.
1147 for "write" mode access.
1139 """
1148 """
1140 if os.path.isdir(name):
1149 if os.path.isdir(name):
1141 return
1150 return
1142 parent = os.path.dirname(os.path.abspath(name))
1151 parent = os.path.dirname(os.path.abspath(name))
1143 if parent != name:
1152 if parent != name:
1144 ensuredirs(parent, mode, notindexed)
1153 ensuredirs(parent, mode, notindexed)
1145 try:
1154 try:
1146 makedir(name, notindexed)
1155 makedir(name, notindexed)
1147 except OSError, err:
1156 except OSError, err:
1148 if err.errno == errno.EEXIST and os.path.isdir(name):
1157 if err.errno == errno.EEXIST and os.path.isdir(name):
1149 # someone else seems to have won a directory creation race
1158 # someone else seems to have won a directory creation race
1150 return
1159 return
1151 raise
1160 raise
1152 if mode is not None:
1161 if mode is not None:
1153 os.chmod(name, mode)
1162 os.chmod(name, mode)
1154
1163
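A sketch of the race-safe variant in use; the path is made up, and mercurial.util is assumed importable. Unlike makedirs(), ensuredirs() tolerates another process creating part of the tree concurrently:

    from mercurial import util

    # Safe even if a concurrent hg process creates some of these
    # directories first; mode applies to directories this call creates.
    util.ensuredirs('/tmp/demo-repo/.hg/store/data', mode=0o755)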
1155 def readfile(path):
1164 def readfile(path):
1156 fp = open(path, 'rb')
1165 fp = open(path, 'rb')
1157 try:
1166 try:
1158 return fp.read()
1167 return fp.read()
1159 finally:
1168 finally:
1160 fp.close()
1169 fp.close()
1161
1170
1162 def writefile(path, text):
1171 def writefile(path, text):
1163 fp = open(path, 'wb')
1172 fp = open(path, 'wb')
1164 try:
1173 try:
1165 fp.write(text)
1174 fp.write(text)
1166 finally:
1175 finally:
1167 fp.close()
1176 fp.close()
1168
1177
1169 def appendfile(path, text):
1178 def appendfile(path, text):
1170 fp = open(path, 'ab')
1179 fp = open(path, 'ab')
1171 try:
1180 try:
1172 fp.write(text)
1181 fp.write(text)
1173 finally:
1182 finally:
1174 fp.close()
1183 fp.close()
1175
1184
1176 class chunkbuffer(object):
1185 class chunkbuffer(object):
1177 """Allow arbitrary sized chunks of data to be efficiently read from an
1186 """Allow arbitrary sized chunks of data to be efficiently read from an
1178 iterator over chunks of arbitrary size."""
1187 iterator over chunks of arbitrary size."""
1179
1188
1180 def __init__(self, in_iter):
1189 def __init__(self, in_iter):
1181 """in_iter is the iterator that's iterating over the input chunks.
1190 """in_iter is the iterator that's iterating over the input chunks.
1182 targetsize is how big a buffer to try to maintain."""
1191 targetsize is how big a buffer to try to maintain."""
1183 def splitbig(chunks):
1192 def splitbig(chunks):
1184 for chunk in chunks:
1193 for chunk in chunks:
1185 if len(chunk) > 2**20:
1194 if len(chunk) > 2**20:
1186 pos = 0
1195 pos = 0
1187 while pos < len(chunk):
1196 while pos < len(chunk):
1188 end = pos + 2 ** 18
1197 end = pos + 2 ** 18
1189 yield chunk[pos:end]
1198 yield chunk[pos:end]
1190 pos = end
1199 pos = end
1191 else:
1200 else:
1192 yield chunk
1201 yield chunk
1193 self.iter = splitbig(in_iter)
1202 self.iter = splitbig(in_iter)
1194 self._queue = deque()
1203 self._queue = deque()
1195
1204
1196 def read(self, l=None):
1205 def read(self, l=None):
1197 """Read L bytes of data from the iterator of chunks of data.
1206 """Read L bytes of data from the iterator of chunks of data.
1198 Returns less than L bytes if the iterator runs dry.
1207 Returns less than L bytes if the iterator runs dry.
1199
1208
1200 If l is omitted, read everything."""
1209 If l is omitted, read everything."""
1201 left = l
1210 left = l
1202 buf = []
1211 buf = []
1203 queue = self._queue
1212 queue = self._queue
1204 while left is None or left > 0:
1213 while left is None or left > 0:
1205 # refill the queue
1214 # refill the queue
1206 if not queue:
1215 if not queue:
1207 target = 2**18
1216 target = 2**18
1208 for chunk in self.iter:
1217 for chunk in self.iter:
1209 queue.append(chunk)
1218 queue.append(chunk)
1210 target -= len(chunk)
1219 target -= len(chunk)
1211 if target <= 0:
1220 if target <= 0:
1212 break
1221 break
1213 if not queue:
1222 if not queue:
1214 break
1223 break
1215
1224
1216 chunk = queue.popleft()
1225 chunk = queue.popleft()
1217 if left is not None:
1226 if left is not None:
1218 left -= len(chunk)
1227 left -= len(chunk)
1219 if left is not None and left < 0:
1228 if left is not None and left < 0:
1220 queue.appendleft(chunk[left:])
1229 queue.appendleft(chunk[left:])
1221 buf.append(chunk[:left])
1230 buf.append(chunk[:left])
1222 else:
1231 else:
1223 buf.append(chunk)
1232 buf.append(chunk)
1224
1233
1225 return ''.join(buf)
1234 return ''.join(buf)
1226
1235
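A sketch showing how chunkbuffer lets a consumer read exact amounts from a producer that yields unevenly sized chunks (the chunks here are invented):

    from mercurial import util

    chunks = iter(['abc', 'defghij', 'k'])   # arbitrary-sized producer
    buf = util.chunkbuffer(chunks)
    assert buf.read(4) == 'abcd'             # exact-sized reads
    assert buf.read() == 'efghijk'           # no size: drain the rest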
1227 def filechunkiter(f, size=65536, limit=None):
1236 def filechunkiter(f, size=65536, limit=None):
1228 """Create a generator that produces the data in the file size
1237 """Create a generator that produces the data in the file size
1229 (default 65536) bytes at a time, up to optional limit (default is
1238 (default 65536) bytes at a time, up to optional limit (default is
1230 to read all data). Chunks may be less than size bytes if the
1239 to read all data). Chunks may be less than size bytes if the
1231 chunk is the last chunk in the file, or the file is a socket or
1240 chunk is the last chunk in the file, or the file is a socket or
1232 some other type of file that sometimes reads less data than is
1241 some other type of file that sometimes reads less data than is
1233 requested."""
1242 requested."""
1234 assert size >= 0
1243 assert size >= 0
1235 assert limit is None or limit >= 0
1244 assert limit is None or limit >= 0
1236 while True:
1245 while True:
1237 if limit is None:
1246 if limit is None:
1238 nbytes = size
1247 nbytes = size
1239 else:
1248 else:
1240 nbytes = min(limit, size)
1249 nbytes = min(limit, size)
1241 s = nbytes and f.read(nbytes)
1250 s = nbytes and f.read(nbytes)
1242 if not s:
1251 if not s:
1243 break
1252 break
1244 if limit:
1253 if limit:
1245 limit -= len(s)
1254 limit -= len(s)
1246 yield s
1255 yield s
1247
1256
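A sketch of a streaming copy built on filechunkiter, with invented filenames; the file is never read into memory as a whole:

    from mercurial import util

    src = open('big-input.bin', 'rb')    # hypothetical input file
    dst = open('big-output.bin', 'wb')
    try:
        for chunk in util.filechunkiter(src, size=131072):
            dst.write(chunk)
    finally:
        src.close()
        dst.close()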
1248 def makedate(timestamp=None):
1257 def makedate(timestamp=None):
1249 '''Return a unix timestamp (or the current time) as a (unixtime,
1258 '''Return a unix timestamp (or the current time) as a (unixtime,
1250 offset) tuple based off the local timezone.'''
1259 offset) tuple based off the local timezone.'''
1251 if timestamp is None:
1260 if timestamp is None:
1252 timestamp = time.time()
1261 timestamp = time.time()
1253 if timestamp < 0:
1262 if timestamp < 0:
1254 hint = _("check your clock")
1263 hint = _("check your clock")
1255 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1264 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1256 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1265 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1257 datetime.datetime.fromtimestamp(timestamp))
1266 datetime.datetime.fromtimestamp(timestamp))
1258 tz = delta.days * 86400 + delta.seconds
1267 tz = delta.days * 86400 + delta.seconds
1259 return timestamp, tz
1268 return timestamp, tz
1260
1269
1261 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1270 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1262 """represent a (unixtime, offset) tuple as a localized time.
1271 """represent a (unixtime, offset) tuple as a localized time.
1263 unixtime is seconds since the epoch, and offset is the time zone's
1272 unixtime is seconds since the epoch, and offset is the time zone's
1264 number of seconds away from UTC. "%1" and "%2" in the format (or
1273 number of seconds away from UTC. "%1" and "%2" in the format (or
1265 "%z") expand to the time zone offset."""
1274 "%z") expand to the time zone offset."""
1266 t, tz = date or makedate()
1275 t, tz = date or makedate()
1267 if t < 0:
1276 if t < 0:
1268 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1277 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1269 tz = 0
1278 tz = 0
1270 if "%1" in format or "%2" in format or "%z" in format:
1279 if "%1" in format or "%2" in format or "%z" in format:
1271 sign = (tz > 0) and "-" or "+"
1280 sign = (tz > 0) and "-" or "+"
1272 minutes = abs(tz) // 60
1281 minutes = abs(tz) // 60
1273 format = format.replace("%z", "%1%2")
1282 format = format.replace("%z", "%1%2")
1274 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1283 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1275 format = format.replace("%2", "%02d" % (minutes % 60))
1284 format = format.replace("%2", "%02d" % (minutes % 60))
1276 try:
1285 try:
1277 t = time.gmtime(float(t) - tz)
1286 t = time.gmtime(float(t) - tz)
1278 except ValueError:
1287 except ValueError:
1279 # time was out of range
1288 # time was out of range
1280 t = time.gmtime(sys.maxint)
1289 t = time.gmtime(sys.maxint)
1281 s = time.strftime(format, t)
1290 s = time.strftime(format, t)
1282 return s
1291 return s
1283
1292
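A sketch tying makedate() and datestr() together; mercurial.util is assumed importable, and the rendered value depends on the local clock and timezone, so no output is shown:

    from mercurial import util

    when = util.makedate()   # (unixtime, offset-from-UTC in seconds)
    # Default format; %1%2 expands to the +HHMM/-HHMM timezone suffix.
    print(util.datestr(when))
    print(util.datestr(when, format='%Y-%m-%d %H:%M %1%2'))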
1284 def shortdate(date=None):
1293 def shortdate(date=None):
1285 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1294 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1286 return datestr(date, format='%Y-%m-%d')
1295 return datestr(date, format='%Y-%m-%d')
1287
1296
1288 def strdate(string, format, defaults=[]):
1297 def strdate(string, format, defaults=[]):
1289 """parse a localized time string and return a (unixtime, offset) tuple.
1298 """parse a localized time string and return a (unixtime, offset) tuple.
1290 if the string cannot be parsed, ValueError is raised."""
1299 if the string cannot be parsed, ValueError is raised."""
1291 def timezone(string):
1300 def timezone(string):
1292 tz = string.split()[-1]
1301 tz = string.split()[-1]
1293 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1302 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1294 sign = (tz[0] == "+") and 1 or -1
1303 sign = (tz[0] == "+") and 1 or -1
1295 hours = int(tz[1:3])
1304 hours = int(tz[1:3])
1296 minutes = int(tz[3:5])
1305 minutes = int(tz[3:5])
1297 return -sign * (hours * 60 + minutes) * 60
1306 return -sign * (hours * 60 + minutes) * 60
1298 if tz == "GMT" or tz == "UTC":
1307 if tz == "GMT" or tz == "UTC":
1299 return 0
1308 return 0
1300 return None
1309 return None
1301
1310
1302 # NOTE: unixtime = localunixtime + offset
1311 # NOTE: unixtime = localunixtime + offset
1303 offset, date = timezone(string), string
1312 offset, date = timezone(string), string
1304 if offset is not None:
1313 if offset is not None:
1305 date = " ".join(string.split()[:-1])
1314 date = " ".join(string.split()[:-1])
1306
1315
1307 # add missing elements from defaults
1316 # add missing elements from defaults
1308 usenow = False # default to using biased defaults
1317 usenow = False # default to using biased defaults
1309 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1318 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1310 found = [True for p in part if ("%"+p) in format]
1319 found = [True for p in part if ("%"+p) in format]
1311 if not found:
1320 if not found:
1312 date += "@" + defaults[part][usenow]
1321 date += "@" + defaults[part][usenow]
1313 format += "@%" + part[0]
1322 format += "@%" + part[0]
1314 else:
1323 else:
1315 # We've found a specific time element, less specific time
1324 # We've found a specific time element, less specific time
1316 # elements are relative to today
1325 # elements are relative to today
1317 usenow = True
1326 usenow = True
1318
1327
1319 timetuple = time.strptime(date, format)
1328 timetuple = time.strptime(date, format)
1320 localunixtime = int(calendar.timegm(timetuple))
1329 localunixtime = int(calendar.timegm(timetuple))
1321 if offset is None:
1330 if offset is None:
1322 # local timezone
1331 # local timezone
1323 unixtime = int(time.mktime(timetuple))
1332 unixtime = int(time.mktime(timetuple))
1324 offset = unixtime - localunixtime
1333 offset = unixtime - localunixtime
1325 else:
1334 else:
1326 unixtime = localunixtime + offset
1335 unixtime = localunixtime + offset
1327 return unixtime, offset
1336 return unixtime, offset
1328
1337
1329 def parsedate(date, formats=None, bias={}):
1338 def parsedate(date, formats=None, bias={}):
1330 """parse a localized date/time and return a (unixtime, offset) tuple.
1339 """parse a localized date/time and return a (unixtime, offset) tuple.
1331
1340
1332 The date may be a "unixtime offset" string or in one of the specified
1341 The date may be a "unixtime offset" string or in one of the specified
1333 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1342 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1334
1343
1335 >>> parsedate(' today ') == parsedate(\
1344 >>> parsedate(' today ') == parsedate(\
1336 datetime.date.today().strftime('%b %d'))
1345 datetime.date.today().strftime('%b %d'))
1337 True
1346 True
1338 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1347 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1339 datetime.timedelta(days=1)\
1348 datetime.timedelta(days=1)\
1340 ).strftime('%b %d'))
1349 ).strftime('%b %d'))
1341 True
1350 True
1342 >>> now, tz = makedate()
1351 >>> now, tz = makedate()
1343 >>> strnow, strtz = parsedate('now')
1352 >>> strnow, strtz = parsedate('now')
1344 >>> (strnow - now) < 1
1353 >>> (strnow - now) < 1
1345 True
1354 True
1346 >>> tz == strtz
1355 >>> tz == strtz
1347 True
1356 True
1348 """
1357 """
1349 if not date:
1358 if not date:
1350 return 0, 0
1359 return 0, 0
1351 if isinstance(date, tuple) and len(date) == 2:
1360 if isinstance(date, tuple) and len(date) == 2:
1352 return date
1361 return date
1353 if not formats:
1362 if not formats:
1354 formats = defaultdateformats
1363 formats = defaultdateformats
1355 date = date.strip()
1364 date = date.strip()
1356
1365
1357 if date == 'now' or date == _('now'):
1366 if date == 'now' or date == _('now'):
1358 return makedate()
1367 return makedate()
1359 if date == 'today' or date == _('today'):
1368 if date == 'today' or date == _('today'):
1360 date = datetime.date.today().strftime('%b %d')
1369 date = datetime.date.today().strftime('%b %d')
1361 elif date == 'yesterday' or date == _('yesterday'):
1370 elif date == 'yesterday' or date == _('yesterday'):
1362 date = (datetime.date.today() -
1371 date = (datetime.date.today() -
1363 datetime.timedelta(days=1)).strftime('%b %d')
1372 datetime.timedelta(days=1)).strftime('%b %d')
1364
1373
1365 try:
1374 try:
1366 when, offset = map(int, date.split(' '))
1375 when, offset = map(int, date.split(' '))
1367 except ValueError:
1376 except ValueError:
1368 # fill out defaults
1377 # fill out defaults
1369 now = makedate()
1378 now = makedate()
1370 defaults = {}
1379 defaults = {}
1371 for part in ("d", "mb", "yY", "HI", "M", "S"):
1380 for part in ("d", "mb", "yY", "HI", "M", "S"):
1372 # this piece is for rounding the specific end of unknowns
1381 # this piece is for rounding the specific end of unknowns
1373 b = bias.get(part)
1382 b = bias.get(part)
1374 if b is None:
1383 if b is None:
1375 if part[0] in "HMS":
1384 if part[0] in "HMS":
1376 b = "00"
1385 b = "00"
1377 else:
1386 else:
1378 b = "0"
1387 b = "0"
1379
1388
1380 # this piece is for matching the generic end to today's date
1389 # this piece is for matching the generic end to today's date
1381 n = datestr(now, "%" + part[0])
1390 n = datestr(now, "%" + part[0])
1382
1391
1383 defaults[part] = (b, n)
1392 defaults[part] = (b, n)
1384
1393
1385 for format in formats:
1394 for format in formats:
1386 try:
1395 try:
1387 when, offset = strdate(date, format, defaults)
1396 when, offset = strdate(date, format, defaults)
1388 except (ValueError, OverflowError):
1397 except (ValueError, OverflowError):
1389 pass
1398 pass
1390 else:
1399 else:
1391 break
1400 break
1392 else:
1401 else:
1393 raise Abort(_('invalid date: %r') % date)
1402 raise Abort(_('invalid date: %r') % date)
1394 # validate explicit (probably user-specified) date and
1403 # validate explicit (probably user-specified) date and
1395 # time zone offset. values must fit in signed 32 bits for
1404 # time zone offset. values must fit in signed 32 bits for
1396 # current 32-bit linux runtimes. timezones go from UTC-12
1405 # current 32-bit linux runtimes. timezones go from UTC-12
1397 # to UTC+14
1406 # to UTC+14
1398 if abs(when) > 0x7fffffff:
1407 if abs(when) > 0x7fffffff:
1399 raise Abort(_('date exceeds 32 bits: %d') % when)
1408 raise Abort(_('date exceeds 32 bits: %d') % when)
1400 if when < 0:
1409 if when < 0:
1401 raise Abort(_('negative date value: %d') % when)
1410 raise Abort(_('negative date value: %d') % when)
1402 if offset < -50400 or offset > 43200:
1411 if offset < -50400 or offset > 43200:
1403 raise Abort(_('impossible time zone offset: %d') % offset)
1412 raise Abort(_('impossible time zone offset: %d') % offset)
1404 return when, offset
1413 return when, offset
1405
1414
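A small sketch of parsedate() on the inputs discussed above; the "unixtime offset" and tuple forms are exact, while human-readable strings are tried against the module's default format list (defined elsewhere in the file), so the last line is only assigned, not asserted:

    from mercurial import util

    assert util.parsedate('1425211200 0') == (1425211200, 0)
    assert util.parsedate((1425211200, 0)) == (1425211200, 0)
    # The offset comes from the string or, if absent, the local timezone.
    when, offset = util.parsedate('2015-03-01 12:00 +0000')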
1406 def matchdate(date):
1415 def matchdate(date):
1407 """Return a function that matches a given date match specifier
1416 """Return a function that matches a given date match specifier
1408
1417
1409 Formats include:
1418 Formats include:
1410
1419
1411 '{date}' match a given date to the accuracy provided
1420 '{date}' match a given date to the accuracy provided
1412
1421
1413 '<{date}' on or before a given date
1422 '<{date}' on or before a given date
1414
1423
1415 '>{date}' on or after a given date
1424 '>{date}' on or after a given date
1416
1425
1417 >>> p1 = parsedate("10:29:59")
1426 >>> p1 = parsedate("10:29:59")
1418 >>> p2 = parsedate("10:30:00")
1427 >>> p2 = parsedate("10:30:00")
1419 >>> p3 = parsedate("10:30:59")
1428 >>> p3 = parsedate("10:30:59")
1420 >>> p4 = parsedate("10:31:00")
1429 >>> p4 = parsedate("10:31:00")
1421 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1430 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1422 >>> f = matchdate("10:30")
1431 >>> f = matchdate("10:30")
1423 >>> f(p1[0])
1432 >>> f(p1[0])
1424 False
1433 False
1425 >>> f(p2[0])
1434 >>> f(p2[0])
1426 True
1435 True
1427 >>> f(p3[0])
1436 >>> f(p3[0])
1428 True
1437 True
1429 >>> f(p4[0])
1438 >>> f(p4[0])
1430 False
1439 False
1431 >>> f(p5[0])
1440 >>> f(p5[0])
1432 False
1441 False
1433 """
1442 """
1434
1443
1435 def lower(date):
1444 def lower(date):
1436 d = {'mb': "1", 'd': "1"}
1445 d = {'mb': "1", 'd': "1"}
1437 return parsedate(date, extendeddateformats, d)[0]
1446 return parsedate(date, extendeddateformats, d)[0]
1438
1447
1439 def upper(date):
1448 def upper(date):
1440 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1449 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1441 for days in ("31", "30", "29"):
1450 for days in ("31", "30", "29"):
1442 try:
1451 try:
1443 d["d"] = days
1452 d["d"] = days
1444 return parsedate(date, extendeddateformats, d)[0]
1453 return parsedate(date, extendeddateformats, d)[0]
1445 except Abort:
1454 except Abort:
1446 pass
1455 pass
1447 d["d"] = "28"
1456 d["d"] = "28"
1448 return parsedate(date, extendeddateformats, d)[0]
1457 return parsedate(date, extendeddateformats, d)[0]
1449
1458
1450 date = date.strip()
1459 date = date.strip()
1451
1460
1452 if not date:
1461 if not date:
1453 raise Abort(_("dates cannot consist entirely of whitespace"))
1462 raise Abort(_("dates cannot consist entirely of whitespace"))
1454 elif date[0] == "<":
1463 elif date[0] == "<":
1455 if not date[1:]:
1464 if not date[1:]:
1456 raise Abort(_("invalid day spec, use '<DATE'"))
1465 raise Abort(_("invalid day spec, use '<DATE'"))
1457 when = upper(date[1:])
1466 when = upper(date[1:])
1458 return lambda x: x <= when
1467 return lambda x: x <= when
1459 elif date[0] == ">":
1468 elif date[0] == ">":
1460 if not date[1:]:
1469 if not date[1:]:
1461 raise Abort(_("invalid day spec, use '>DATE'"))
1470 raise Abort(_("invalid day spec, use '>DATE'"))
1462 when = lower(date[1:])
1471 when = lower(date[1:])
1463 return lambda x: x >= when
1472 return lambda x: x >= when
1464 elif date[0] == "-":
1473 elif date[0] == "-":
1465 try:
1474 try:
1466 days = int(date[1:])
1475 days = int(date[1:])
1467 except ValueError:
1476 except ValueError:
1468 raise Abort(_("invalid day spec: %s") % date[1:])
1477 raise Abort(_("invalid day spec: %s") % date[1:])
1469 if days < 0:
1478 if days < 0:
1470 raise Abort(_('%s must be nonnegative (see "hg help dates")')
1479 raise Abort(_('%s must be nonnegative (see "hg help dates")')
1471 % date[1:])
1480 % date[1:])
1472 when = makedate()[0] - days * 3600 * 24
1481 when = makedate()[0] - days * 3600 * 24
1473 return lambda x: x >= when
1482 return lambda x: x >= when
1474 elif " to " in date:
1483 elif " to " in date:
1475 a, b = date.split(" to ")
1484 a, b = date.split(" to ")
1476 start, stop = lower(a), upper(b)
1485 start, stop = lower(a), upper(b)
1477 return lambda x: x >= start and x <= stop
1486 return lambda x: x >= start and x <= stop
1478 else:
1487 else:
1479 start, stop = lower(date), upper(date)
1488 start, stop = lower(date), upper(date)
1480 return lambda x: x >= start and x <= stop
1489 return lambda x: x >= start and x <= stop
1481
1490
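A sketch of matchdate() building a predicate over timestamps; the dates are invented, and this assumes the module's stock date format lists (defined elsewhere in the file) accept '%Y-%m' and '%Y-%m-%d %H:%M':

    from mercurial import util

    inmarch = util.matchdate('2015-03')   # the whole month, inclusive
    when, _offset = util.parsedate('2015-03-15 08:00 +0000')
    assert inmarch(when)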
1482 def shortuser(user):
1491 def shortuser(user):
1483 """Return a short representation of a user name or email address."""
1492 """Return a short representation of a user name or email address."""
1484 f = user.find('@')
1493 f = user.find('@')
1485 if f >= 0:
1494 if f >= 0:
1486 user = user[:f]
1495 user = user[:f]
1487 f = user.find('<')
1496 f = user.find('<')
1488 if f >= 0:
1497 if f >= 0:
1489 user = user[f + 1:]
1498 user = user[f + 1:]
1490 f = user.find(' ')
1499 f = user.find(' ')
1491 if f >= 0:
1500 if f >= 0:
1492 user = user[:f]
1501 user = user[:f]
1493 f = user.find('.')
1502 f = user.find('.')
1494 if f >= 0:
1503 if f >= 0:
1495 user = user[:f]
1504 user = user[:f]
1496 return user
1505 return user
1497
1506
1498 def emailuser(user):
1507 def emailuser(user):
1499 """Return the user portion of an email address."""
1508 """Return the user portion of an email address."""
1500 f = user.find('@')
1509 f = user.find('@')
1501 if f >= 0:
1510 if f >= 0:
1502 user = user[:f]
1511 user = user[:f]
1503 f = user.find('<')
1512 f = user.find('<')
1504 if f >= 0:
1513 if f >= 0:
1505 user = user[f + 1:]
1514 user = user[f + 1:]
1506 return user
1515 return user
1507
1516
1508 def email(author):
1517 def email(author):
1509 '''get email of author.'''
1518 '''get email of author.'''
1510 r = author.find('>')
1519 r = author.find('>')
1511 if r == -1:
1520 if r == -1:
1512 r = None
1521 r = None
1513 return author[author.find('<') + 1:r]
1522 return author[author.find('<') + 1:r]
1514
1523
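A sketch of the three author-string helpers above on a made-up author line:

    from mercurial import util

    author = 'Jane Q. Public <jane.public@example.com>'
    assert util.email(author) == 'jane.public@example.com'
    assert util.emailuser(author) == 'jane.public'
    assert util.shortuser(author) == 'jane'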
1515 def ellipsis(text, maxlength=400):
1524 def ellipsis(text, maxlength=400):
1516 """Trim string to at most maxlength (default: 400) columns in display."""
1525 """Trim string to at most maxlength (default: 400) columns in display."""
1517 return encoding.trim(text, maxlength, ellipsis='...')
1526 return encoding.trim(text, maxlength, ellipsis='...')
1518
1527
1519 def unitcountfn(*unittable):
1528 def unitcountfn(*unittable):
1520 '''return a function that renders a readable count of some quantity'''
1529 '''return a function that renders a readable count of some quantity'''
1521
1530
1522 def go(count):
1531 def go(count):
1523 for multiplier, divisor, format in unittable:
1532 for multiplier, divisor, format in unittable:
1524 if count >= divisor * multiplier:
1533 if count >= divisor * multiplier:
1525 return format % (count / float(divisor))
1534 return format % (count / float(divisor))
1526 return unittable[-1][2] % count
1535 return unittable[-1][2] % count
1527
1536
1528 return go
1537 return go
1529
1538
1530 bytecount = unitcountfn(
1539 bytecount = unitcountfn(
1531 (100, 1 << 30, _('%.0f GB')),
1540 (100, 1 << 30, _('%.0f GB')),
1532 (10, 1 << 30, _('%.1f GB')),
1541 (10, 1 << 30, _('%.1f GB')),
1533 (1, 1 << 30, _('%.2f GB')),
1542 (1, 1 << 30, _('%.2f GB')),
1534 (100, 1 << 20, _('%.0f MB')),
1543 (100, 1 << 20, _('%.0f MB')),
1535 (10, 1 << 20, _('%.1f MB')),
1544 (10, 1 << 20, _('%.1f MB')),
1536 (1, 1 << 20, _('%.2f MB')),
1545 (1, 1 << 20, _('%.2f MB')),
1537 (100, 1 << 10, _('%.0f KB')),
1546 (100, 1 << 10, _('%.0f KB')),
1538 (10, 1 << 10, _('%.1f KB')),
1547 (10, 1 << 10, _('%.1f KB')),
1539 (1, 1 << 10, _('%.2f KB')),
1548 (1, 1 << 10, _('%.2f KB')),
1540 (1, 1, _('%.0f bytes')),
1549 (1, 1, _('%.0f bytes')),
1541 )
1550 )
1542
1551
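A sketch of bytecount() selecting the first matching (multiplier, divisor, format) row of the table above, assuming no message translation is active:

    from mercurial import util

    assert util.bytecount(0) == '0 bytes'
    assert util.bytecount(1024) == '1.00 KB'
    assert util.bytecount(10 * 1024 * 1024) == '10.0 MB'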
1543 def uirepr(s):
1552 def uirepr(s):
1544 # Avoid double backslash in Windows path repr()
1553 # Avoid double backslash in Windows path repr()
1545 return repr(s).replace('\\\\', '\\')
1554 return repr(s).replace('\\\\', '\\')
1546
1555
1547 # delay import of textwrap
1556 # delay import of textwrap
1548 def MBTextWrapper(**kwargs):
1557 def MBTextWrapper(**kwargs):
1549 class tw(textwrap.TextWrapper):
1558 class tw(textwrap.TextWrapper):
1550 """
1559 """
1551 Extend TextWrapper for width-awareness.
1560 Extend TextWrapper for width-awareness.
1552
1561
1553 Neither the number of 'bytes' in any encoding nor the number of
1562 Neither the number of 'bytes' in any encoding nor the number of
1554 'characters' is appropriate for computing the terminal columns of a string.
1563 'characters' is appropriate for computing the terminal columns of a string.
1555 
1564 
1556 The original TextWrapper implementation uses the built-in 'len()'
1565 The original TextWrapper implementation uses the built-in 'len()'
1557 directly, so it must be overridden to use the width of each character.
1566 directly, so it must be overridden to use the width of each character.
1558 
1567 
1559 In addition, characters classified as 'ambiguous' width are treated
1568 In addition, characters classified as 'ambiguous' width are treated
1560 as wide in East Asian locales, but as narrow elsewhere.
1569 as wide in East Asian locales, but as narrow elsewhere.
1561 
1570 
1562 This requires a user decision to determine the width of such characters.
1571 This requires a user decision to determine the width of such characters.
1563 """
1572 """
1564 def __init__(self, **kwargs):
1573 def __init__(self, **kwargs):
1565 textwrap.TextWrapper.__init__(self, **kwargs)
1574 textwrap.TextWrapper.__init__(self, **kwargs)
1566
1575
1567 # for compatibility between 2.4 and 2.6
1576 # for compatibility between 2.4 and 2.6
1568 if getattr(self, 'drop_whitespace', None) is None:
1577 if getattr(self, 'drop_whitespace', None) is None:
1569 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1578 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1570
1579
1571 def _cutdown(self, ucstr, space_left):
1580 def _cutdown(self, ucstr, space_left):
1572 l = 0
1581 l = 0
1573 colwidth = encoding.ucolwidth
1582 colwidth = encoding.ucolwidth
1574 for i in xrange(len(ucstr)):
1583 for i in xrange(len(ucstr)):
1575 l += colwidth(ucstr[i])
1584 l += colwidth(ucstr[i])
1576 if space_left < l:
1585 if space_left < l:
1577 return (ucstr[:i], ucstr[i:])
1586 return (ucstr[:i], ucstr[i:])
1578 return ucstr, ''
1587 return ucstr, ''
1579
1588
1580 # overriding of base class
1589 # overriding of base class
1581 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1590 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1582 space_left = max(width - cur_len, 1)
1591 space_left = max(width - cur_len, 1)
1583
1592
1584 if self.break_long_words:
1593 if self.break_long_words:
1585 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1594 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1586 cur_line.append(cut)
1595 cur_line.append(cut)
1587 reversed_chunks[-1] = res
1596 reversed_chunks[-1] = res
1588 elif not cur_line:
1597 elif not cur_line:
1589 cur_line.append(reversed_chunks.pop())
1598 cur_line.append(reversed_chunks.pop())
1590
1599
1591 # this overriding code is imported from TextWrapper of python 2.6
1600 # this overriding code is imported from TextWrapper of python 2.6
1592 # to calculate the columns of a string using 'encoding.ucolwidth()'
1601 # to calculate the columns of a string using 'encoding.ucolwidth()'
1593 def _wrap_chunks(self, chunks):
1602 def _wrap_chunks(self, chunks):
1594 colwidth = encoding.ucolwidth
1603 colwidth = encoding.ucolwidth
1595
1604
1596 lines = []
1605 lines = []
1597 if self.width <= 0:
1606 if self.width <= 0:
1598 raise ValueError("invalid width %r (must be > 0)" % self.width)
1607 raise ValueError("invalid width %r (must be > 0)" % self.width)
1599
1608
1600 # Arrange in reverse order so items can be efficiently popped
1609 # Arrange in reverse order so items can be efficiently popped
1601 # from a stack of chunks.
1610 # from a stack of chunks.
1602 chunks.reverse()
1611 chunks.reverse()
1603
1612
1604 while chunks:
1613 while chunks:
1605
1614
1606 # Start the list of chunks that will make up the current line.
1615 # Start the list of chunks that will make up the current line.
1607 # cur_len is just the length of all the chunks in cur_line.
1616 # cur_len is just the length of all the chunks in cur_line.
1608 cur_line = []
1617 cur_line = []
1609 cur_len = 0
1618 cur_len = 0
1610
1619
1611 # Figure out which static string will prefix this line.
1620 # Figure out which static string will prefix this line.
1612 if lines:
1621 if lines:
1613 indent = self.subsequent_indent
1622 indent = self.subsequent_indent
1614 else:
1623 else:
1615 indent = self.initial_indent
1624 indent = self.initial_indent
1616
1625
1617 # Maximum width for this line.
1626 # Maximum width for this line.
1618 width = self.width - len(indent)
1627 width = self.width - len(indent)
1619
1628
1620 # First chunk on line is whitespace -- drop it, unless this
1629 # First chunk on line is whitespace -- drop it, unless this
1621 # is the very beginning of the text (i.e. no lines started yet).
1630 # is the very beginning of the text (i.e. no lines started yet).
1622 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1631 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1623 del chunks[-1]
1632 del chunks[-1]
1624
1633
1625 while chunks:
1634 while chunks:
1626 l = colwidth(chunks[-1])
1635 l = colwidth(chunks[-1])
1627
1636
1628 # Can at least squeeze this chunk onto the current line.
1637 # Can at least squeeze this chunk onto the current line.
1629 if cur_len + l <= width:
1638 if cur_len + l <= width:
1630 cur_line.append(chunks.pop())
1639 cur_line.append(chunks.pop())
1631 cur_len += l
1640 cur_len += l
1632
1641
1633 # Nope, this line is full.
1642 # Nope, this line is full.
1634 else:
1643 else:
1635 break
1644 break
1636
1645
1637 # The current line is full, and the next chunk is too big to
1646 # The current line is full, and the next chunk is too big to
1638 # fit on *any* line (not just this one).
1647 # fit on *any* line (not just this one).
1639 if chunks and colwidth(chunks[-1]) > width:
1648 if chunks and colwidth(chunks[-1]) > width:
1640 self._handle_long_word(chunks, cur_line, cur_len, width)
1649 self._handle_long_word(chunks, cur_line, cur_len, width)
1641
1650
1642 # If the last chunk on this line is all whitespace, drop it.
1651 # If the last chunk on this line is all whitespace, drop it.
1643 if (self.drop_whitespace and
1652 if (self.drop_whitespace and
1644 cur_line and cur_line[-1].strip() == ''):
1653 cur_line and cur_line[-1].strip() == ''):
1645 del cur_line[-1]
1654 del cur_line[-1]
1646
1655
1647 # Convert current line back to a string and store it in list
1656 # Convert current line back to a string and store it in list
1648 # of all lines (return value).
1657 # of all lines (return value).
1649 if cur_line:
1658 if cur_line:
1650 lines.append(indent + ''.join(cur_line))
1659 lines.append(indent + ''.join(cur_line))
1651
1660
1652 return lines
1661 return lines
1653
1662
1654 global MBTextWrapper
1663 global MBTextWrapper
1655 MBTextWrapper = tw
1664 MBTextWrapper = tw
1656 return tw(**kwargs)
1665 return tw(**kwargs)
1657
1666
1658 def wrap(line, width, initindent='', hangindent=''):
1667 def wrap(line, width, initindent='', hangindent=''):
1659 maxindent = max(len(hangindent), len(initindent))
1668 maxindent = max(len(hangindent), len(initindent))
1660 if width <= maxindent:
1669 if width <= maxindent:
1661 # adjust for weird terminal size
1670 # adjust for weird terminal size
1662 width = max(78, maxindent + 1)
1671 width = max(78, maxindent + 1)
1663 line = line.decode(encoding.encoding, encoding.encodingmode)
1672 line = line.decode(encoding.encoding, encoding.encodingmode)
1664 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1673 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1665 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1674 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1666 wrapper = MBTextWrapper(width=width,
1675 wrapper = MBTextWrapper(width=width,
1667 initial_indent=initindent,
1676 initial_indent=initindent,
1668 subsequent_indent=hangindent)
1677 subsequent_indent=hangindent)
1669 return wrapper.fill(line).encode(encoding.encoding)
1678 return wrapper.fill(line).encode(encoding.encoding)
1670
1679
1671 def iterlines(iterator):
1680 def iterlines(iterator):
1672 for chunk in iterator:
1681 for chunk in iterator:
1673 for line in chunk.splitlines():
1682 for line in chunk.splitlines():
1674 yield line
1683 yield line
1675
1684
1676 def expandpath(path):
1685 def expandpath(path):
1677 return os.path.expanduser(os.path.expandvars(path))
1686 return os.path.expanduser(os.path.expandvars(path))
1678
1687
1679 def hgcmd():
1688 def hgcmd():
1680 """Return the command used to execute current hg
1689 """Return the command used to execute current hg
1681
1690
1682 This is different from hgexecutable() because on Windows we want
1691 This is different from hgexecutable() because on Windows we want
1683 to avoid things opening new shell windows like batch files, so we
1692 to avoid things opening new shell windows like batch files, so we
1684 get either the python call or current executable.
1693 get either the python call or current executable.
1685 """
1694 """
1686 if mainfrozen():
1695 if mainfrozen():
1687 return [sys.executable]
1696 return [sys.executable]
1688 return gethgcmd()
1697 return gethgcmd()
1689
1698
1690 def rundetached(args, condfn):
1699 def rundetached(args, condfn):
1691 """Execute the argument list in a detached process.
1700 """Execute the argument list in a detached process.
1692
1701
1693 condfn is a callable which is called repeatedly and should return
1702 condfn is a callable which is called repeatedly and should return
1694 True once the child process is known to have started successfully.
1703 True once the child process is known to have started successfully.
1695 At this point, the child process PID is returned. If the child
1704 At this point, the child process PID is returned. If the child
1696 process fails to start or finishes before condfn() evaluates to
1705 process fails to start or finishes before condfn() evaluates to
1697 True, return -1.
1706 True, return -1.
1698 """
1707 """
1699 # Windows case is easier because the child process is either
1708 # Windows case is easier because the child process is either
1700 # successfully starting and validating the condition or exiting
1709 # successfully starting and validating the condition or exiting
1701 # on failure. We just poll on its PID. On Unix, if the child
1710 # on failure. We just poll on its PID. On Unix, if the child
1702 # process fails to start, it will be left in a zombie state until
1711 # process fails to start, it will be left in a zombie state until
1703 # the parent waits on it, which we cannot do since we expect a long
1712 # the parent waits on it, which we cannot do since we expect a long
1704 # running process on success. Instead we listen for SIGCHLD telling
1713 # running process on success. Instead we listen for SIGCHLD telling
1705 # us our child process terminated.
1714 # us our child process terminated.
1706 terminated = set()
1715 terminated = set()
1707 def handler(signum, frame):
1716 def handler(signum, frame):
1708 terminated.add(os.wait())
1717 terminated.add(os.wait())
1709 prevhandler = None
1718 prevhandler = None
1710 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1719 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1711 if SIGCHLD is not None:
1720 if SIGCHLD is not None:
1712 prevhandler = signal.signal(SIGCHLD, handler)
1721 prevhandler = signal.signal(SIGCHLD, handler)
1713 try:
1722 try:
1714 pid = spawndetached(args)
1723 pid = spawndetached(args)
1715 while not condfn():
1724 while not condfn():
1716 if ((pid in terminated or not testpid(pid))
1725 if ((pid in terminated or not testpid(pid))
1717 and not condfn()):
1726 and not condfn()):
1718 return -1
1727 return -1
1719 time.sleep(0.1)
1728 time.sleep(0.1)
1720 return pid
1729 return pid
1721 finally:
1730 finally:
1722 if prevhandler is not None:
1731 if prevhandler is not None:
1723 signal.signal(signal.SIGCHLD, prevhandler)
1732 signal.signal(signal.SIGCHLD, prevhandler)
1724
1733
1725 try:
1734 try:
1726 any, all = any, all
1735 any, all = any, all
1727 except NameError:
1736 except NameError:
1728 def any(iterable):
1737 def any(iterable):
1729 for i in iterable:
1738 for i in iterable:
1730 if i:
1739 if i:
1731 return True
1740 return True
1732 return False
1741 return False
1733
1742
1734 def all(iterable):
1743 def all(iterable):
1735 for i in iterable:
1744 for i in iterable:
1736 if not i:
1745 if not i:
1737 return False
1746 return False
1738 return True
1747 return True
1739
1748
1740 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1749 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1741 """Return the result of interpolating items in the mapping into string s.
1750 """Return the result of interpolating items in the mapping into string s.
1742
1751
1743 prefix is a single character string, or a two character string with
1752 prefix is a single character string, or a two character string with
1744 a backslash as the first character if the prefix needs to be escaped in
1753 a backslash as the first character if the prefix needs to be escaped in
1745 a regular expression.
1754 a regular expression.
1746
1755
1747 fn is an optional function that will be applied to the replacement text
1756 fn is an optional function that will be applied to the replacement text
1748 just before replacement.
1757 just before replacement.
1749
1758
1750 escape_prefix is an optional flag that allows a doubled prefix to be
1759 escape_prefix is an optional flag that allows a doubled prefix to be
1751 used as an escape for the prefix itself.
1760 used as an escape for the prefix itself.
1752 """
1761 """
1753 fn = fn or (lambda s: s)
1762 fn = fn or (lambda s: s)
1754 patterns = '|'.join(mapping.keys())
1763 patterns = '|'.join(mapping.keys())
1755 if escape_prefix:
1764 if escape_prefix:
1756 patterns += '|' + prefix
1765 patterns += '|' + prefix
1757 if len(prefix) > 1:
1766 if len(prefix) > 1:
1758 prefix_char = prefix[1:]
1767 prefix_char = prefix[1:]
1759 else:
1768 else:
1760 prefix_char = prefix
1769 prefix_char = prefix
1761 mapping[prefix_char] = prefix_char
1770 mapping[prefix_char] = prefix_char
1762 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1771 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1763 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1772 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1764
1773
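A sketch of interpolate() with an invented template and mapping; '%' is the prefix character here and is not special in the generated regular expression:

    from mercurial import util

    mapping = {'user': 'alice', 'rev': '42'}
    assert util.interpolate('%', mapping, 'rev %rev by %user') == 'rev 42 by alice'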
1765 def getport(port):
1774 def getport(port):
1766 """Return the port for a given network service.
1775 """Return the port for a given network service.
1767
1776
1768 If port is an integer, it's returned as is. If it's a string, it's
1777 If port is an integer, it's returned as is. If it's a string, it's
1769 looked up using socket.getservbyname(). If there's no matching
1778 looked up using socket.getservbyname(). If there's no matching
1770 service, util.Abort is raised.
1779 service, util.Abort is raised.
1771 """
1780 """
1772 try:
1781 try:
1773 return int(port)
1782 return int(port)
1774 except ValueError:
1783 except ValueError:
1775 pass
1784 pass
1776
1785
1777 try:
1786 try:
1778 return socket.getservbyname(port)
1787 return socket.getservbyname(port)
1779 except socket.error:
1788 except socket.error:
1780 raise Abort(_("no port number associated with service '%s'") % port)
1789 raise Abort(_("no port number associated with service '%s'") % port)
1781
1790
1782 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1791 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1783 '0': False, 'no': False, 'false': False, 'off': False,
1792 '0': False, 'no': False, 'false': False, 'off': False,
1784 'never': False}
1793 'never': False}
1785
1794
1786 def parsebool(s):
1795 def parsebool(s):
1787 """Parse s into a boolean.
1796 """Parse s into a boolean.
1788
1797
1789 If s is not a valid boolean, returns None.
1798 If s is not a valid boolean, returns None.
1790 """
1799 """
1791 return _booleans.get(s.lower(), None)
1800 return _booleans.get(s.lower(), None)
1792
1801
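A sketch of parsebool() over the spellings recognized by _booleans above (matching is case-insensitive):

    from mercurial import util

    assert util.parsebool('Yes') is True
    assert util.parsebool('off') is False
    assert util.parsebool('maybe') is None   # not a valid boolean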
1793 _hexdig = '0123456789ABCDEFabcdef'
1802 _hexdig = '0123456789ABCDEFabcdef'
1794 _hextochr = dict((a + b, chr(int(a + b, 16)))
1803 _hextochr = dict((a + b, chr(int(a + b, 16)))
1795 for a in _hexdig for b in _hexdig)
1804 for a in _hexdig for b in _hexdig)
1796
1805
1797 def _urlunquote(s):
1806 def _urlunquote(s):
1798 """Decode HTTP/HTML % encoding.
1807 """Decode HTTP/HTML % encoding.
1799
1808
1800 >>> _urlunquote('abc%20def')
1809 >>> _urlunquote('abc%20def')
1801 'abc def'
1810 'abc def'
1802 """
1811 """
1803 res = s.split('%')
1812 res = s.split('%')
1804 # fastpath
1813 # fastpath
1805 if len(res) == 1:
1814 if len(res) == 1:
1806 return s
1815 return s
1807 s = res[0]
1816 s = res[0]
1808 for item in res[1:]:
1817 for item in res[1:]:
1809 try:
1818 try:
1810 s += _hextochr[item[:2]] + item[2:]
1819 s += _hextochr[item[:2]] + item[2:]
1811 except KeyError:
1820 except KeyError:
1812 s += '%' + item
1821 s += '%' + item
1813 except UnicodeDecodeError:
1822 except UnicodeDecodeError:
1814 s += unichr(int(item[:2], 16)) + item[2:]
1823 s += unichr(int(item[:2], 16)) + item[2:]
1815 return s
1824 return s
1816
1825
1817 class url(object):
1826 class url(object):
1818 r"""Reliable URL parser.
1827 r"""Reliable URL parser.
1819
1828
1820 This parses URLs and provides attributes for the following
1829 This parses URLs and provides attributes for the following
1821 components:
1830 components:
1822
1831
1823 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1832 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1824
1833
1825 Missing components are set to None. The only exception is
1834 Missing components are set to None. The only exception is
1826 fragment, which is set to '' if present but empty.
1835 fragment, which is set to '' if present but empty.
1827
1836
1828 If parsefragment is False, fragment is included in query. If
1837 If parsefragment is False, fragment is included in query. If
1829 parsequery is False, query is included in path. If both are
1838 parsequery is False, query is included in path. If both are
1830 False, both fragment and query are included in path.
1839 False, both fragment and query are included in path.
1831
1840
1832 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1841 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1833
1842
1834 Note that for backward compatibility reasons, bundle URLs do not
1843 Note that for backward compatibility reasons, bundle URLs do not
1835 take host names. That means 'bundle://../' has a path of '../'.
1844 take host names. That means 'bundle://../' has a path of '../'.
1836
1845
1837 Examples:
1846 Examples:
1838
1847
1839 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1848 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1840 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1849 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1841 >>> url('ssh://[::1]:2200//home/joe/repo')
1850 >>> url('ssh://[::1]:2200//home/joe/repo')
1842 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1851 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1843 >>> url('file:///home/joe/repo')
1852 >>> url('file:///home/joe/repo')
1844 <url scheme: 'file', path: '/home/joe/repo'>
1853 <url scheme: 'file', path: '/home/joe/repo'>
1845 >>> url('file:///c:/temp/foo/')
1854 >>> url('file:///c:/temp/foo/')
1846 <url scheme: 'file', path: 'c:/temp/foo/'>
1855 <url scheme: 'file', path: 'c:/temp/foo/'>
1847 >>> url('bundle:foo')
1856 >>> url('bundle:foo')
1848 <url scheme: 'bundle', path: 'foo'>
1857 <url scheme: 'bundle', path: 'foo'>
1849 >>> url('bundle://../foo')
1858 >>> url('bundle://../foo')
1850 <url scheme: 'bundle', path: '../foo'>
1859 <url scheme: 'bundle', path: '../foo'>
1851 >>> url(r'c:\foo\bar')
1860 >>> url(r'c:\foo\bar')
1852 <url path: 'c:\\foo\\bar'>
1861 <url path: 'c:\\foo\\bar'>
1853 >>> url(r'\\blah\blah\blah')
1862 >>> url(r'\\blah\blah\blah')
1854 <url path: '\\\\blah\\blah\\blah'>
1863 <url path: '\\\\blah\\blah\\blah'>
1855 >>> url(r'\\blah\blah\blah#baz')
1864 >>> url(r'\\blah\blah\blah#baz')
1856 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1865 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1857 >>> url(r'file:///C:\users\me')
1866 >>> url(r'file:///C:\users\me')
1858 <url scheme: 'file', path: 'C:\\users\\me'>
1867 <url scheme: 'file', path: 'C:\\users\\me'>
1859
1868
1860 Authentication credentials:
1869 Authentication credentials:
1861
1870
1862 >>> url('ssh://joe:xyz@x/repo')
1871 >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLs
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

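A minimal usage sketch (editor's illustration, not part of the changeset; it assumes the module is importable as mercurial.util under Python 2, as in the surrounding code) showing how the parser above treats a bracketed IPv6 authority:

    from mercurial import util

    u = util.url('ssh://joe:xyz@[::1]:2200/repo')
    # The brackets keep the address from being split at its internal colons;
    # only the trailing ':2200' is peeled off as the port.
    assert u.scheme == 'ssh'
    assert u.user == 'joe' and u.passwd == 'xyz'
    assert u.host == '[::1]' and u.port == '2200'
    assert u.path == 'repo'
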
    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

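A hedged sketch of authinfo(): it yields the URL with credentials stripped plus the tuple handed to urllib2's password manager; the expected values below are the editor's, inferred from the code above rather than verified output:

    from mercurial import util

    u = util.url('https://alice:secret@example.com/repo')
    cleanurl, authdata = u.authinfo()
    # cleanurl == 'https://example.com/repo'
    # authdata == (None, (cleanurl, 'example.com'), 'alice', 'secret')
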
    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')

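The distinction between localpath() and the original string matters mostly for file: and bundle: URLs; a short illustrative sketch (editor's addition, expected values inferred from the code above):

    from mercurial import util

    util.url('file:///tmp/repo').localpath()     # '/tmp/repo'
    util.url('file:///c:/tmp/repo').localpath()  # 'c:/tmp/repo'
    util.url('relative/path').islocal()          # True  (no scheme)
    util.url('https://example.com/').islocal()   # False
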
def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()

def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

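Illustrative calls for the three small helpers above (editor's sketch):

    from mercurial import util

    util.hasscheme('http://example.com')    # True
    util.hasscheme('relative/path')         # False
    util.hasdriveletter('c:/work')          # True
    util.urllocalpath('file:///tmp/repo')   # '/tmp/repo'
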
def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return str(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)

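Typical use is scrubbing credentials before a URL is echoed or logged; a hedged sketch:

    from mercurial import util

    util.hidepassword('https://bob:hunter2@example.com/repo')
    # -> 'https://bob:***@example.com/repo'
    util.removeauth('https://bob:hunter2@example.com/repo')
    # -> 'https://example.com/repo'
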
def isatty(fd):
    try:
        return fd.isatty()
    except AttributeError:
        return False

timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

        @util.timed
        def foo(a, b, c):
            pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper

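A development-time sketch of the decorator (editor's illustration; the exact output depends on timecount() and the nesting depth tracked in _timenesting, so the lines shown are only indicative):

    import time
    from mercurial import util

    @util.timed
    def inner():
        time.sleep(0.01)

    @util.timed
    def outer():
        inner()

    outer()
    # stderr shows something like (nested calls are indented):
    #   inner: 10.10 ms
    # outer: 10.20 ms
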
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

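sizetoint() is handy for user-supplied limits such as configuration values; a hedged sketch including the failure mode:

    from mercurial import error, util

    limit = util.sizetoint('10MB')    # 10485760
    try:
        util.sizetoint('lots')
    except error.ParseError:          # raised for unparsable sizes
        pass
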
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behaviour. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results

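A hedged usage sketch of the hooks container: callbacks registered under different source names run in lexicographic order of those names, and the call returns their individual results:

    from mercurial import util

    h = util.hooks()
    h.add('zzz-ext', lambda value: value + 1)
    h.add('aaa-ext', lambda value: value * 2)
    h(10)   # -> [20, 11]  ('aaa-ext' runs before 'zzz-ext')
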
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require a ui object.
    Not meant for use in production code, but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    entries = [('%s:%s' % (fn, ln), func)
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (fnmax, fnln, func))
    f.flush()

# convenient shortcut
dst = debugstacktrace
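An editor's sketch of how debugstacktrace (or its dst shortcut) is typically dropped into code while investigating a call path; the message plus the formatted call stack go to stderr, after stdout has been flushed:

    from mercurial import util

    def suspicious():
        util.dst('reached suspicious()')

    suspicious()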