util: use try/except/finally
Matt Mackall
r25088:754df8e9 default
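The only code change in this revision is in checknlink(): a nested try/except wrapped in an outer try/finally is collapsed into a single try/except/finally statement (the marked lines in the hunk below). The flat form requires Python 2.5 or later, consistent with the 2.4/2.5 fallbacks this module already carries. As a rough sketch of the pattern, using a hypothetical helper invented for illustration rather than taken from util.py:

# Hypothetical example, not part of util.py: the same cleanup written both ways.

# Python 2.4 style: 'except' and 'finally' cannot share one 'try' statement,
# so error handling needs its own nested 'try' block.
def read_first_line_nested(path):
    fp = None
    try:
        try:
            fp = open(path)        # may raise IOError
            return fp.readline()
        except IOError:
            return None
    finally:
        if fp is not None:
            fp.close()             # runs on success and on failure

# Python 2.5+ style, the shape this commit gives checknlink(): one flat
# try/except/finally block with the same behaviour.
def read_first_line_flat(path):
    fp = None
    try:
        fp = open(path)            # may raise IOError
        return fp.readline()
    except IOError:
        return None
    finally:
        if fp is not None:
            fp.close()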
@@ -1,2290 +1,2288 @@
# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""

import i18n
_ = i18n._
import error, osutil, encoding, parsers
import errno, shutil, sys, tempfile, traceback
import re as remod
import os, time, datetime, calendar, textwrap, signal, collections
import imp, socket, urllib, struct
import gc

if os.name == 'nt':
    import windows as platform
else:
    import posix as platform

cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username

# Python compatibility

_notset = object()

def safehasattr(thing, attr):
    return getattr(thing, attr, _notset) is not _notset

def sha1(s=''):
    '''
    Low-overhead wrapper around Python's SHA support

    >>> f = _fastsha1
    >>> a = sha1()
    >>> a = f()
    >>> a.hexdigest()
    'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    '''

    return _fastsha1(s)

def _fastsha1(s=''):
    # This function will import sha1 from hashlib or sha (whichever is
    # available) and overwrite itself with it on the first call.
    # Subsequent calls will go directly to the imported function.
    if sys.version_info >= (2, 5):
        from hashlib import sha1 as _sha1
    else:
        from sha import sha as _sha1
    global _fastsha1, sha1
    _fastsha1 = sha1 = _sha1
    return _sha1(s)

def md5(s=''):
    try:
        from hashlib import md5 as _md5
    except ImportError:
        from md5 import md5 as _md5
    global md5
    md5 = _md5
    return _md5(s)

DIGESTS = {
    'md5': md5,
    'sha1': sha1,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha1', 'md5']

try:
    import hashlib
    DIGESTS.update({
        'sha512': hashlib.sha512,
    })
    DIGESTS_BY_STRENGTH.insert(0, 'sha512')
except ImportError:
    pass

for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None

class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))

try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]

import subprocess
closefds = os.name == 'posix'

def unpacker(fmt):
    """create a struct unpacker for the specified format"""
    try:
        # 2.5+
        return struct.Struct(fmt).unpack
    except AttributeError:
        # 2.4
        return lambda buf: struct.unpack(fmt, buf)

def popen2(cmd, env=None, newlines=False):
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout

def popen3(cmd, env=None, newlines=False):
    stdin, stdout, stderr, p = popen4(cmd, env, newlines)
    return stdin, stdout, stderr

def popen4(cmd, env=None, newlines=False):
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr, p

def version():
    """Return version information if available."""
    try:
        import __version__
        return __version__.version
    except ImportError:
        return 'unknown'

# used by parsedate
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)

def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    if func.func_code.co_argcount == 0:
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f

try:
    collections.deque.remove
    deque = collections.deque
except AttributeError:
    # python 2.4 lacks deque.remove
    class deque(collections.deque):
        def remove(self, val):
            for i, v in enumerate(self):
                if v == val:
                    del self[i]
                    break

class sortdict(dict):
    '''a simple sorted dictionary'''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            pass
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)

class lrucachedict(object):
    '''cache most recent gets from or sets to this dictionary'''
    def __init__(self, maxsize):
        self._cache = {}
        self._maxsize = maxsize
        self._order = deque()

    def __getitem__(self, key):
        value = self._cache[key]
        self._order.remove(key)
        self._order.append(key)
        return value

    def __setitem__(self, key, value):
        if key not in self._cache:
            if len(self._cache) >= self._maxsize:
                del self._cache[self._order.popleft()]
        else:
            self._order.remove(key)
        self._cache[key] = value
        self._order.append(key)

    def __contains__(self, key):
        return key in self._cache

    def clear(self):
        self._cache.clear()
        self._order = deque()

def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = deque()
    if func.func_code.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f

class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value

def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout

def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)

def binary(s):
    """return true if a string is binary data"""
    return bool(s and '\0' in s)

def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)

Abort = error.Abort

def always(fn):
    return True

def never(fn):
    return False

def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if gcenabled:
                gc.enable()
    return wrapper

def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'

def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze

# the location of data files matching the source code
if mainfrozen():
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable

def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path

def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or out == sys.__stdout__:
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
        if sys.platform == 'OpenVMS' and rc & 1:
            rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc

def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check

def copyfile(src, dest, hardlink=False):
    "copy a file, preserving mode and atime/mtime"
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error, inst:
            raise Abort(str(inst))

def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num

_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t

if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename

def makelock(info, pathname):
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    try:
        return os.readlink(pathname)
    except OSError, why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r

def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)

# File system features

def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True

try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False

class _re(object):
    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

re = _re()

_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    seps.replace('\\','\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)

def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
1005 try:
1005 try:
1006 try:
1006 oslink(f1, f2)
1007 oslink(f1, f2)
1008 except OSError:
1009 return False
1010
1011 # nlinks() may behave differently for files on Windows shares if
1007 # nlinks() may behave differently for files on Windows shares if
1012 # the file is open.
1008 # the file is open.
1013 fd = posixfile(f2)
1009 fd = posixfile(f2)
1014 return nlinks(f2) > 1
1010 return nlinks(f2) > 1
1011 except OSError:
1012 return False
1015 finally:
1013 finally:
1016 if fd is not None:
1014 if fd is not None:
1017 fd.close()
1015 fd.close()
1018 for f in (f1, f2):
1016 for f in (f1, f2):
1019 try:
1017 try:
1020 os.unlink(f)
1018 os.unlink(f)
1021 except OSError:
1019 except OSError:
1022 pass
1020 pass
1023
1021
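# A minimal sketch of how checknlink() can be used: probe a directory before
# trusting nlinks() results for files stored under it. The 'storedir'
# argument and the probe file name are hypothetical.
def _cantrusthardlinks(storedir):
    return checknlink(os.path.join(storedir, 'probe'))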
1024 def endswithsep(path):
1022 def endswithsep(path):
1025 '''Check path ends with os.sep or os.altsep.'''
1023 '''Check path ends with os.sep or os.altsep.'''
1026 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1024 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1027
1025
1028 def splitpath(path):
1026 def splitpath(path):
1029 '''Split path by os.sep.
1027 '''Split path by os.sep.
1030 Note that this function does not use os.altsep because this is
1028 Note that this function does not use os.altsep because this is
1031 an alternative to a simple "xxx.split(os.sep)".
1029 an alternative to a simple "xxx.split(os.sep)".
1032 It is recommended to use os.path.normpath() before using this
1030 It is recommended to use os.path.normpath() before using this
1033 function if needed.'''
1031 function if needed.'''
1034 return path.split(os.sep)
1032 return path.split(os.sep)
1035
1033
1036 def gui():
1034 def gui():
1037 '''Are we running in a GUI?'''
1035 '''Are we running in a GUI?'''
1038 if sys.platform == 'darwin':
1036 if sys.platform == 'darwin':
1039 if 'SSH_CONNECTION' in os.environ:
1037 if 'SSH_CONNECTION' in os.environ:
1040 # handle SSH access to a box where the user is logged in
1038 # handle SSH access to a box where the user is logged in
1041 return False
1039 return False
1042 elif getattr(osutil, 'isgui', None):
1040 elif getattr(osutil, 'isgui', None):
1043 # check if a CoreGraphics session is available
1041 # check if a CoreGraphics session is available
1044 return osutil.isgui()
1042 return osutil.isgui()
1045 else:
1043 else:
1046 # pure build; use a safe default
1044 # pure build; use a safe default
1047 return True
1045 return True
1048 else:
1046 else:
1049 return os.name == "nt" or os.environ.get("DISPLAY")
1047 return os.name == "nt" or os.environ.get("DISPLAY")
1050
1048
1051 def mktempcopy(name, emptyok=False, createmode=None):
1049 def mktempcopy(name, emptyok=False, createmode=None):
1052 """Create a temporary file with the same contents from name
1050 """Create a temporary file with the same contents from name
1053
1051
1054 The permission bits are copied from the original file.
1052 The permission bits are copied from the original file.
1055
1053
1056 If the temporary file is going to be truncated immediately, you
1054 If the temporary file is going to be truncated immediately, you
1057 can use emptyok=True as an optimization.
1055 can use emptyok=True as an optimization.
1058
1056
1059 Returns the name of the temporary file.
1057 Returns the name of the temporary file.
1060 """
1058 """
1061 d, fn = os.path.split(name)
1059 d, fn = os.path.split(name)
1062 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1060 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1063 os.close(fd)
1061 os.close(fd)
1064 # Temporary files are created with mode 0600, which is usually not
1062 # Temporary files are created with mode 0600, which is usually not
1065 # what we want. If the original file already exists, just copy
1063 # what we want. If the original file already exists, just copy
1066 # its mode. Otherwise, manually obey umask.
1064 # its mode. Otherwise, manually obey umask.
1067 copymode(name, temp, createmode)
1065 copymode(name, temp, createmode)
1068 if emptyok:
1066 if emptyok:
1069 return temp
1067 return temp
1070 try:
1068 try:
1071 try:
1069 try:
1072 ifp = posixfile(name, "rb")
1070 ifp = posixfile(name, "rb")
1073 except IOError, inst:
1071 except IOError, inst:
1074 if inst.errno == errno.ENOENT:
1072 if inst.errno == errno.ENOENT:
1075 return temp
1073 return temp
1076 if not getattr(inst, 'filename', None):
1074 if not getattr(inst, 'filename', None):
1077 inst.filename = name
1075 inst.filename = name
1078 raise
1076 raise
1079 ofp = posixfile(temp, "wb")
1077 ofp = posixfile(temp, "wb")
1080 for chunk in filechunkiter(ifp):
1078 for chunk in filechunkiter(ifp):
1081 ofp.write(chunk)
1079 ofp.write(chunk)
1082 ifp.close()
1080 ifp.close()
1083 ofp.close()
1081 ofp.close()
1084 except: # re-raises
1082 except: # re-raises
1085 try: os.unlink(temp)
1083 try: os.unlink(temp)
1086 except OSError: pass
1084 except OSError: pass
1087 raise
1085 raise
1088 return temp
1086 return temp
1089
1087
1090 class atomictempfile(object):
1088 class atomictempfile(object):
1091 '''writable file object that atomically updates a file
1089 '''writable file object that atomically updates a file
1092
1090
1093 All writes will go to a temporary copy of the original file. Call
1091 All writes will go to a temporary copy of the original file. Call
1094 close() when you are done writing, and atomictempfile will rename
1092 close() when you are done writing, and atomictempfile will rename
1095 the temporary copy to the original name, making the changes
1093 the temporary copy to the original name, making the changes
1096 visible. If the object is destroyed without being closed, all your
1094 visible. If the object is destroyed without being closed, all your
1097 writes are discarded.
1095 writes are discarded.
1098 '''
1096 '''
1099 def __init__(self, name, mode='w+b', createmode=None):
1097 def __init__(self, name, mode='w+b', createmode=None):
1100 self.__name = name # permanent name
1098 self.__name = name # permanent name
1101 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1099 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1102 createmode=createmode)
1100 createmode=createmode)
1103 self._fp = posixfile(self._tempname, mode)
1101 self._fp = posixfile(self._tempname, mode)
1104
1102
1105 # delegated methods
1103 # delegated methods
1106 self.write = self._fp.write
1104 self.write = self._fp.write
1107 self.seek = self._fp.seek
1105 self.seek = self._fp.seek
1108 self.tell = self._fp.tell
1106 self.tell = self._fp.tell
1109 self.fileno = self._fp.fileno
1107 self.fileno = self._fp.fileno
1110
1108
1111 def close(self):
1109 def close(self):
1112 if not self._fp.closed:
1110 if not self._fp.closed:
1113 self._fp.close()
1111 self._fp.close()
1114 rename(self._tempname, localpath(self.__name))
1112 rename(self._tempname, localpath(self.__name))
1115
1113
1116 def discard(self):
1114 def discard(self):
1117 if not self._fp.closed:
1115 if not self._fp.closed:
1118 try:
1116 try:
1119 os.unlink(self._tempname)
1117 os.unlink(self._tempname)
1120 except OSError:
1118 except OSError:
1121 pass
1119 pass
1122 self._fp.close()
1120 self._fp.close()
1123
1121
1124 def __del__(self):
1122 def __del__(self):
1125 if safehasattr(self, '_fp'): # constructor actually did something
1123 if safehasattr(self, '_fp'): # constructor actually did something
1126 self.discard()
1124 self.discard()
1127
1125
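# A minimal sketch of using atomictempfile to replace a file atomically;
# 'path' and 'text' are hypothetical arguments. Readers of 'path' never see
# a partial write: close() renames the temp copy into place, and discard()
# throws the temp copy away on failure.
def _atomicwrite(path, text):
    f = atomictempfile(path)
    try:
        f.write(text)
        f.close()
    except: # re-raises
        f.discard()
        raise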
1128 def makedirs(name, mode=None, notindexed=False):
1126 def makedirs(name, mode=None, notindexed=False):
1129 """recursive directory creation with parent mode inheritance"""
1127 """recursive directory creation with parent mode inheritance"""
1130 try:
1128 try:
1131 makedir(name, notindexed)
1129 makedir(name, notindexed)
1132 except OSError, err:
1130 except OSError, err:
1133 if err.errno == errno.EEXIST:
1131 if err.errno == errno.EEXIST:
1134 return
1132 return
1135 if err.errno != errno.ENOENT or not name:
1133 if err.errno != errno.ENOENT or not name:
1136 raise
1134 raise
1137 parent = os.path.dirname(os.path.abspath(name))
1135 parent = os.path.dirname(os.path.abspath(name))
1138 if parent == name:
1136 if parent == name:
1139 raise
1137 raise
1140 makedirs(parent, mode, notindexed)
1138 makedirs(parent, mode, notindexed)
1141 makedir(name, notindexed)
1139 makedir(name, notindexed)
1142 if mode is not None:
1140 if mode is not None:
1143 os.chmod(name, mode)
1141 os.chmod(name, mode)
1144
1142
1145 def ensuredirs(name, mode=None, notindexed=False):
1143 def ensuredirs(name, mode=None, notindexed=False):
1146 """race-safe recursive directory creation
1144 """race-safe recursive directory creation
1147
1145
1148 Newly created directories are marked as "not to be indexed by
1146 Newly created directories are marked as "not to be indexed by
1149 the content indexing service", if ``notindexed`` is specified
1147 the content indexing service", if ``notindexed`` is specified
1150 for "write" mode access.
1148 for "write" mode access.
1151 """
1149 """
1152 if os.path.isdir(name):
1150 if os.path.isdir(name):
1153 return
1151 return
1154 parent = os.path.dirname(os.path.abspath(name))
1152 parent = os.path.dirname(os.path.abspath(name))
1155 if parent != name:
1153 if parent != name:
1156 ensuredirs(parent, mode, notindexed)
1154 ensuredirs(parent, mode, notindexed)
1157 try:
1155 try:
1158 makedir(name, notindexed)
1156 makedir(name, notindexed)
1159 except OSError, err:
1157 except OSError, err:
1160 if err.errno == errno.EEXIST and os.path.isdir(name):
1158 if err.errno == errno.EEXIST and os.path.isdir(name):
1161 # someone else seems to have won a directory creation race
1159 # someone else seems to have won a directory creation race
1162 return
1160 return
1163 raise
1161 raise
1164 if mode is not None:
1162 if mode is not None:
1165 os.chmod(name, mode)
1163 os.chmod(name, mode)
1166
1164
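# A minimal sketch of ensuredirs(): create a nested directory tree without
# failing if another process creates part of it concurrently. The 'base'
# argument and the path components are hypothetical.
def _preparecachedir(base):
    ensuredirs(os.path.join(base, 'cache', 'checks'))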
1167 def readfile(path):
1165 def readfile(path):
1168 fp = open(path, 'rb')
1166 fp = open(path, 'rb')
1169 try:
1167 try:
1170 return fp.read()
1168 return fp.read()
1171 finally:
1169 finally:
1172 fp.close()
1170 fp.close()
1173
1171
1174 def writefile(path, text):
1172 def writefile(path, text):
1175 fp = open(path, 'wb')
1173 fp = open(path, 'wb')
1176 try:
1174 try:
1177 fp.write(text)
1175 fp.write(text)
1178 finally:
1176 finally:
1179 fp.close()
1177 fp.close()
1180
1178
1181 def appendfile(path, text):
1179 def appendfile(path, text):
1182 fp = open(path, 'ab')
1180 fp = open(path, 'ab')
1183 try:
1181 try:
1184 fp.write(text)
1182 fp.write(text)
1185 finally:
1183 finally:
1186 fp.close()
1184 fp.close()
1187
1185
1188 class chunkbuffer(object):
1186 class chunkbuffer(object):
1189 """Allow arbitrary sized chunks of data to be efficiently read from an
1187 """Allow arbitrary sized chunks of data to be efficiently read from an
1190 iterator over chunks of arbitrary size."""
1188 iterator over chunks of arbitrary size."""
1191
1189
1192 def __init__(self, in_iter):
1190 def __init__(self, in_iter):
1193 """in_iter is the iterator that's iterating over the input chunks.
1191 """in_iter is the iterator that's iterating over the input chunks.
1194 targetsize is how big a buffer to try to maintain."""
1192 targetsize is how big a buffer to try to maintain."""
1195 def splitbig(chunks):
1193 def splitbig(chunks):
1196 for chunk in chunks:
1194 for chunk in chunks:
1197 if len(chunk) > 2**20:
1195 if len(chunk) > 2**20:
1198 pos = 0
1196 pos = 0
1199 while pos < len(chunk):
1197 while pos < len(chunk):
1200 end = pos + 2 ** 18
1198 end = pos + 2 ** 18
1201 yield chunk[pos:end]
1199 yield chunk[pos:end]
1202 pos = end
1200 pos = end
1203 else:
1201 else:
1204 yield chunk
1202 yield chunk
1205 self.iter = splitbig(in_iter)
1203 self.iter = splitbig(in_iter)
1206 self._queue = deque()
1204 self._queue = deque()
1207
1205
1208 def read(self, l=None):
1206 def read(self, l=None):
1209 """Read L bytes of data from the iterator of chunks of data.
1207 """Read L bytes of data from the iterator of chunks of data.
1210 Returns less than L bytes if the iterator runs dry.
1208 Returns less than L bytes if the iterator runs dry.
1211
1209
1212 If l is omitted, read everything."""
1210 If l is omitted, read everything."""
1213 left = l
1211 left = l
1214 buf = []
1212 buf = []
1215 queue = self._queue
1213 queue = self._queue
1216 while left is None or left > 0:
1214 while left is None or left > 0:
1217 # refill the queue
1215 # refill the queue
1218 if not queue:
1216 if not queue:
1219 target = 2**18
1217 target = 2**18
1220 for chunk in self.iter:
1218 for chunk in self.iter:
1221 queue.append(chunk)
1219 queue.append(chunk)
1222 target -= len(chunk)
1220 target -= len(chunk)
1223 if target <= 0:
1221 if target <= 0:
1224 break
1222 break
1225 if not queue:
1223 if not queue:
1226 break
1224 break
1227
1225
1228 chunk = queue.popleft()
1226 chunk = queue.popleft()
1229 if left is not None:
1227 if left is not None:
1230 left -= len(chunk)
1228 left -= len(chunk)
1231 if left is not None and left < 0:
1229 if left is not None and left < 0:
1232 queue.appendleft(chunk[left:])
1230 queue.appendleft(chunk[left:])
1233 buf.append(chunk[:left])
1231 buf.append(chunk[:left])
1234 else:
1232 else:
1235 buf.append(chunk)
1233 buf.append(chunk)
1236
1234
1237 return ''.join(buf)
1235 return ''.join(buf)
1238
1236
1239 def filechunkiter(f, size=65536, limit=None):
1237 def filechunkiter(f, size=65536, limit=None):
1240 """Create a generator that produces the data in the file size
1238 """Create a generator that produces the data in the file size
1241 (default 65536) bytes at a time, up to optional limit (default is
1239 (default 65536) bytes at a time, up to optional limit (default is
1242 to read all data). Chunks may be less than size bytes if the
1240 to read all data). Chunks may be less than size bytes if the
1243 chunk is the last chunk in the file, or the file is a socket or
1241 chunk is the last chunk in the file, or the file is a socket or
1244 some other type of file that sometimes reads less data than is
1242 some other type of file that sometimes reads less data than is
1245 requested."""
1243 requested."""
1246 assert size >= 0
1244 assert size >= 0
1247 assert limit is None or limit >= 0
1245 assert limit is None or limit >= 0
1248 while True:
1246 while True:
1249 if limit is None:
1247 if limit is None:
1250 nbytes = size
1248 nbytes = size
1251 else:
1249 else:
1252 nbytes = min(limit, size)
1250 nbytes = min(limit, size)
1253 s = nbytes and f.read(nbytes)
1251 s = nbytes and f.read(nbytes)
1254 if not s:
1252 if not s:
1255 break
1253 break
1256 if limit:
1254 if limit:
1257 limit -= len(s)
1255 limit -= len(s)
1258 yield s
1256 yield s
1259
1257
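# A minimal sketch combining the two helpers above: re-slice a file object's
# contents into fixed-size reads regardless of how the underlying chunks
# arrive ('fp' and 'size' are hypothetical arguments).
def _rechunk(fp, size):
    buf = chunkbuffer(filechunkiter(fp))
    while True:
        chunk = buf.read(size)
        if not chunk:
            break
        yield chunk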
1260 def makedate(timestamp=None):
1258 def makedate(timestamp=None):
1261 '''Return a unix timestamp (or the current time) as a (unixtime,
1259 '''Return a unix timestamp (or the current time) as a (unixtime,
1262 offset) tuple based off the local timezone.'''
1260 offset) tuple based off the local timezone.'''
1263 if timestamp is None:
1261 if timestamp is None:
1264 timestamp = time.time()
1262 timestamp = time.time()
1265 if timestamp < 0:
1263 if timestamp < 0:
1266 hint = _("check your clock")
1264 hint = _("check your clock")
1267 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1265 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1268 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1266 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1269 datetime.datetime.fromtimestamp(timestamp))
1267 datetime.datetime.fromtimestamp(timestamp))
1270 tz = delta.days * 86400 + delta.seconds
1268 tz = delta.days * 86400 + delta.seconds
1271 return timestamp, tz
1269 return timestamp, tz
1272
1270
1273 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1271 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1274 """represent a (unixtime, offset) tuple as a localized time.
1272 """represent a (unixtime, offset) tuple as a localized time.
1275 unixtime is seconds since the epoch, and offset is the time zone's
1273 unixtime is seconds since the epoch, and offset is the time zone's
1276 number of seconds away from UTC. The time zone is rendered only if
1274 number of seconds away from UTC. The time zone is rendered only if
1277 the format contains "%1", "%2" or "%z"."""
1275 the format contains "%1", "%2" or "%z"."""
1278 t, tz = date or makedate()
1276 t, tz = date or makedate()
1279 if t < 0:
1277 if t < 0:
1280 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1278 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1281 tz = 0
1279 tz = 0
1282 if "%1" in format or "%2" in format or "%z" in format:
1280 if "%1" in format or "%2" in format or "%z" in format:
1283 sign = (tz > 0) and "-" or "+"
1281 sign = (tz > 0) and "-" or "+"
1284 minutes = abs(tz) // 60
1282 minutes = abs(tz) // 60
1285 format = format.replace("%z", "%1%2")
1283 format = format.replace("%z", "%1%2")
1286 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1284 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1287 format = format.replace("%2", "%02d" % (minutes % 60))
1285 format = format.replace("%2", "%02d" % (minutes % 60))
1288 try:
1286 try:
1289 t = time.gmtime(float(t) - tz)
1287 t = time.gmtime(float(t) - tz)
1290 except ValueError:
1288 except ValueError:
1291 # time was out of range
1289 # time was out of range
1292 t = time.gmtime(sys.maxint)
1290 t = time.gmtime(sys.maxint)
1293 s = time.strftime(format, t)
1291 s = time.strftime(format, t)
1294 return s
1292 return s
1295
1293
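# A small illustration of the "%1"/"%2" placeholders in datestr(): "%1"
# expands to the signed hours of the offset and "%2" to its minutes, so the
# epoch rendered at UTC and at UTC-5 looks as below (example values, with
# the default untranslated strings).
def _datestrexamples():
    assert datestr((0, 0), '%H:%M %1%2') == '00:00 +0000'
    assert datestr((0, 18000), '%H:%M %1%2') == '19:00 -0500'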
1296 def shortdate(date=None):
1294 def shortdate(date=None):
1297 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1295 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1298 return datestr(date, format='%Y-%m-%d')
1296 return datestr(date, format='%Y-%m-%d')
1299
1297
1300 def strdate(string, format, defaults=[]):
1298 def strdate(string, format, defaults=[]):
1301 """parse a localized time string and return a (unixtime, offset) tuple.
1299 """parse a localized time string and return a (unixtime, offset) tuple.
1302 if the string cannot be parsed, ValueError is raised."""
1300 if the string cannot be parsed, ValueError is raised."""
1303 def timezone(string):
1301 def timezone(string):
1304 tz = string.split()[-1]
1302 tz = string.split()[-1]
1305 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1303 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1306 sign = (tz[0] == "+") and 1 or -1
1304 sign = (tz[0] == "+") and 1 or -1
1307 hours = int(tz[1:3])
1305 hours = int(tz[1:3])
1308 minutes = int(tz[3:5])
1306 minutes = int(tz[3:5])
1309 return -sign * (hours * 60 + minutes) * 60
1307 return -sign * (hours * 60 + minutes) * 60
1310 if tz == "GMT" or tz == "UTC":
1308 if tz == "GMT" or tz == "UTC":
1311 return 0
1309 return 0
1312 return None
1310 return None
1313
1311
1314 # NOTE: unixtime = localunixtime + offset
1312 # NOTE: unixtime = localunixtime + offset
1315 offset, date = timezone(string), string
1313 offset, date = timezone(string), string
1316 if offset is not None:
1314 if offset is not None:
1317 date = " ".join(string.split()[:-1])
1315 date = " ".join(string.split()[:-1])
1318
1316
1319 # add missing elements from defaults
1317 # add missing elements from defaults
1320 usenow = False # default to using biased defaults
1318 usenow = False # default to using biased defaults
1321 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1319 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1322 found = [True for p in part if ("%"+p) in format]
1320 found = [True for p in part if ("%"+p) in format]
1323 if not found:
1321 if not found:
1324 date += "@" + defaults[part][usenow]
1322 date += "@" + defaults[part][usenow]
1325 format += "@%" + part[0]
1323 format += "@%" + part[0]
1326 else:
1324 else:
1327 # We've found a specific time element, less specific time
1325 # We've found a specific time element, less specific time
1328 # elements are relative to today
1326 # elements are relative to today
1329 usenow = True
1327 usenow = True
1330
1328
1331 timetuple = time.strptime(date, format)
1329 timetuple = time.strptime(date, format)
1332 localunixtime = int(calendar.timegm(timetuple))
1330 localunixtime = int(calendar.timegm(timetuple))
1333 if offset is None:
1331 if offset is None:
1334 # local timezone
1332 # local timezone
1335 unixtime = int(time.mktime(timetuple))
1333 unixtime = int(time.mktime(timetuple))
1336 offset = unixtime - localunixtime
1334 offset = unixtime - localunixtime
1337 else:
1335 else:
1338 unixtime = localunixtime + offset
1336 unixtime = localunixtime + offset
1339 return unixtime, offset
1337 return unixtime, offset
1340
1338
1341 def parsedate(date, formats=None, bias={}):
1339 def parsedate(date, formats=None, bias={}):
1342 """parse a localized date/time and return a (unixtime, offset) tuple.
1340 """parse a localized date/time and return a (unixtime, offset) tuple.
1343
1341
1344 The date may be a "unixtime offset" string or in one of the specified
1342 The date may be a "unixtime offset" string or in one of the specified
1345 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1343 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1346
1344
1347 >>> parsedate(' today ') == parsedate(\
1345 >>> parsedate(' today ') == parsedate(\
1348 datetime.date.today().strftime('%b %d'))
1346 datetime.date.today().strftime('%b %d'))
1349 True
1347 True
1350 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1348 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1351 datetime.timedelta(days=1)\
1349 datetime.timedelta(days=1)\
1352 ).strftime('%b %d'))
1350 ).strftime('%b %d'))
1353 True
1351 True
1354 >>> now, tz = makedate()
1352 >>> now, tz = makedate()
1355 >>> strnow, strtz = parsedate('now')
1353 >>> strnow, strtz = parsedate('now')
1356 >>> (strnow - now) < 1
1354 >>> (strnow - now) < 1
1357 True
1355 True
1358 >>> tz == strtz
1356 >>> tz == strtz
1359 True
1357 True
1360 """
1358 """
1361 if not date:
1359 if not date:
1362 return 0, 0
1360 return 0, 0
1363 if isinstance(date, tuple) and len(date) == 2:
1361 if isinstance(date, tuple) and len(date) == 2:
1364 return date
1362 return date
1365 if not formats:
1363 if not formats:
1366 formats = defaultdateformats
1364 formats = defaultdateformats
1367 date = date.strip()
1365 date = date.strip()
1368
1366
1369 if date == 'now' or date == _('now'):
1367 if date == 'now' or date == _('now'):
1370 return makedate()
1368 return makedate()
1371 if date == 'today' or date == _('today'):
1369 if date == 'today' or date == _('today'):
1372 date = datetime.date.today().strftime('%b %d')
1370 date = datetime.date.today().strftime('%b %d')
1373 elif date == 'yesterday' or date == _('yesterday'):
1371 elif date == 'yesterday' or date == _('yesterday'):
1374 date = (datetime.date.today() -
1372 date = (datetime.date.today() -
1375 datetime.timedelta(days=1)).strftime('%b %d')
1373 datetime.timedelta(days=1)).strftime('%b %d')
1376
1374
1377 try:
1375 try:
1378 when, offset = map(int, date.split(' '))
1376 when, offset = map(int, date.split(' '))
1379 except ValueError:
1377 except ValueError:
1380 # fill out defaults
1378 # fill out defaults
1381 now = makedate()
1379 now = makedate()
1382 defaults = {}
1380 defaults = {}
1383 for part in ("d", "mb", "yY", "HI", "M", "S"):
1381 for part in ("d", "mb", "yY", "HI", "M", "S"):
1384 # this piece is for rounding the specific end of unknowns
1382 # this piece is for rounding the specific end of unknowns
1385 b = bias.get(part)
1383 b = bias.get(part)
1386 if b is None:
1384 if b is None:
1387 if part[0] in "HMS":
1385 if part[0] in "HMS":
1388 b = "00"
1386 b = "00"
1389 else:
1387 else:
1390 b = "0"
1388 b = "0"
1391
1389
1392 # this piece is for matching the generic end to today's date
1390 # this piece is for matching the generic end to today's date
1393 n = datestr(now, "%" + part[0])
1391 n = datestr(now, "%" + part[0])
1394
1392
1395 defaults[part] = (b, n)
1393 defaults[part] = (b, n)
1396
1394
1397 for format in formats:
1395 for format in formats:
1398 try:
1396 try:
1399 when, offset = strdate(date, format, defaults)
1397 when, offset = strdate(date, format, defaults)
1400 except (ValueError, OverflowError):
1398 except (ValueError, OverflowError):
1401 pass
1399 pass
1402 else:
1400 else:
1403 break
1401 break
1404 else:
1402 else:
1405 raise Abort(_('invalid date: %r') % date)
1403 raise Abort(_('invalid date: %r') % date)
1406 # validate explicit (probably user-specified) date and
1404 # validate explicit (probably user-specified) date and
1407 # time zone offset. values must fit in signed 32 bits for
1405 # time zone offset. values must fit in signed 32 bits for
1408 # current 32-bit linux runtimes. timezones go from UTC-12
1406 # current 32-bit linux runtimes. timezones go from UTC-12
1409 # to UTC+14
1407 # to UTC+14
1410 if abs(when) > 0x7fffffff:
1408 if abs(when) > 0x7fffffff:
1411 raise Abort(_('date exceeds 32 bits: %d') % when)
1409 raise Abort(_('date exceeds 32 bits: %d') % when)
1412 if when < 0:
1410 if when < 0:
1413 raise Abort(_('negative date value: %d') % when)
1411 raise Abort(_('negative date value: %d') % when)
1414 if offset < -50400 or offset > 43200:
1412 if offset < -50400 or offset > 43200:
1415 raise Abort(_('impossible time zone offset: %d') % offset)
1413 raise Abort(_('impossible time zone offset: %d') % offset)
1416 return when, offset
1414 return when, offset
1417
1415
1418 def matchdate(date):
1416 def matchdate(date):
1419 """Return a function that matches a given date match specifier
1417 """Return a function that matches a given date match specifier
1420
1418
1421 Formats include:
1419 Formats include:
1422
1420
1423 '{date}' match a given date to the accuracy provided
1421 '{date}' match a given date to the accuracy provided
1424
1422
1425 '<{date}' on or before a given date
1423 '<{date}' on or before a given date
1426
1424
1427 '>{date}' on or after a given date
1425 '>{date}' on or after a given date
1428
1426
1429 >>> p1 = parsedate("10:29:59")
1427 >>> p1 = parsedate("10:29:59")
1430 >>> p2 = parsedate("10:30:00")
1428 >>> p2 = parsedate("10:30:00")
1431 >>> p3 = parsedate("10:30:59")
1429 >>> p3 = parsedate("10:30:59")
1432 >>> p4 = parsedate("10:31:00")
1430 >>> p4 = parsedate("10:31:00")
1433 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1431 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1434 >>> f = matchdate("10:30")
1432 >>> f = matchdate("10:30")
1435 >>> f(p1[0])
1433 >>> f(p1[0])
1436 False
1434 False
1437 >>> f(p2[0])
1435 >>> f(p2[0])
1438 True
1436 True
1439 >>> f(p3[0])
1437 >>> f(p3[0])
1440 True
1438 True
1441 >>> f(p4[0])
1439 >>> f(p4[0])
1442 False
1440 False
1443 >>> f(p5[0])
1441 >>> f(p5[0])
1444 False
1442 False
1445 """
1443 """
1446
1444
1447 def lower(date):
1445 def lower(date):
1448 d = {'mb': "1", 'd': "1"}
1446 d = {'mb': "1", 'd': "1"}
1449 return parsedate(date, extendeddateformats, d)[0]
1447 return parsedate(date, extendeddateformats, d)[0]
1450
1448
1451 def upper(date):
1449 def upper(date):
1452 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1450 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1453 for days in ("31", "30", "29"):
1451 for days in ("31", "30", "29"):
1454 try:
1452 try:
1455 d["d"] = days
1453 d["d"] = days
1456 return parsedate(date, extendeddateformats, d)[0]
1454 return parsedate(date, extendeddateformats, d)[0]
1457 except Abort:
1455 except Abort:
1458 pass
1456 pass
1459 d["d"] = "28"
1457 d["d"] = "28"
1460 return parsedate(date, extendeddateformats, d)[0]
1458 return parsedate(date, extendeddateformats, d)[0]
1461
1459
1462 date = date.strip()
1460 date = date.strip()
1463
1461
1464 if not date:
1462 if not date:
1465 raise Abort(_("dates cannot consist entirely of whitespace"))
1463 raise Abort(_("dates cannot consist entirely of whitespace"))
1466 elif date[0] == "<":
1464 elif date[0] == "<":
1467 if not date[1:]:
1465 if not date[1:]:
1468 raise Abort(_("invalid day spec, use '<DATE'"))
1466 raise Abort(_("invalid day spec, use '<DATE'"))
1469 when = upper(date[1:])
1467 when = upper(date[1:])
1470 return lambda x: x <= when
1468 return lambda x: x <= when
1471 elif date[0] == ">":
1469 elif date[0] == ">":
1472 if not date[1:]:
1470 if not date[1:]:
1473 raise Abort(_("invalid day spec, use '>DATE'"))
1471 raise Abort(_("invalid day spec, use '>DATE'"))
1474 when = lower(date[1:])
1472 when = lower(date[1:])
1475 return lambda x: x >= when
1473 return lambda x: x >= when
1476 elif date[0] == "-":
1474 elif date[0] == "-":
1477 try:
1475 try:
1478 days = int(date[1:])
1476 days = int(date[1:])
1479 except ValueError:
1477 except ValueError:
1480 raise Abort(_("invalid day spec: %s") % date[1:])
1478 raise Abort(_("invalid day spec: %s") % date[1:])
1481 if days < 0:
1479 if days < 0:
1482 raise Abort(_('%s must be nonnegative (see "hg help dates")')
1480 raise Abort(_('%s must be nonnegative (see "hg help dates")')
1483 % date[1:])
1481 % date[1:])
1484 when = makedate()[0] - days * 3600 * 24
1482 when = makedate()[0] - days * 3600 * 24
1485 return lambda x: x >= when
1483 return lambda x: x >= when
1486 elif " to " in date:
1484 elif " to " in date:
1487 a, b = date.split(" to ")
1485 a, b = date.split(" to ")
1488 start, stop = lower(a), upper(b)
1486 start, stop = lower(a), upper(b)
1489 return lambda x: x >= start and x <= stop
1487 return lambda x: x >= start and x <= stop
1490 else:
1488 else:
1491 start, stop = lower(date), upper(date)
1489 start, stop = lower(date), upper(date)
1492 return lambda x: x >= start and x <= stop
1490 return lambda x: x >= start and x <= stop
1493
1491
1494 def shortuser(user):
1492 def shortuser(user):
1495 """Return a short representation of a user name or email address."""
1493 """Return a short representation of a user name or email address."""
1496 f = user.find('@')
1494 f = user.find('@')
1497 if f >= 0:
1495 if f >= 0:
1498 user = user[:f]
1496 user = user[:f]
1499 f = user.find('<')
1497 f = user.find('<')
1500 if f >= 0:
1498 if f >= 0:
1501 user = user[f + 1:]
1499 user = user[f + 1:]
1502 f = user.find(' ')
1500 f = user.find(' ')
1503 if f >= 0:
1501 if f >= 0:
1504 user = user[:f]
1502 user = user[:f]
1505 f = user.find('.')
1503 f = user.find('.')
1506 if f >= 0:
1504 if f >= 0:
1507 user = user[:f]
1505 user = user[:f]
1508 return user
1506 return user
1509
1507
1510 def emailuser(user):
1508 def emailuser(user):
1511 """Return the user portion of an email address."""
1509 """Return the user portion of an email address."""
1512 f = user.find('@')
1510 f = user.find('@')
1513 if f >= 0:
1511 if f >= 0:
1514 user = user[:f]
1512 user = user[:f]
1515 f = user.find('<')
1513 f = user.find('<')
1516 if f >= 0:
1514 if f >= 0:
1517 user = user[f + 1:]
1515 user = user[f + 1:]
1518 return user
1516 return user
1519
1517
1520 def email(author):
1518 def email(author):
1521 '''get email of author.'''
1519 '''get email of author.'''
1522 r = author.find('>')
1520 r = author.find('>')
1523 if r == -1:
1521 if r == -1:
1524 r = None
1522 r = None
1525 return author[author.find('<') + 1:r]
1523 return author[author.find('<') + 1:r]
1526
1524
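# A small illustration of the three author-string helpers above, using a
# made-up author value.
def _userexamples():
    author = 'J. Random Hacker <jrandom@example.com>'
    assert email(author) == 'jrandom@example.com'
    assert emailuser(author) == 'jrandom'
    assert shortuser(author) == 'jrandom'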
1527 def ellipsis(text, maxlength=400):
1525 def ellipsis(text, maxlength=400):
1528 """Trim string to at most maxlength (default: 400) columns in display."""
1526 """Trim string to at most maxlength (default: 400) columns in display."""
1529 return encoding.trim(text, maxlength, ellipsis='...')
1527 return encoding.trim(text, maxlength, ellipsis='...')
1530
1528
1531 def unitcountfn(*unittable):
1529 def unitcountfn(*unittable):
1532 '''return a function that renders a readable count of some quantity'''
1530 '''return a function that renders a readable count of some quantity'''
1533
1531
1534 def go(count):
1532 def go(count):
1535 for multiplier, divisor, format in unittable:
1533 for multiplier, divisor, format in unittable:
1536 if count >= divisor * multiplier:
1534 if count >= divisor * multiplier:
1537 return format % (count / float(divisor))
1535 return format % (count / float(divisor))
1538 return unittable[-1][2] % count
1536 return unittable[-1][2] % count
1539
1537
1540 return go
1538 return go
1541
1539
1542 bytecount = unitcountfn(
1540 bytecount = unitcountfn(
1543 (100, 1 << 30, _('%.0f GB')),
1541 (100, 1 << 30, _('%.0f GB')),
1544 (10, 1 << 30, _('%.1f GB')),
1542 (10, 1 << 30, _('%.1f GB')),
1545 (1, 1 << 30, _('%.2f GB')),
1543 (1, 1 << 30, _('%.2f GB')),
1546 (100, 1 << 20, _('%.0f MB')),
1544 (100, 1 << 20, _('%.0f MB')),
1547 (10, 1 << 20, _('%.1f MB')),
1545 (10, 1 << 20, _('%.1f MB')),
1548 (1, 1 << 20, _('%.2f MB')),
1546 (1, 1 << 20, _('%.2f MB')),
1549 (100, 1 << 10, _('%.0f KB')),
1547 (100, 1 << 10, _('%.0f KB')),
1550 (10, 1 << 10, _('%.1f KB')),
1548 (10, 1 << 10, _('%.1f KB')),
1551 (1, 1 << 10, _('%.2f KB')),
1549 (1, 1 << 10, _('%.2f KB')),
1552 (1, 1, _('%.0f bytes')),
1550 (1, 1, _('%.0f bytes')),
1553 )
1551 )
1554
1552
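# A few example values for bytecount() (assuming the unit strings are not
# translated): the table above picks the coarsest unit and a precision that
# keeps roughly three significant digits.
def _bytecountexamples():
    assert bytecount(100) == '100 bytes'
    assert bytecount(10 * 1024) == '10.0 KB'
    assert bytecount(1234567) == '1.18 MB'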
1555 def uirepr(s):
1553 def uirepr(s):
1556 # Avoid double backslash in Windows path repr()
1554 # Avoid double backslash in Windows path repr()
1557 return repr(s).replace('\\\\', '\\')
1555 return repr(s).replace('\\\\', '\\')
1558
1556
1559 # delay import of textwrap
1557 # delay import of textwrap
1560 def MBTextWrapper(**kwargs):
1558 def MBTextWrapper(**kwargs):
1561 class tw(textwrap.TextWrapper):
1559 class tw(textwrap.TextWrapper):
1562 """
1560 """
1563 Extend TextWrapper for width-awareness.
1561 Extend TextWrapper for width-awareness.
1564
1562
1565 Neither the number of 'bytes' in any encoding nor that of 'characters'
1563 Neither the number of 'bytes' in any encoding nor that of 'characters'
1566 is appropriate to calculate terminal columns for the specified string.
1564 is appropriate to calculate terminal columns for the specified string.
1567
1565
1568 Original TextWrapper implementation uses built-in 'len()' directly,
1566 Original TextWrapper implementation uses built-in 'len()' directly,
1569 so overriding is needed to use width information of each character.
1567 so overriding is needed to use width information of each character.
1570
1568
1571 In addition, characters classified as 'ambiguous' width are
1569 In addition, characters classified as 'ambiguous' width are
1572 treated as wide in East Asian locales, but as narrow elsewhere.
1570 treated as wide in East Asian locales, but as narrow elsewhere.
1573
1571
1574 This requires a user decision to determine the width of such characters.
1572 This requires a user decision to determine the width of such characters.
1575 """
1573 """
1576 def __init__(self, **kwargs):
1574 def __init__(self, **kwargs):
1577 textwrap.TextWrapper.__init__(self, **kwargs)
1575 textwrap.TextWrapper.__init__(self, **kwargs)
1578
1576
1579 # for compatibility between 2.4 and 2.6
1577 # for compatibility between 2.4 and 2.6
1580 if getattr(self, 'drop_whitespace', None) is None:
1578 if getattr(self, 'drop_whitespace', None) is None:
1581 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1579 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1582
1580
1583 def _cutdown(self, ucstr, space_left):
1581 def _cutdown(self, ucstr, space_left):
1584 l = 0
1582 l = 0
1585 colwidth = encoding.ucolwidth
1583 colwidth = encoding.ucolwidth
1586 for i in xrange(len(ucstr)):
1584 for i in xrange(len(ucstr)):
1587 l += colwidth(ucstr[i])
1585 l += colwidth(ucstr[i])
1588 if space_left < l:
1586 if space_left < l:
1589 return (ucstr[:i], ucstr[i:])
1587 return (ucstr[:i], ucstr[i:])
1590 return ucstr, ''
1588 return ucstr, ''
1591
1589
1592 # overriding of base class
1590 # overriding of base class
1593 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1591 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1594 space_left = max(width - cur_len, 1)
1592 space_left = max(width - cur_len, 1)
1595
1593
1596 if self.break_long_words:
1594 if self.break_long_words:
1597 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1595 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1598 cur_line.append(cut)
1596 cur_line.append(cut)
1599 reversed_chunks[-1] = res
1597 reversed_chunks[-1] = res
1600 elif not cur_line:
1598 elif not cur_line:
1601 cur_line.append(reversed_chunks.pop())
1599 cur_line.append(reversed_chunks.pop())
1602
1600
1603 # this overriding code is imported from TextWrapper of python 2.6
1601 # this overriding code is imported from TextWrapper of python 2.6
1604 # to calculate columns of string by 'encoding.ucolwidth()'
1602 # to calculate columns of string by 'encoding.ucolwidth()'
1605 def _wrap_chunks(self, chunks):
1603 def _wrap_chunks(self, chunks):
1606 colwidth = encoding.ucolwidth
1604 colwidth = encoding.ucolwidth
1607
1605
1608 lines = []
1606 lines = []
1609 if self.width <= 0:
1607 if self.width <= 0:
1610 raise ValueError("invalid width %r (must be > 0)" % self.width)
1608 raise ValueError("invalid width %r (must be > 0)" % self.width)
1611
1609
1612 # Arrange in reverse order so items can be efficiently popped
1610 # Arrange in reverse order so items can be efficiently popped
1613 # from a stack of chunks.
1611 # from a stack of chunks.
1614 chunks.reverse()
1612 chunks.reverse()
1615
1613
1616 while chunks:
1614 while chunks:
1617
1615
1618 # Start the list of chunks that will make up the current line.
1616 # Start the list of chunks that will make up the current line.
1619 # cur_len is just the length of all the chunks in cur_line.
1617 # cur_len is just the length of all the chunks in cur_line.
1620 cur_line = []
1618 cur_line = []
1621 cur_len = 0
1619 cur_len = 0
1622
1620
1623 # Figure out which static string will prefix this line.
1621 # Figure out which static string will prefix this line.
1624 if lines:
1622 if lines:
1625 indent = self.subsequent_indent
1623 indent = self.subsequent_indent
1626 else:
1624 else:
1627 indent = self.initial_indent
1625 indent = self.initial_indent
1628
1626
1629 # Maximum width for this line.
1627 # Maximum width for this line.
1630 width = self.width - len(indent)
1628 width = self.width - len(indent)
1631
1629
1632 # First chunk on line is whitespace -- drop it, unless this
1630 # First chunk on line is whitespace -- drop it, unless this
1633 # is the very beginning of the text (i.e. no lines started yet).
1631 # is the very beginning of the text (i.e. no lines started yet).
1634 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1632 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1635 del chunks[-1]
1633 del chunks[-1]
1636
1634
1637 while chunks:
1635 while chunks:
1638 l = colwidth(chunks[-1])
1636 l = colwidth(chunks[-1])
1639
1637
1640 # Can at least squeeze this chunk onto the current line.
1638 # Can at least squeeze this chunk onto the current line.
1641 if cur_len + l <= width:
1639 if cur_len + l <= width:
1642 cur_line.append(chunks.pop())
1640 cur_line.append(chunks.pop())
1643 cur_len += l
1641 cur_len += l
1644
1642
1645 # Nope, this line is full.
1643 # Nope, this line is full.
1646 else:
1644 else:
1647 break
1645 break
1648
1646
1649 # The current line is full, and the next chunk is too big to
1647 # The current line is full, and the next chunk is too big to
1650 # fit on *any* line (not just this one).
1648 # fit on *any* line (not just this one).
1651 if chunks and colwidth(chunks[-1]) > width:
1649 if chunks and colwidth(chunks[-1]) > width:
1652 self._handle_long_word(chunks, cur_line, cur_len, width)
1650 self._handle_long_word(chunks, cur_line, cur_len, width)
1653
1651
1654 # If the last chunk on this line is all whitespace, drop it.
1652 # If the last chunk on this line is all whitespace, drop it.
1655 if (self.drop_whitespace and
1653 if (self.drop_whitespace and
1656 cur_line and cur_line[-1].strip() == ''):
1654 cur_line and cur_line[-1].strip() == ''):
1657 del cur_line[-1]
1655 del cur_line[-1]
1658
1656
1659 # Convert current line back to a string and store it in list
1657 # Convert current line back to a string and store it in list
1660 # of all lines (return value).
1658 # of all lines (return value).
1661 if cur_line:
1659 if cur_line:
1662 lines.append(indent + ''.join(cur_line))
1660 lines.append(indent + ''.join(cur_line))
1663
1661
1664 return lines
1662 return lines
1665
1663
1666 global MBTextWrapper
1664 global MBTextWrapper
1667 MBTextWrapper = tw
1665 MBTextWrapper = tw
1668 return tw(**kwargs)
1666 return tw(**kwargs)
1669
1667
1670 def wrap(line, width, initindent='', hangindent=''):
1668 def wrap(line, width, initindent='', hangindent=''):
1671 maxindent = max(len(hangindent), len(initindent))
1669 maxindent = max(len(hangindent), len(initindent))
1672 if width <= maxindent:
1670 if width <= maxindent:
1673 # adjust for weird terminal size
1671 # adjust for weird terminal size
1674 width = max(78, maxindent + 1)
1672 width = max(78, maxindent + 1)
1675 line = line.decode(encoding.encoding, encoding.encodingmode)
1673 line = line.decode(encoding.encoding, encoding.encodingmode)
1676 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1674 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1677 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1675 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1678 wrapper = MBTextWrapper(width=width,
1676 wrapper = MBTextWrapper(width=width,
1679 initial_indent=initindent,
1677 initial_indent=initindent,
1680 subsequent_indent=hangindent)
1678 subsequent_indent=hangindent)
1681 return wrapper.fill(line).encode(encoding.encoding)
1679 return wrapper.fill(line).encode(encoding.encoding)
1682
1680
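# A minimal sketch of wrap(): lay out help text at a given terminal width
# with a hanging indent (the text and width below are hypothetical).
def _wrapexample(width):
    text = 'a fairly long option description that needs to be wrapped'
    return wrap(text, width, initindent='  ', hangindent='      ')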
1683 def iterlines(iterator):
1681 def iterlines(iterator):
1684 for chunk in iterator:
1682 for chunk in iterator:
1685 for line in chunk.splitlines():
1683 for line in chunk.splitlines():
1686 yield line
1684 yield line
1687
1685
1688 def expandpath(path):
1686 def expandpath(path):
1689 return os.path.expanduser(os.path.expandvars(path))
1687 return os.path.expanduser(os.path.expandvars(path))
1690
1688
1691 def hgcmd():
1689 def hgcmd():
1692 """Return the command used to execute current hg
1690 """Return the command used to execute current hg
1693
1691
1694 This is different from hgexecutable() because on Windows we want
1692 This is different from hgexecutable() because on Windows we want
1695 to avoid things like batch files that open new shell windows, so we
1693 to avoid things like batch files that open new shell windows, so we
1696 get either the python call or the current executable.
1694 get either the python call or the current executable.
1697 """
1695 """
1698 if mainfrozen():
1696 if mainfrozen():
1699 return [sys.executable]
1697 return [sys.executable]
1700 return gethgcmd()
1698 return gethgcmd()
1701
1699
1702 def rundetached(args, condfn):
1700 def rundetached(args, condfn):
1703 """Execute the argument list in a detached process.
1701 """Execute the argument list in a detached process.
1704
1702
1705 condfn is a callable which is called repeatedly and should return
1703 condfn is a callable which is called repeatedly and should return
1706 True once the child process is known to have started successfully.
1704 True once the child process is known to have started successfully.
1707 At this point, the child process PID is returned. If the child
1705 At this point, the child process PID is returned. If the child
1708 process fails to start or finishes before condfn() evaluates to
1706 process fails to start or finishes before condfn() evaluates to
1709 True, return -1.
1707 True, return -1.
1710 """
1708 """
1711 # Windows case is easier because the child process is either
1709 # Windows case is easier because the child process is either
1712 # successfully starting and validating the condition or exiting
1710 # successfully starting and validating the condition or exiting
1713 # on failure. We just poll on its PID. On Unix, if the child
1711 # on failure. We just poll on its PID. On Unix, if the child
1714 # process fails to start, it will be left in a zombie state until
1712 # process fails to start, it will be left in a zombie state until
1715 # the parent waits on it, which we cannot do since we expect a long
1713 # the parent waits on it, which we cannot do since we expect a long
1716 # running process on success. Instead we listen for SIGCHLD telling
1714 # running process on success. Instead we listen for SIGCHLD telling
1717 # us our child process terminated.
1715 # us our child process terminated.
1718 terminated = set()
1716 terminated = set()
1719 def handler(signum, frame):
1717 def handler(signum, frame):
1720 terminated.add(os.wait())
1718 terminated.add(os.wait())
1721 prevhandler = None
1719 prevhandler = None
1722 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1720 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1723 if SIGCHLD is not None:
1721 if SIGCHLD is not None:
1724 prevhandler = signal.signal(SIGCHLD, handler)
1722 prevhandler = signal.signal(SIGCHLD, handler)
1725 try:
1723 try:
1726 pid = spawndetached(args)
1724 pid = spawndetached(args)
1727 while not condfn():
1725 while not condfn():
1728 if ((pid in terminated or not testpid(pid))
1726 if ((pid in terminated or not testpid(pid))
1729 and not condfn()):
1727 and not condfn()):
1730 return -1
1728 return -1
1731 time.sleep(0.1)
1729 time.sleep(0.1)
1732 return pid
1730 return pid
1733 finally:
1731 finally:
1734 if prevhandler is not None:
1732 if prevhandler is not None:
1735 signal.signal(signal.SIGCHLD, prevhandler)
1733 signal.signal(signal.SIGCHLD, prevhandler)
1736
1734
1737 try:
1735 try:
1738 any, all = any, all
1736 any, all = any, all
1739 except NameError:
1737 except NameError:
1740 def any(iterable):
1738 def any(iterable):
1741 for i in iterable:
1739 for i in iterable:
1742 if i:
1740 if i:
1743 return True
1741 return True
1744 return False
1742 return False
1745
1743
1746 def all(iterable):
1744 def all(iterable):
1747 for i in iterable:
1745 for i in iterable:
1748 if not i:
1746 if not i:
1749 return False
1747 return False
1750 return True
1748 return True
1751
1749
1752 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1750 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1753 """Return the result of interpolating items in the mapping into string s.
1751 """Return the result of interpolating items in the mapping into string s.
1754
1752
1755 prefix is a single character string, or a two character string with
1753 prefix is a single character string, or a two character string with
1756 a backslash as the first character if the prefix needs to be escaped in
1754 a backslash as the first character if the prefix needs to be escaped in
1757 a regular expression.
1755 a regular expression.
1758
1756
1759 fn is an optional function that will be applied to the replacement text
1757 fn is an optional function that will be applied to the replacement text
1760 just before replacement.
1758 just before replacement.
1761
1759
1762 escape_prefix is an optional flag that allows using a doubled prefix
1760 escape_prefix is an optional flag that allows using a doubled prefix
1763 to escape the prefix character itself.
1761 to escape the prefix character itself.
1764 """
1762 """
1765 fn = fn or (lambda s: s)
1763 fn = fn or (lambda s: s)
1766 patterns = '|'.join(mapping.keys())
1764 patterns = '|'.join(mapping.keys())
1767 if escape_prefix:
1765 if escape_prefix:
1768 patterns += '|' + prefix
1766 patterns += '|' + prefix
1769 if len(prefix) > 1:
1767 if len(prefix) > 1:
1770 prefix_char = prefix[1:]
1768 prefix_char = prefix[1:]
1771 else:
1769 else:
1772 prefix_char = prefix
1770 prefix_char = prefix
1773 mapping[prefix_char] = prefix_char
1771 mapping[prefix_char] = prefix_char
1774 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1772 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1775 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1773 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1776
1774
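# A minimal sketch of interpolate() with an escapable '$' prefix: '$file'
# is replaced from the mapping and '$$' collapses to a literal '$' (the
# mapping and command string are hypothetical).
def _interpolateexample():
    mapping = {'file': 'a b.txt'}
    cmd = 'cat $file > out; echo cost: $$5'
    return interpolate(r'\$', mapping, cmd, escape_prefix=True)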
1777 def getport(port):
1775 def getport(port):
1778 """Return the port for a given network service.
1776 """Return the port for a given network service.
1779
1777
1780 If port is an integer, it's returned as is. If it's a string, it's
1778 If port is an integer, it's returned as is. If it's a string, it's
1781 looked up using socket.getservbyname(). If there's no matching
1779 looked up using socket.getservbyname(). If there's no matching
1782 service, util.Abort is raised.
1780 service, util.Abort is raised.
1783 """
1781 """
1784 try:
1782 try:
1785 return int(port)
1783 return int(port)
1786 except ValueError:
1784 except ValueError:
1787 pass
1785 pass
1788
1786
1789 try:
1787 try:
1790 return socket.getservbyname(port)
1788 return socket.getservbyname(port)
1791 except socket.error:
1789 except socket.error:
1792 raise Abort(_("no port number associated with service '%s'") % port)
1790 raise Abort(_("no port number associated with service '%s'") % port)
1793
1791
1794 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1792 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1795 '0': False, 'no': False, 'false': False, 'off': False,
1793 '0': False, 'no': False, 'false': False, 'off': False,
1796 'never': False}
1794 'never': False}
1797
1795
1798 def parsebool(s):
1796 def parsebool(s):
1799 """Parse s into a boolean.
1797 """Parse s into a boolean.
1800
1798
1801 If s is not a valid boolean, returns None.
1799 If s is not a valid boolean, returns None.
1802 """
1800 """
1803 return _booleans.get(s.lower(), None)
1801 return _booleans.get(s.lower(), None)
1804
1802
1805 _hexdig = '0123456789ABCDEFabcdef'
1803 _hexdig = '0123456789ABCDEFabcdef'
1806 _hextochr = dict((a + b, chr(int(a + b, 16)))
1804 _hextochr = dict((a + b, chr(int(a + b, 16)))
1807 for a in _hexdig for b in _hexdig)
1805 for a in _hexdig for b in _hexdig)
1808
1806
1809 def _urlunquote(s):
1807 def _urlunquote(s):
1810 """Decode HTTP/HTML % encoding.
1808 """Decode HTTP/HTML % encoding.
1811
1809
1812 >>> _urlunquote('abc%20def')
1810 >>> _urlunquote('abc%20def')
1813 'abc def'
1811 'abc def'
1814 """
1812 """
1815 res = s.split('%')
1813 res = s.split('%')
1816 # fastpath
1814 # fastpath
1817 if len(res) == 1:
1815 if len(res) == 1:
1818 return s
1816 return s
1819 s = res[0]
1817 s = res[0]
1820 for item in res[1:]:
1818 for item in res[1:]:
1821 try:
1819 try:
1822 s += _hextochr[item[:2]] + item[2:]
1820 s += _hextochr[item[:2]] + item[2:]
1823 except KeyError:
1821 except KeyError:
1824 s += '%' + item
1822 s += '%' + item
1825 except UnicodeDecodeError:
1823 except UnicodeDecodeError:
1826 s += unichr(int(item[:2], 16)) + item[2:]
1824 s += unichr(int(item[:2], 16)) + item[2:]
1827 return s
1825 return s
1828
1826
1829 class url(object):
1827 class url(object):
1830 r"""Reliable URL parser.
1828 r"""Reliable URL parser.
1831
1829
1832 This parses URLs and provides attributes for the following
1830 This parses URLs and provides attributes for the following
1833 components:
1831 components:
1834
1832
1835 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1833 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1836
1834
1837 Missing components are set to None. The only exception is
1835 Missing components are set to None. The only exception is
1838 fragment, which is set to '' if present but empty.
1836 fragment, which is set to '' if present but empty.
1839
1837
1840 If parsefragment is False, fragment is included in query. If
1838 If parsefragment is False, fragment is included in query. If
1841 parsequery is False, query is included in path. If both are
1839 parsequery is False, query is included in path. If both are
1842 False, both fragment and query are included in path.
1840 False, both fragment and query are included in path.
1843
1841
1844 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1842 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1845
1843
1846 Note that for backward compatibility reasons, bundle URLs do not
1844 Note that for backward compatibility reasons, bundle URLs do not
1847 take host names. That means 'bundle://../' has a path of '../'.
1845 take host names. That means 'bundle://../' has a path of '../'.
1848
1846
1849 Examples:
1847 Examples:
1850
1848
1851 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1849 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1852 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1850 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1853 >>> url('ssh://[::1]:2200//home/joe/repo')
1851 >>> url('ssh://[::1]:2200//home/joe/repo')
1854 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1852 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1855 >>> url('file:///home/joe/repo')
1853 >>> url('file:///home/joe/repo')
1856 <url scheme: 'file', path: '/home/joe/repo'>
1854 <url scheme: 'file', path: '/home/joe/repo'>
1857 >>> url('file:///c:/temp/foo/')
1855 >>> url('file:///c:/temp/foo/')
1858 <url scheme: 'file', path: 'c:/temp/foo/'>
1856 <url scheme: 'file', path: 'c:/temp/foo/'>
1859 >>> url('bundle:foo')
1857 >>> url('bundle:foo')
1860 <url scheme: 'bundle', path: 'foo'>
1858 <url scheme: 'bundle', path: 'foo'>
1861 >>> url('bundle://../foo')
1859 >>> url('bundle://../foo')
1862 <url scheme: 'bundle', path: '../foo'>
1860 <url scheme: 'bundle', path: '../foo'>
1863 >>> url(r'c:\foo\bar')
1861 >>> url(r'c:\foo\bar')
1864 <url path: 'c:\\foo\\bar'>
1862 <url path: 'c:\\foo\\bar'>
1865 >>> url(r'\\blah\blah\blah')
1863 >>> url(r'\\blah\blah\blah')
1866 <url path: '\\\\blah\\blah\\blah'>
1864 <url path: '\\\\blah\\blah\\blah'>
1867 >>> url(r'\\blah\blah\blah#baz')
1865 >>> url(r'\\blah\blah\blah#baz')
1868 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1866 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1869 >>> url(r'file:///C:\users\me')
1867 >>> url(r'file:///C:\users\me')
1870 <url scheme: 'file', path: 'C:\\users\\me'>
1868 <url scheme: 'file', path: 'C:\\users\\me'>
1871
1869
1872 Authentication credentials:
1870 Authentication credentials:
1873
1871
1874 >>> url('ssh://joe:xyz@x/repo')
1872 >>> url('ssh://joe:xyz@x/repo')
1875 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1873 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1876 >>> url('ssh://joe@x/repo')
1874 >>> url('ssh://joe@x/repo')
1877 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1875 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1878
1876
1879 Query strings and fragments:
1877 Query strings and fragments:
1880
1878
1881 >>> url('http://host/a?b#c')
1879 >>> url('http://host/a?b#c')
1882 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1880 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1883 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1881 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1884 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1882 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1885 """
1883 """
1886
1884
1887 _safechars = "!~*'()+"
1885 _safechars = "!~*'()+"
1888 _safepchars = "/!~*'()+:\\"
1886 _safepchars = "/!~*'()+:\\"
1889 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1887 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1890
1888
1891 def __init__(self, path, parsequery=True, parsefragment=True):
1889 def __init__(self, path, parsequery=True, parsefragment=True):
1892 # We slowly chomp away at path until we have only the path left
1890 # We slowly chomp away at path until we have only the path left
1893 self.scheme = self.user = self.passwd = self.host = None
1891 self.scheme = self.user = self.passwd = self.host = None
1894 self.port = self.path = self.query = self.fragment = None
1892 self.port = self.path = self.query = self.fragment = None
1895 self._localpath = True
1893 self._localpath = True
1896 self._hostport = ''
1894 self._hostport = ''
1897 self._origpath = path
1895 self._origpath = path
1898
1896
1899 if parsefragment and '#' in path:
1897 if parsefragment and '#' in path:
1900 path, self.fragment = path.split('#', 1)
1898 path, self.fragment = path.split('#', 1)
1901 if not path:
1899 if not path:
1902 path = None
1900 path = None
1903
1901
1904 # special case for Windows drive letters and UNC paths
1902 # special case for Windows drive letters and UNC paths
1905 if hasdriveletter(path) or path.startswith(r'\\'):
1903 if hasdriveletter(path) or path.startswith(r'\\'):
1906 self.path = path
1904 self.path = path
1907 return
1905 return
1908
1906
1909 # For compatibility reasons, we can't handle bundle paths as
1907 # For compatibility reasons, we can't handle bundle paths as
1910 # normal URLs
1908 # normal URLs
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')

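# Minimal usage sketch (an illustration added in editing, not part of the
# upstream module; the host, port and credentials below are made up). It
# shows how url() splits a location into components, how str() reassembles
# it, and how authinfo() returns a credential-free URI together with the
# credentials handed to the urllib2 password manager.
def _url_usage_sketch():
    u = url('ssh://joe:xyz@example.com:2222/repo')
    assert (u.scheme, u.user, u.passwd, u.host, u.port, u.path) == \
        ('ssh', 'joe', 'xyz', 'example.com', '2222', 'repo')
    assert str(u) == 'ssh://joe:xyz@example.com:2222/repo'
    authuri, authdata = u.authinfo()
    assert authuri == 'ssh://example.com:2222/repo'  # credentials stripped
    assert authdata == (None, (authuri, 'example.com'), 'joe', 'xyz')
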
def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()

def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return str(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)

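# Minimal sketch (an illustration added in editing; the URL is made up):
# hidepassword() masks only the password, while removeauth() drops both the
# user name and the password.
def _credential_sketch():
    assert hidepassword('http://joe:secret@example.com/repo') == \
        'http://joe:***@example.com/repo'
    assert removeauth('http://joe:secret@example.com/repo') == \
        'http://example.com/repo'
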
def isatty(fd):
    try:
        return fd.isatty()
    except AttributeError:
        return False

timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

        @util.timed
        def foo(a, b, c):
            pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper

_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behaviour. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results

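# Minimal usage sketch (an illustration added in editing; the source names
# and hook functions are made up): hooks registered under different source
# names run in lexicographic order of those names, and calling the
# collection returns their results in that order.
def _hooks_sketch():
    h = hooks()
    h.add('zzz-ext', lambda x: x * 2)
    h.add('aaa-ext', lambda x: x + 1)
    assert h(3) == [4, 6]  # 'aaa-ext' runs before 'zzz-ext'
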
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require a ui object.
    Not meant to be used in production code, but very convenient while
    developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    entries = [('%s:%s' % (fn, ln), func)
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (fnmax, fnln, func))
    f.flush()

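# Minimal sketch (an illustration added in editing): a call like the one
# below, dropped into a code path under investigation, writes the message
# and the current call stack to stderr; the 'dst' alias defined at the end
# of this module is the usual shorthand.
def _stacktrace_sketch():
    debugstacktrace('reached suspicious branch')
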
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs

if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs

def finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

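# Minimal sketch (an illustration added in editing; the paths are made up):
# finddirs() yields the ancestors of a path from deepest to shallowest, and
# a dirs multiset answers whether any tracked file lives under a directory.
def _dirs_sketch():
    assert list(finddirs('a/b/c')) == ['a/b', 'a']
    d = dirs(['a/b/c', 'a/d'])
    assert 'a' in d and 'a/b' in d
    assert 'a/b/c' not in d  # only directories are members, not files
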
# convenient shortcut
dst = debugstacktrace