##// END OF EJS Templates
MBTextWrapper: drop dedicated __init__ method...
Pierre-Yves David -
r25210:b58dde1b default
parent child Browse files
Show More
@@ -1,2256 +1,2249 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding, parsers
18 import error, osutil, encoding, parsers
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib, struct
22 import imp, socket, urllib, struct
23 import gc
23 import gc
24
24
25 if os.name == 'nt':
25 if os.name == 'nt':
26 import windows as platform
26 import windows as platform
27 else:
27 else:
28 import posix as platform
28 import posix as platform
29
29
30 cachestat = platform.cachestat
30 cachestat = platform.cachestat
31 checkexec = platform.checkexec
31 checkexec = platform.checkexec
32 checklink = platform.checklink
32 checklink = platform.checklink
33 copymode = platform.copymode
33 copymode = platform.copymode
34 executablepath = platform.executablepath
34 executablepath = platform.executablepath
35 expandglobs = platform.expandglobs
35 expandglobs = platform.expandglobs
36 explainexit = platform.explainexit
36 explainexit = platform.explainexit
37 findexe = platform.findexe
37 findexe = platform.findexe
38 gethgcmd = platform.gethgcmd
38 gethgcmd = platform.gethgcmd
39 getuser = platform.getuser
39 getuser = platform.getuser
40 groupmembers = platform.groupmembers
40 groupmembers = platform.groupmembers
41 groupname = platform.groupname
41 groupname = platform.groupname
42 hidewindow = platform.hidewindow
42 hidewindow = platform.hidewindow
43 isexec = platform.isexec
43 isexec = platform.isexec
44 isowner = platform.isowner
44 isowner = platform.isowner
45 localpath = platform.localpath
45 localpath = platform.localpath
46 lookupreg = platform.lookupreg
46 lookupreg = platform.lookupreg
47 makedir = platform.makedir
47 makedir = platform.makedir
48 nlinks = platform.nlinks
48 nlinks = platform.nlinks
49 normpath = platform.normpath
49 normpath = platform.normpath
50 normcase = platform.normcase
50 normcase = platform.normcase
51 normcasespec = platform.normcasespec
51 normcasespec = platform.normcasespec
52 normcasefallback = platform.normcasefallback
52 normcasefallback = platform.normcasefallback
53 openhardlinks = platform.openhardlinks
53 openhardlinks = platform.openhardlinks
54 oslink = platform.oslink
54 oslink = platform.oslink
55 parsepatchoutput = platform.parsepatchoutput
55 parsepatchoutput = platform.parsepatchoutput
56 pconvert = platform.pconvert
56 pconvert = platform.pconvert
57 popen = platform.popen
57 popen = platform.popen
58 posixfile = platform.posixfile
58 posixfile = platform.posixfile
59 quotecommand = platform.quotecommand
59 quotecommand = platform.quotecommand
60 readpipe = platform.readpipe
60 readpipe = platform.readpipe
61 rename = platform.rename
61 rename = platform.rename
62 removedirs = platform.removedirs
62 removedirs = platform.removedirs
63 samedevice = platform.samedevice
63 samedevice = platform.samedevice
64 samefile = platform.samefile
64 samefile = platform.samefile
65 samestat = platform.samestat
65 samestat = platform.samestat
66 setbinary = platform.setbinary
66 setbinary = platform.setbinary
67 setflags = platform.setflags
67 setflags = platform.setflags
68 setsignalhandler = platform.setsignalhandler
68 setsignalhandler = platform.setsignalhandler
69 shellquote = platform.shellquote
69 shellquote = platform.shellquote
70 spawndetached = platform.spawndetached
70 spawndetached = platform.spawndetached
71 split = platform.split
71 split = platform.split
72 sshargs = platform.sshargs
72 sshargs = platform.sshargs
73 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
73 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
74 statisexec = platform.statisexec
74 statisexec = platform.statisexec
75 statislink = platform.statislink
75 statislink = platform.statislink
76 termwidth = platform.termwidth
76 termwidth = platform.termwidth
77 testpid = platform.testpid
77 testpid = platform.testpid
78 umask = platform.umask
78 umask = platform.umask
79 unlink = platform.unlink
79 unlink = platform.unlink
80 unlinkpath = platform.unlinkpath
80 unlinkpath = platform.unlinkpath
81 username = platform.username
81 username = platform.username
82
82
83 # Python compatibility
83 # Python compatibility
84
84
85 _notset = object()
85 _notset = object()
86
86
def safehasattr(thing, attr):
    """True if `thing` has attribute `attr` (sentinel-based hasattr)."""
    found = getattr(thing, attr, _notset)
    return found is not _notset
89
89
def sha1(s=''):
    '''
    Low-overhead wrapper around Python's SHA support

    >>> f = _fastsha1
    >>> a = sha1()
    >>> a = f()
    >>> a.hexdigest()
    'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    '''
    # This wrapper only runs once: _fastsha1 rebinds the module-level
    # name 'sha1' to the real hash constructor on its first call.
    return _fastsha1(s)
102
102
103 def _fastsha1(s=''):
103 def _fastsha1(s=''):
104 # This function will import sha1 from hashlib or sha (whichever is
104 # This function will import sha1 from hashlib or sha (whichever is
105 # available) and overwrite itself with it on the first call.
105 # available) and overwrite itself with it on the first call.
106 # Subsequent calls will go directly to the imported function.
106 # Subsequent calls will go directly to the imported function.
107 if sys.version_info >= (2, 5):
107 if sys.version_info >= (2, 5):
108 from hashlib import sha1 as _sha1
108 from hashlib import sha1 as _sha1
109 else:
109 else:
110 from sha import sha as _sha1
110 from sha import sha as _sha1
111 global _fastsha1, sha1
111 global _fastsha1, sha1
112 _fastsha1 = sha1 = _sha1
112 _fastsha1 = sha1 = _sha1
113 return _sha1(s)
113 return _sha1(s)
114
114
def md5(s=''):
    # Self-replacing stub, same trick as _fastsha1: import the real md5
    # once, rebind the module-level name, and all later calls skip the
    # import machinery entirely.
    try:
        from hashlib import md5 as _md5
    except ImportError:
        from md5 import md5 as _md5
    global md5
    md5 = _md5
    return _md5(s)
123
123
124 DIGESTS = {
124 DIGESTS = {
125 'md5': md5,
125 'md5': md5,
126 'sha1': sha1,
126 'sha1': sha1,
127 }
127 }
128 # List of digest types from strongest to weakest
128 # List of digest types from strongest to weakest
129 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
129 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
130
130
131 try:
131 try:
132 import hashlib
132 import hashlib
133 DIGESTS.update({
133 DIGESTS.update({
134 'sha512': hashlib.sha512,
134 'sha512': hashlib.sha512,
135 })
135 })
136 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
136 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
137 except ImportError:
137 except ImportError:
138 pass
138 pass
139
139
140 for k in DIGESTS_BY_STRENGTH:
140 for k in DIGESTS_BY_STRENGTH:
141 assert k in DIGESTS
141 assert k in DIGESTS
142
142
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # feed data to every tracked hash object
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # fix: format the message with 'key'; the old code used 'k',
            # which resolved to the stale module-level loop variable left
            # over from the 'for k in DIGESTS_BY_STRENGTH' loop, so the
            # error named the wrong digest type
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
189
189
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
221
221
222 try:
222 try:
223 buffer = buffer
223 buffer = buffer
224 except NameError:
224 except NameError:
225 if sys.version_info[0] < 3:
225 if sys.version_info[0] < 3:
226 def buffer(sliceable, offset=0):
226 def buffer(sliceable, offset=0):
227 return sliceable[offset:]
227 return sliceable[offset:]
228 else:
228 else:
229 def buffer(sliceable, offset=0):
229 def buffer(sliceable, offset=0):
230 return memoryview(sliceable)[offset:]
230 return memoryview(sliceable)[offset:]
231
231
232 import subprocess
232 import subprocess
233 closefds = os.name == 'posix'
233 closefds = os.name == 'posix'
234
234
def unpacker(fmt):
    """create a struct unpacker for the specified format"""
    # pre-compile the format once; the returned bound method reuses it
    compiled = struct.Struct(fmt)
    return compiled.unpack
238
238
def popen2(cmd, env=None, newlines=False):
    """Run cmd through the shell; return its (stdin, stdout) pipe pair."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
249
249
def popen3(cmd, env=None, newlines=False):
    # same as popen4, but without exposing the Popen object itself
    return popen4(cmd, env, newlines)[:3]
253
253
def popen4(cmd, env=None, newlines=False):
    """Run cmd through the shell; return (stdin, stdout, stderr, proc)."""
    # bufsize=-1: let the system pick a buffer size (see popen2)
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
262
262
def version():
    """Return version information if available."""
    # the __version__ module is generated at build time; a source
    # checkout that was never built does not have it
    try:
        import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
270
270
271 # used by parsedate
271 # used by parsedate
272 defaultdateformats = (
272 defaultdateformats = (
273 '%Y-%m-%d %H:%M:%S',
273 '%Y-%m-%d %H:%M:%S',
274 '%Y-%m-%d %I:%M:%S%p',
274 '%Y-%m-%d %I:%M:%S%p',
275 '%Y-%m-%d %H:%M',
275 '%Y-%m-%d %H:%M',
276 '%Y-%m-%d %I:%M%p',
276 '%Y-%m-%d %I:%M%p',
277 '%Y-%m-%d',
277 '%Y-%m-%d',
278 '%m-%d',
278 '%m-%d',
279 '%m/%d',
279 '%m/%d',
280 '%m/%d/%y',
280 '%m/%d/%y',
281 '%m/%d/%Y',
281 '%m/%d/%Y',
282 '%a %b %d %H:%M:%S %Y',
282 '%a %b %d %H:%M:%S %Y',
283 '%a %b %d %I:%M:%S%p %Y',
283 '%a %b %d %I:%M:%S%p %Y',
284 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
284 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
285 '%b %d %H:%M:%S %Y',
285 '%b %d %H:%M:%S %Y',
286 '%b %d %I:%M:%S%p %Y',
286 '%b %d %I:%M:%S%p %Y',
287 '%b %d %H:%M:%S',
287 '%b %d %H:%M:%S',
288 '%b %d %I:%M:%S%p',
288 '%b %d %I:%M:%S%p',
289 '%b %d %H:%M',
289 '%b %d %H:%M',
290 '%b %d %I:%M%p',
290 '%b %d %I:%M%p',
291 '%b %d %Y',
291 '%b %d %Y',
292 '%b %d',
292 '%b %d',
293 '%H:%M:%S',
293 '%H:%M:%S',
294 '%I:%M:%S%p',
294 '%I:%M:%S%p',
295 '%H:%M',
295 '%H:%M',
296 '%I:%M%p',
296 '%I:%M%p',
297 )
297 )
298
298
299 extendeddateformats = defaultdateformats + (
299 extendeddateformats = defaultdateformats + (
300 "%Y",
300 "%Y",
301 "%Y-%m",
301 "%Y-%m",
302 "%b",
302 "%b",
303 "%b %Y",
303 "%b %Y",
304 )
304 )
305
305
def cachefunc(func):
    '''cache the result of function calls

    Returns a wrapper that memoizes results keyed on the positional
    arguments.  The cache is unbounded.
    '''
    # XXX doesn't handle keywords args
    # func_code is the Python 2 spelling of __code__
    if func.func_code.co_argcount == 0:
        # zero-argument function: a one-element list holds the single result
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
331
331
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration, keys() and items() follow insertion order; re-setting an
    existing key moves it to the end of that order.
    '''
    def __init__(self, data=None):
        # parallel list holding keys in insertion order
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # re-insertion moves the key to the end
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: return the popped value per the dict.pop contract; the
        # previous version discarded it and always returned None
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned; nothing to unlink
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
376
376
class lrucachedict(object):
    '''cache most recent gets from or sets to this dictionary'''

    def __init__(self, maxsize):
        self._cache = {}
        self._maxsize = maxsize
        # keys ordered least-recently-used first
        self._order = collections.deque()

    def __getitem__(self, key):
        # a hit makes the key most-recently-used: move it to the right end
        val = self._cache[key]
        self._order.remove(key)
        self._order.append(key)
        return val

    def __setitem__(self, key, value):
        if key in self._cache:
            # overwrite of an existing key: only its recency changes
            self._order.remove(key)
        elif len(self._cache) >= self._maxsize:
            # make room by dropping the least-recently-used entry
            del self._cache[self._order.popleft()]
        self._cache[key] = value
        self._order.append(key)

    def __contains__(self, key):
        return key in self._cache

    def clear(self):
        self._cache.clear()
        self._order = collections.deque()
405
405
def lrucachefunc(func):
    '''cache most recent results of function calls

    Like cachefunc, but bounded: once the cache holds more than 20
    entries, the least-recently-used one is evicted before a new result
    is stored.
    '''
    cache = {}
    order = collections.deque()  # keys, least-recently-used first
    # func_code is the Python 2 spelling of __code__
    if func.func_code.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                # cache hit: refresh the key's recency
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
432
432
class propertycache(object):
    """Descriptor decorator: compute a method's value on first access,
    then store it in the instance __dict__ so later accesses never reach
    the descriptor again (it defines no __set__, so the instance
    attribute shadows it)."""

    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
445
445
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
452
452
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write the input to a temp file for the command to read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # pre-create the output file; the command overwrites it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        # OpenVMS encodes success in the low bit of the exit status
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort cleanup of both temp files, even on failure
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
489
489
490 filtertable = {
490 filtertable = {
491 'tempfile:': tempfilter,
491 'tempfile:': tempfilter,
492 'pipe:': pipefilter,
492 'pipe:': pipefilter,
493 }
493 }
494
494
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on a known scheme prefix ('tempfile:', 'pipe:'); anything
    # unprefixed is run through a plain pipe
    for prefix, fltr in filtertable.iteritems():
        if cmd.startswith(prefix):
            return fltr(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
501
501
def binary(s):
    """return true if a string is binary data"""
    # heuristic: non-empty and containing at least one NUL byte
    if not s:
        return False
    return '\0' in s
505
505
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)) for x > 0; 0 for x == 0
        if not x:
            return 0
        bits = 0
        while x:
            bits += 1
            x >>= 1
        return bits - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen < min:
            continue
        if min < max:
            # grow the threshold: at least double it, jump straight past
            # the size just emitted, but never exceed max
            min <<= 1
            rounded = 1 << log2(pendinglen)
            if rounded > min:
                min = rounded
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        yield ''.join(pending)
536
536
537 Abort = error.Abort
537 Abort = error.Abort
538
538
def always(fn):
    """Match predicate that accepts everything; `fn` is ignored."""
    return True
541
541
def never(fn):
    """Match predicate that rejects everything; `fn` is ignored."""
    return False
544
544
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        if gc.isenabled():
            # GC is on: switch it off for the duration of the call and
            # guarantee it comes back on even if func raises
            gc.disable()
            try:
                return func(*args, **kwargs)
            finally:
                gc.enable()
        # GC was already off: leave it that way
        return func(*args, **kwargs)
    return wrapper
566
566
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path exists,
            # fall back to an absolute path under root
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common leading components of both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join((['..'] * len(a)) + b) or '.'
592
592
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze
602
602
# the location of data files matching the source code
if mainfrozen():
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)
611
611
612 _hgexecutable = None
612 _hgexecutable = None
613
613
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            # frozen builds: the interpreter binary *is* hg
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
632
632
633 def _sethgexecutable(path):
633 def _sethgexecutable(path):
634 """set location of the 'hg' executable"""
634 """set location of the 'hg' executable"""
635 global _hgexecutable
635 global _hgexecutable
636 _hgexecutable = path
636 _hgexecutable = path
637
637
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.

    Note: the mutable default for ``environ`` is safe because it is only
    read, never mutated.
    '''
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or out == sys.__stdout__:
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # caller wants output redirected: pump child stdout/stderr
            # line by line into the supplied file-like object
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # OpenVMS uses odd status values for success
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
694
694
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # depth 1 means the TypeError came from the call itself
            # (bad signature), not from deeper inside func
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
706
706
def copyfile(src, dest, hardlink=False):
    "copy a file, preserving mode and atime/mtime"
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # copy the symlink itself, not its target
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
727
727
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # hardlinks can only be used within one filesystem
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset nested progress by files already handled
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed; degrade to copying for the rest
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
764
764
765 _winreservednames = '''con prn aux nul
765 _winreservednames = '''con prn aux nul
766 com1 com2 com3 com4 com5 com6 com7 com8 com9
766 com1 com2 com3 com4 com5 com6 com7 com8 com9
767 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
767 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
768 _winreservedchars = ':*?"<>|'
768 _winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # only the part before the first dot is checked against the
        # reserved device names ("con.xml" is still reserved)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
815
815
# pick the right filename validator for the current OS
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
820
820
def makelock(info, pathname):
    """Create a lock at pathname holding info.

    A symlink is preferred (creation is atomic and the content can be
    read without opening a file); on platforms lacking os.symlink, fall
    back to an exclusively-created regular file.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # other symlink failures: fall through to the file-based lock
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
833
833
def readlock(pathname):
    """Return the info stored in the lock at pathname.

    Reads the symlink target when the lock is a symlink, otherwise the
    content of the regular lock file.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
846
846
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        # no fileno(): fall back to stat'ing by name
        return os.stat(fp.name)
853
853
854 # File system features
854 # File system features
855
855
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            # the case-folded name resolves to the same file:
            # filesystem is case-insensitive
            return False
        return True
    except OSError:
        return True
878
878
879 try:
879 try:
880 import re2
880 import re2
881 _re2 = None
881 _re2 = None
882 except ImportError:
882 except ImportError:
883 _re2 = False
883 _re2 = False
884
884
class _re(object):
    """Facade over the stdlib 're' module that transparently routes to
    the 're2' bindings when they are installed and the pattern/flags
    are re2-compatible."""

    def _checkre2(self):
        # probe whether the imported re2 module really works and cache
        # the verdict in the module-level _re2 flag
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline, not as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
927
927
# module-level regex facade, used in place of the stdlib 're' module
re = _re()

# cache for fspath(): maps directory path -> {normcased name: on-disk name}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased names to their on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE(review): str.replace returns a new string and the result is
    # discarded here, so this line is a no-op — confirm intent upstream.
    seps.replace('\\','\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
972
972
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fd is not None:
            fd.close()
        # best effort cleanup of both scratch files
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1004
1004
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    # note: may return None (falsy) on platforms without os.altsep
    return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1008
1008
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(os.sep)
1016
1016
def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        return os.name == "nt" or os.environ.get("DISPLAY")
1031
1031
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # no original to copy: the fresh empty temp file is fine
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1070
1070
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if not self._fp.closed:
            self._fp.close()
            # publish the temp file under the permanent name
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1108
1108
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1125
1125
1126 def ensuredirs(name, mode=None, notindexed=False):
1126 def ensuredirs(name, mode=None, notindexed=False):
1127 """race-safe recursive directory creation
1127 """race-safe recursive directory creation
1128
1128
1129 Newly created directories are marked as "not to be indexed by
1129 Newly created directories are marked as "not to be indexed by
1130 the content indexing service", if ``notindexed`` is specified
1130 the content indexing service", if ``notindexed`` is specified
1131 for "write" mode access.
1131 for "write" mode access.
1132 """
1132 """
1133 if os.path.isdir(name):
1133 if os.path.isdir(name):
1134 return
1134 return
1135 parent = os.path.dirname(os.path.abspath(name))
1135 parent = os.path.dirname(os.path.abspath(name))
1136 if parent != name:
1136 if parent != name:
1137 ensuredirs(parent, mode, notindexed)
1137 ensuredirs(parent, mode, notindexed)
1138 try:
1138 try:
1139 makedir(name, notindexed)
1139 makedir(name, notindexed)
1140 except OSError, err:
1140 except OSError, err:
1141 if err.errno == errno.EEXIST and os.path.isdir(name):
1141 if err.errno == errno.EEXIST and os.path.isdir(name):
1142 # someone else seems to have won a directory creation race
1142 # someone else seems to have won a directory creation race
1143 return
1143 return
1144 raise
1144 raise
1145 if mode is not None:
1145 if mode is not None:
1146 os.chmod(name, mode)
1146 os.chmod(name, mode)
1147
1147
def readfile(path):
    """Return the entire binary content of ``path``."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1154
1154
def writefile(path, text):
    """Overwrite ``path`` with the binary content ``text``."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1161
1161
def appendfile(path, text):
    """Append the binary content ``text`` to ``path``."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1168
1168
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-slice chunks bigger than 1MB into 256kB pieces so a
            # single huge chunk never dominates memory use
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        left = l
        buf = []
        queue = self._queue
        while left is None or left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # underlying iterator exhausted
                    break

            chunk = queue.popleft()
            if left is not None:
                left -= len(chunk)
            if left is not None and left < 0:
                # chunk overshoots the request: keep the tail queued
                queue.appendleft(chunk[left:])
                buf.append(chunk[:left])
            else:
                buf.append(chunk)

        return ''.join(buf)
1219
1219
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        # nbytes == 0 short-circuits to a falsy s and ends the loop
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1240
1240
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # local-vs-UTC difference of the same instant gives the zone offset
    delta = (datetime.datetime.utcfromtimestamp(timestamp) -
             datetime.datetime.fromtimestamp(timestamp))
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1253
1253
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string.

    ``%1``/``%2`` (and ``%z``, rewritten to ``%1%2``) expand to the
    signed hour and minute parts of the offset.
    """
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # offset is seconds *away from* UTC, hence the inverted sign
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    s = time.strftime(format, t)
    return s
1276
1276
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    return datestr(date, format='%Y-%m-%d')
1280
1280
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    ``defaults`` maps specificity keys ("S", "M", "HI", "d", "mb", "yY")
    to (biased, today) fallback strings used for parts missing from
    ``format`` -- callers always pass a dict despite the list default.
    """
    def timezone(string):
        # recognize a trailing "+HHMM"/"-HHMM" numeric zone or GMT/UTC
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1321
1321
def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: already a raw "unixtime offset" pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1398
1398
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp the (possibly partial) date can mean
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp: try month lengths from 31 down to 28
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range "A to B"
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1474
1474
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # progressively strip: mail domain, "Name <" prefix, trailing words,
    # and anything after the first dot
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    f = user.find(' ')
    if f >= 0:
        user = user[:f]
    f = user.find('.')
    if f >= 0:
        user = user[:f]
    return user
1490
1490
def emailuser(user):
    """Return the user portion of an email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    return user
1500
1500
def email(author):
    '''get email of author.'''
    # take the "<...>" part if present, else the whole string after '<'
    r = author.find('>')
    if r == -1:
        r = None
    return author[author.find('<') + 1:r]
1507
1507
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    return encoding.trim(text, maxlength, ellipsis='...')
1511
1511
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    ``unittable`` entries are (multiplier, divisor, format) tuples tried
    in order; the first whose threshold ``divisor * multiplier`` is met
    formats ``count / divisor``.
    '''

    def go(count):
        for multiplier, divisor, format in unittable:
            if count >= divisor * multiplier:
                return format % (count / float(divisor))
        # below every threshold: fall back to the last (smallest) entry
        return unittable[-1][2] % count

    return go
1522
1522
# human-readable byte-count formatter built from unitcountfn:
# three precision tiers per unit, falling back to raw bytes
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1535
1535
def uirepr(s):
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')
1539
1539
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr so the head occupies at most space_left columns
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # replace the factory with the class itself so later calls skip the
    # class-definition cost
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1650
1643
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a byte string to ``width`` display columns.

    ``initindent``/``hangindent`` prefix the first and subsequent lines.
    Input is decoded with the local encoding, wrapped width-aware, and
    re-encoded.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
1663
1656
1664 def iterlines(iterator):
1657 def iterlines(iterator):
1665 for chunk in iterator:
1658 for chunk in iterator:
1666 for line in chunk.splitlines():
1659 for line in chunk.splitlines():
1667 yield line
1660 yield line
1668
1661
1669 def expandpath(path):
1662 def expandpath(path):
1670 return os.path.expanduser(os.path.expandvars(path))
1663 return os.path.expanduser(os.path.expandvars(path))
1671
1664
1672 def hgcmd():
1665 def hgcmd():
1673 """Return the command used to execute current hg
1666 """Return the command used to execute current hg
1674
1667
1675 This is different from hgexecutable() because on Windows we want
1668 This is different from hgexecutable() because on Windows we want
1676 to avoid things opening new shell windows like batch files, so we
1669 to avoid things opening new shell windows like batch files, so we
1677 get either the python call or current executable.
1670 get either the python call or current executable.
1678 """
1671 """
1679 if mainfrozen():
1672 if mainfrozen():
1680 return [sys.executable]
1673 return [sys.executable]
1681 return gethgcmd()
1674 return gethgcmd()
1682
1675
1683 def rundetached(args, condfn):
1676 def rundetached(args, condfn):
1684 """Execute the argument list in a detached process.
1677 """Execute the argument list in a detached process.
1685
1678
1686 condfn is a callable which is called repeatedly and should return
1679 condfn is a callable which is called repeatedly and should return
1687 True once the child process is known to have started successfully.
1680 True once the child process is known to have started successfully.
1688 At this point, the child process PID is returned. If the child
1681 At this point, the child process PID is returned. If the child
1689 process fails to start or finishes before condfn() evaluates to
1682 process fails to start or finishes before condfn() evaluates to
1690 True, return -1.
1683 True, return -1.
1691 """
1684 """
1692 # Windows case is easier because the child process is either
1685 # Windows case is easier because the child process is either
1693 # successfully starting and validating the condition or exiting
1686 # successfully starting and validating the condition or exiting
1694 # on failure. We just poll on its PID. On Unix, if the child
1687 # on failure. We just poll on its PID. On Unix, if the child
1695 # process fails to start, it will be left in a zombie state until
1688 # process fails to start, it will be left in a zombie state until
1696 # the parent wait on it, which we cannot do since we expect a long
1689 # the parent wait on it, which we cannot do since we expect a long
1697 # running process on success. Instead we listen for SIGCHLD telling
1690 # running process on success. Instead we listen for SIGCHLD telling
1698 # us our child process terminated.
1691 # us our child process terminated.
1699 terminated = set()
1692 terminated = set()
1700 def handler(signum, frame):
1693 def handler(signum, frame):
1701 terminated.add(os.wait())
1694 terminated.add(os.wait())
1702 prevhandler = None
1695 prevhandler = None
1703 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1696 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1704 if SIGCHLD is not None:
1697 if SIGCHLD is not None:
1705 prevhandler = signal.signal(SIGCHLD, handler)
1698 prevhandler = signal.signal(SIGCHLD, handler)
1706 try:
1699 try:
1707 pid = spawndetached(args)
1700 pid = spawndetached(args)
1708 while not condfn():
1701 while not condfn():
1709 if ((pid in terminated or not testpid(pid))
1702 if ((pid in terminated or not testpid(pid))
1710 and not condfn()):
1703 and not condfn()):
1711 return -1
1704 return -1
1712 time.sleep(0.1)
1705 time.sleep(0.1)
1713 return pid
1706 return pid
1714 finally:
1707 finally:
1715 if prevhandler is not None:
1708 if prevhandler is not None:
1716 signal.signal(signal.SIGCHLD, prevhandler)
1709 signal.signal(signal.SIGCHLD, prevhandler)
1717
1710
1718 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1711 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1719 """Return the result of interpolating items in the mapping into string s.
1712 """Return the result of interpolating items in the mapping into string s.
1720
1713
1721 prefix is a single character string, or a two character string with
1714 prefix is a single character string, or a two character string with
1722 a backslash as the first character if the prefix needs to be escaped in
1715 a backslash as the first character if the prefix needs to be escaped in
1723 a regular expression.
1716 a regular expression.
1724
1717
1725 fn is an optional function that will be applied to the replacement text
1718 fn is an optional function that will be applied to the replacement text
1726 just before replacement.
1719 just before replacement.
1727
1720
1728 escape_prefix is an optional flag that allows using doubled prefix for
1721 escape_prefix is an optional flag that allows using doubled prefix for
1729 its escaping.
1722 its escaping.
1730 """
1723 """
1731 fn = fn or (lambda s: s)
1724 fn = fn or (lambda s: s)
1732 patterns = '|'.join(mapping.keys())
1725 patterns = '|'.join(mapping.keys())
1733 if escape_prefix:
1726 if escape_prefix:
1734 patterns += '|' + prefix
1727 patterns += '|' + prefix
1735 if len(prefix) > 1:
1728 if len(prefix) > 1:
1736 prefix_char = prefix[1:]
1729 prefix_char = prefix[1:]
1737 else:
1730 else:
1738 prefix_char = prefix
1731 prefix_char = prefix
1739 mapping[prefix_char] = prefix_char
1732 mapping[prefix_char] = prefix_char
1740 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1733 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1741 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1734 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1742
1735
1743 def getport(port):
1736 def getport(port):
1744 """Return the port for a given network service.
1737 """Return the port for a given network service.
1745
1738
1746 If port is an integer, it's returned as is. If it's a string, it's
1739 If port is an integer, it's returned as is. If it's a string, it's
1747 looked up using socket.getservbyname(). If there's no matching
1740 looked up using socket.getservbyname(). If there's no matching
1748 service, util.Abort is raised.
1741 service, util.Abort is raised.
1749 """
1742 """
1750 try:
1743 try:
1751 return int(port)
1744 return int(port)
1752 except ValueError:
1745 except ValueError:
1753 pass
1746 pass
1754
1747
1755 try:
1748 try:
1756 return socket.getservbyname(port)
1749 return socket.getservbyname(port)
1757 except socket.error:
1750 except socket.error:
1758 raise Abort(_("no port number associated with service '%s'") % port)
1751 raise Abort(_("no port number associated with service '%s'") % port)
1759
1752
1760 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1753 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1761 '0': False, 'no': False, 'false': False, 'off': False,
1754 '0': False, 'no': False, 'false': False, 'off': False,
1762 'never': False}
1755 'never': False}
1763
1756
1764 def parsebool(s):
1757 def parsebool(s):
1765 """Parse s into a boolean.
1758 """Parse s into a boolean.
1766
1759
1767 If s is not a valid boolean, returns None.
1760 If s is not a valid boolean, returns None.
1768 """
1761 """
1769 return _booleans.get(s.lower(), None)
1762 return _booleans.get(s.lower(), None)
1770
1763
1771 _hexdig = '0123456789ABCDEFabcdef'
1764 _hexdig = '0123456789ABCDEFabcdef'
1772 _hextochr = dict((a + b, chr(int(a + b, 16)))
1765 _hextochr = dict((a + b, chr(int(a + b, 16)))
1773 for a in _hexdig for b in _hexdig)
1766 for a in _hexdig for b in _hexdig)
1774
1767
1775 def _urlunquote(s):
1768 def _urlunquote(s):
1776 """Decode HTTP/HTML % encoding.
1769 """Decode HTTP/HTML % encoding.
1777
1770
1778 >>> _urlunquote('abc%20def')
1771 >>> _urlunquote('abc%20def')
1779 'abc def'
1772 'abc def'
1780 """
1773 """
1781 res = s.split('%')
1774 res = s.split('%')
1782 # fastpath
1775 # fastpath
1783 if len(res) == 1:
1776 if len(res) == 1:
1784 return s
1777 return s
1785 s = res[0]
1778 s = res[0]
1786 for item in res[1:]:
1779 for item in res[1:]:
1787 try:
1780 try:
1788 s += _hextochr[item[:2]] + item[2:]
1781 s += _hextochr[item[:2]] + item[2:]
1789 except KeyError:
1782 except KeyError:
1790 s += '%' + item
1783 s += '%' + item
1791 except UnicodeDecodeError:
1784 except UnicodeDecodeError:
1792 s += unichr(int(item[:2], 16)) + item[2:]
1785 s += unichr(int(item[:2], 16)) + item[2:]
1793 return s
1786 return s
1794
1787
1795 class url(object):
1788 class url(object):
1796 r"""Reliable URL parser.
1789 r"""Reliable URL parser.
1797
1790
1798 This parses URLs and provides attributes for the following
1791 This parses URLs and provides attributes for the following
1799 components:
1792 components:
1800
1793
1801 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1794 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1802
1795
1803 Missing components are set to None. The only exception is
1796 Missing components are set to None. The only exception is
1804 fragment, which is set to '' if present but empty.
1797 fragment, which is set to '' if present but empty.
1805
1798
1806 If parsefragment is False, fragment is included in query. If
1799 If parsefragment is False, fragment is included in query. If
1807 parsequery is False, query is included in path. If both are
1800 parsequery is False, query is included in path. If both are
1808 False, both fragment and query are included in path.
1801 False, both fragment and query are included in path.
1809
1802
1810 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1803 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1811
1804
1812 Note that for backward compatibility reasons, bundle URLs do not
1805 Note that for backward compatibility reasons, bundle URLs do not
1813 take host names. That means 'bundle://../' has a path of '../'.
1806 take host names. That means 'bundle://../' has a path of '../'.
1814
1807
1815 Examples:
1808 Examples:
1816
1809
1817 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1810 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1818 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1811 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1819 >>> url('ssh://[::1]:2200//home/joe/repo')
1812 >>> url('ssh://[::1]:2200//home/joe/repo')
1820 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1813 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1821 >>> url('file:///home/joe/repo')
1814 >>> url('file:///home/joe/repo')
1822 <url scheme: 'file', path: '/home/joe/repo'>
1815 <url scheme: 'file', path: '/home/joe/repo'>
1823 >>> url('file:///c:/temp/foo/')
1816 >>> url('file:///c:/temp/foo/')
1824 <url scheme: 'file', path: 'c:/temp/foo/'>
1817 <url scheme: 'file', path: 'c:/temp/foo/'>
1825 >>> url('bundle:foo')
1818 >>> url('bundle:foo')
1826 <url scheme: 'bundle', path: 'foo'>
1819 <url scheme: 'bundle', path: 'foo'>
1827 >>> url('bundle://../foo')
1820 >>> url('bundle://../foo')
1828 <url scheme: 'bundle', path: '../foo'>
1821 <url scheme: 'bundle', path: '../foo'>
1829 >>> url(r'c:\foo\bar')
1822 >>> url(r'c:\foo\bar')
1830 <url path: 'c:\\foo\\bar'>
1823 <url path: 'c:\\foo\\bar'>
1831 >>> url(r'\\blah\blah\blah')
1824 >>> url(r'\\blah\blah\blah')
1832 <url path: '\\\\blah\\blah\\blah'>
1825 <url path: '\\\\blah\\blah\\blah'>
1833 >>> url(r'\\blah\blah\blah#baz')
1826 >>> url(r'\\blah\blah\blah#baz')
1834 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1827 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1835 >>> url(r'file:///C:\users\me')
1828 >>> url(r'file:///C:\users\me')
1836 <url scheme: 'file', path: 'C:\\users\\me'>
1829 <url scheme: 'file', path: 'C:\\users\\me'>
1837
1830
1838 Authentication credentials:
1831 Authentication credentials:
1839
1832
1840 >>> url('ssh://joe:xyz@x/repo')
1833 >>> url('ssh://joe:xyz@x/repo')
1841 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1834 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1842 >>> url('ssh://joe@x/repo')
1835 >>> url('ssh://joe@x/repo')
1843 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1836 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1844
1837
1845 Query strings and fragments:
1838 Query strings and fragments:
1846
1839
1847 >>> url('http://host/a?b#c')
1840 >>> url('http://host/a?b#c')
1848 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1841 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1849 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1842 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1850 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1843 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1851 """
1844 """
1852
1845
1853 _safechars = "!~*'()+"
1846 _safechars = "!~*'()+"
1854 _safepchars = "/!~*'()+:\\"
1847 _safepchars = "/!~*'()+:\\"
1855 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1848 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1856
1849
1857 def __init__(self, path, parsequery=True, parsefragment=True):
1850 def __init__(self, path, parsequery=True, parsefragment=True):
1858 # We slowly chomp away at path until we have only the path left
1851 # We slowly chomp away at path until we have only the path left
1859 self.scheme = self.user = self.passwd = self.host = None
1852 self.scheme = self.user = self.passwd = self.host = None
1860 self.port = self.path = self.query = self.fragment = None
1853 self.port = self.path = self.query = self.fragment = None
1861 self._localpath = True
1854 self._localpath = True
1862 self._hostport = ''
1855 self._hostport = ''
1863 self._origpath = path
1856 self._origpath = path
1864
1857
1865 if parsefragment and '#' in path:
1858 if parsefragment and '#' in path:
1866 path, self.fragment = path.split('#', 1)
1859 path, self.fragment = path.split('#', 1)
1867 if not path:
1860 if not path:
1868 path = None
1861 path = None
1869
1862
1870 # special case for Windows drive letters and UNC paths
1863 # special case for Windows drive letters and UNC paths
1871 if hasdriveletter(path) or path.startswith(r'\\'):
1864 if hasdriveletter(path) or path.startswith(r'\\'):
1872 self.path = path
1865 self.path = path
1873 return
1866 return
1874
1867
1875 # For compatibility reasons, we can't handle bundle paths as
1868 # For compatibility reasons, we can't handle bundle paths as
1876 # normal URLS
1869 # normal URLS
1877 if path.startswith('bundle:'):
1870 if path.startswith('bundle:'):
1878 self.scheme = 'bundle'
1871 self.scheme = 'bundle'
1879 path = path[7:]
1872 path = path[7:]
1880 if path.startswith('//'):
1873 if path.startswith('//'):
1881 path = path[2:]
1874 path = path[2:]
1882 self.path = path
1875 self.path = path
1883 return
1876 return
1884
1877
1885 if self._matchscheme(path):
1878 if self._matchscheme(path):
1886 parts = path.split(':', 1)
1879 parts = path.split(':', 1)
1887 if parts[0]:
1880 if parts[0]:
1888 self.scheme, path = parts
1881 self.scheme, path = parts
1889 self._localpath = False
1882 self._localpath = False
1890
1883
1891 if not path:
1884 if not path:
1892 path = None
1885 path = None
1893 if self._localpath:
1886 if self._localpath:
1894 self.path = ''
1887 self.path = ''
1895 return
1888 return
1896 else:
1889 else:
1897 if self._localpath:
1890 if self._localpath:
1898 self.path = path
1891 self.path = path
1899 return
1892 return
1900
1893
1901 if parsequery and '?' in path:
1894 if parsequery and '?' in path:
1902 path, self.query = path.split('?', 1)
1895 path, self.query = path.split('?', 1)
1903 if not path:
1896 if not path:
1904 path = None
1897 path = None
1905 if not self.query:
1898 if not self.query:
1906 self.query = None
1899 self.query = None
1907
1900
1908 # // is required to specify a host/authority
1901 # // is required to specify a host/authority
1909 if path and path.startswith('//'):
1902 if path and path.startswith('//'):
1910 parts = path[2:].split('/', 1)
1903 parts = path[2:].split('/', 1)
1911 if len(parts) > 1:
1904 if len(parts) > 1:
1912 self.host, path = parts
1905 self.host, path = parts
1913 else:
1906 else:
1914 self.host = parts[0]
1907 self.host = parts[0]
1915 path = None
1908 path = None
1916 if not self.host:
1909 if not self.host:
1917 self.host = None
1910 self.host = None
1918 # path of file:///d is /d
1911 # path of file:///d is /d
1919 # path of file:///d:/ is d:/, not /d:/
1912 # path of file:///d:/ is d:/, not /d:/
1920 if path and not hasdriveletter(path):
1913 if path and not hasdriveletter(path):
1921 path = '/' + path
1914 path = '/' + path
1922
1915
1923 if self.host and '@' in self.host:
1916 if self.host and '@' in self.host:
1924 self.user, self.host = self.host.rsplit('@', 1)
1917 self.user, self.host = self.host.rsplit('@', 1)
1925 if ':' in self.user:
1918 if ':' in self.user:
1926 self.user, self.passwd = self.user.split(':', 1)
1919 self.user, self.passwd = self.user.split(':', 1)
1927 if not self.host:
1920 if not self.host:
1928 self.host = None
1921 self.host = None
1929
1922
1930 # Don't split on colons in IPv6 addresses without ports
1923 # Don't split on colons in IPv6 addresses without ports
1931 if (self.host and ':' in self.host and
1924 if (self.host and ':' in self.host and
1932 not (self.host.startswith('[') and self.host.endswith(']'))):
1925 not (self.host.startswith('[') and self.host.endswith(']'))):
1933 self._hostport = self.host
1926 self._hostport = self.host
1934 self.host, self.port = self.host.rsplit(':', 1)
1927 self.host, self.port = self.host.rsplit(':', 1)
1935 if not self.host:
1928 if not self.host:
1936 self.host = None
1929 self.host = None
1937
1930
1938 if (self.host and self.scheme == 'file' and
1931 if (self.host and self.scheme == 'file' and
1939 self.host not in ('localhost', '127.0.0.1', '[::1]')):
1932 self.host not in ('localhost', '127.0.0.1', '[::1]')):
1940 raise Abort(_('file:// URLs can only refer to localhost'))
1933 raise Abort(_('file:// URLs can only refer to localhost'))
1941
1934
1942 self.path = path
1935 self.path = path
1943
1936
1944 # leave the query string escaped
1937 # leave the query string escaped
1945 for a in ('user', 'passwd', 'host', 'port',
1938 for a in ('user', 'passwd', 'host', 'port',
1946 'path', 'fragment'):
1939 'path', 'fragment'):
1947 v = getattr(self, a)
1940 v = getattr(self, a)
1948 if v is not None:
1941 if v is not None:
1949 setattr(self, a, _urlunquote(v))
1942 setattr(self, a, _urlunquote(v))
1950
1943
1951 def __repr__(self):
1944 def __repr__(self):
1952 attrs = []
1945 attrs = []
1953 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
1946 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
1954 'query', 'fragment'):
1947 'query', 'fragment'):
1955 v = getattr(self, a)
1948 v = getattr(self, a)
1956 if v is not None:
1949 if v is not None:
1957 attrs.append('%s: %r' % (a, v))
1950 attrs.append('%s: %r' % (a, v))
1958 return '<url %s>' % ', '.join(attrs)
1951 return '<url %s>' % ', '.join(attrs)
1959
1952
1960 def __str__(self):
1953 def __str__(self):
1961 r"""Join the URL's components back into a URL string.
1954 r"""Join the URL's components back into a URL string.
1962
1955
1963 Examples:
1956 Examples:
1964
1957
1965 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
1958 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
1966 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
1959 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
1967 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
1960 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
1968 'http://user:pw@host:80/?foo=bar&baz=42'
1961 'http://user:pw@host:80/?foo=bar&baz=42'
1969 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
1962 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
1970 'http://user:pw@host:80/?foo=bar%3dbaz'
1963 'http://user:pw@host:80/?foo=bar%3dbaz'
1971 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
1964 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
1972 'ssh://user:pw@[::1]:2200//home/joe#'
1965 'ssh://user:pw@[::1]:2200//home/joe#'
1973 >>> str(url('http://localhost:80//'))
1966 >>> str(url('http://localhost:80//'))
1974 'http://localhost:80//'
1967 'http://localhost:80//'
1975 >>> str(url('http://localhost:80/'))
1968 >>> str(url('http://localhost:80/'))
1976 'http://localhost:80/'
1969 'http://localhost:80/'
1977 >>> str(url('http://localhost:80'))
1970 >>> str(url('http://localhost:80'))
1978 'http://localhost:80/'
1971 'http://localhost:80/'
1979 >>> str(url('bundle:foo'))
1972 >>> str(url('bundle:foo'))
1980 'bundle:foo'
1973 'bundle:foo'
1981 >>> str(url('bundle://../foo'))
1974 >>> str(url('bundle://../foo'))
1982 'bundle:../foo'
1975 'bundle:../foo'
1983 >>> str(url('path'))
1976 >>> str(url('path'))
1984 'path'
1977 'path'
1985 >>> str(url('file:///tmp/foo/bar'))
1978 >>> str(url('file:///tmp/foo/bar'))
1986 'file:///tmp/foo/bar'
1979 'file:///tmp/foo/bar'
1987 >>> str(url('file:///c:/tmp/foo/bar'))
1980 >>> str(url('file:///c:/tmp/foo/bar'))
1988 'file:///c:/tmp/foo/bar'
1981 'file:///c:/tmp/foo/bar'
1989 >>> print url(r'bundle:foo\bar')
1982 >>> print url(r'bundle:foo\bar')
1990 bundle:foo\bar
1983 bundle:foo\bar
1991 >>> print url(r'file:///D:\data\hg')
1984 >>> print url(r'file:///D:\data\hg')
1992 file:///D:\data\hg
1985 file:///D:\data\hg
1993 """
1986 """
1994 if self._localpath:
1987 if self._localpath:
1995 s = self.path
1988 s = self.path
1996 if self.scheme == 'bundle':
1989 if self.scheme == 'bundle':
1997 s = 'bundle:' + s
1990 s = 'bundle:' + s
1998 if self.fragment:
1991 if self.fragment:
1999 s += '#' + self.fragment
1992 s += '#' + self.fragment
2000 return s
1993 return s
2001
1994
2002 s = self.scheme + ':'
1995 s = self.scheme + ':'
2003 if self.user or self.passwd or self.host:
1996 if self.user or self.passwd or self.host:
2004 s += '//'
1997 s += '//'
2005 elif self.scheme and (not self.path or self.path.startswith('/')
1998 elif self.scheme and (not self.path or self.path.startswith('/')
2006 or hasdriveletter(self.path)):
1999 or hasdriveletter(self.path)):
2007 s += '//'
2000 s += '//'
2008 if hasdriveletter(self.path):
2001 if hasdriveletter(self.path):
2009 s += '/'
2002 s += '/'
2010 if self.user:
2003 if self.user:
2011 s += urllib.quote(self.user, safe=self._safechars)
2004 s += urllib.quote(self.user, safe=self._safechars)
2012 if self.passwd:
2005 if self.passwd:
2013 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
2006 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
2014 if self.user or self.passwd:
2007 if self.user or self.passwd:
2015 s += '@'
2008 s += '@'
2016 if self.host:
2009 if self.host:
2017 if not (self.host.startswith('[') and self.host.endswith(']')):
2010 if not (self.host.startswith('[') and self.host.endswith(']')):
2018 s += urllib.quote(self.host)
2011 s += urllib.quote(self.host)
2019 else:
2012 else:
2020 s += self.host
2013 s += self.host
2021 if self.port:
2014 if self.port:
2022 s += ':' + urllib.quote(self.port)
2015 s += ':' + urllib.quote(self.port)
2023 if self.host:
2016 if self.host:
2024 s += '/'
2017 s += '/'
2025 if self.path:
2018 if self.path:
2026 # TODO: similar to the query string, we should not unescape the
2019 # TODO: similar to the query string, we should not unescape the
2027 # path when we store it, the path might contain '%2f' = '/',
2020 # path when we store it, the path might contain '%2f' = '/',
2028 # which we should *not* escape.
2021 # which we should *not* escape.
2029 s += urllib.quote(self.path, safe=self._safepchars)
2022 s += urllib.quote(self.path, safe=self._safepchars)
2030 if self.query:
2023 if self.query:
2031 # we store the query in escaped form.
2024 # we store the query in escaped form.
2032 s += '?' + self.query
2025 s += '?' + self.query
2033 if self.fragment is not None:
2026 if self.fragment is not None:
2034 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
2027 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
2035 return s
2028 return s
2036
2029
2037 def authinfo(self):
2030 def authinfo(self):
2038 user, passwd = self.user, self.passwd
2031 user, passwd = self.user, self.passwd
2039 try:
2032 try:
2040 self.user, self.passwd = None, None
2033 self.user, self.passwd = None, None
2041 s = str(self)
2034 s = str(self)
2042 finally:
2035 finally:
2043 self.user, self.passwd = user, passwd
2036 self.user, self.passwd = user, passwd
2044 if not self.user:
2037 if not self.user:
2045 return (s, None)
2038 return (s, None)
2046 # authinfo[1] is passed to urllib2 password manager, and its
2039 # authinfo[1] is passed to urllib2 password manager, and its
2047 # URIs must not contain credentials. The host is passed in the
2040 # URIs must not contain credentials. The host is passed in the
2048 # URIs list because Python < 2.4.3 uses only that to search for
2041 # URIs list because Python < 2.4.3 uses only that to search for
2049 # a password.
2042 # a password.
2050 return (s, (None, (s, self.host),
2043 return (s, (None, (s, self.host),
2051 self.user, self.passwd or ''))
2044 self.user, self.passwd or ''))
2052
2045
2053 def isabs(self):
2046 def isabs(self):
2054 if self.scheme and self.scheme != 'file':
2047 if self.scheme and self.scheme != 'file':
2055 return True # remote URL
2048 return True # remote URL
2056 if hasdriveletter(self.path):
2049 if hasdriveletter(self.path):
2057 return True # absolute for our purposes - can't be joined()
2050 return True # absolute for our purposes - can't be joined()
2058 if self.path.startswith(r'\\'):
2051 if self.path.startswith(r'\\'):
2059 return True # Windows UNC path
2052 return True # Windows UNC path
2060 if self.path.startswith('/'):
2053 if self.path.startswith('/'):
2061 return True # POSIX-style
2054 return True # POSIX-style
2062 return False
2055 return False
2063
2056
2064 def localpath(self):
2057 def localpath(self):
2065 if self.scheme == 'file' or self.scheme == 'bundle':
2058 if self.scheme == 'file' or self.scheme == 'bundle':
2066 path = self.path or '/'
2059 path = self.path or '/'
2067 # For Windows, we need to promote hosts containing drive
2060 # For Windows, we need to promote hosts containing drive
2068 # letters to paths with drive letters.
2061 # letters to paths with drive letters.
2069 if hasdriveletter(self._hostport):
2062 if hasdriveletter(self._hostport):
2070 path = self._hostport + '/' + self.path
2063 path = self._hostport + '/' + self.path
2071 elif (self.host is not None and self.path
2064 elif (self.host is not None and self.path
2072 and not hasdriveletter(path)):
2065 and not hasdriveletter(path)):
2073 path = '/' + path
2066 path = '/' + path
2074 return path
2067 return path
2075 return self._origpath
2068 return self._origpath
2076
2069
2077 def islocal(self):
2070 def islocal(self):
2078 '''whether localpath will return something that posixfile can open'''
2071 '''whether localpath will return something that posixfile can open'''
2079 return (not self.scheme or self.scheme == 'file'
2072 return (not self.scheme or self.scheme == 'file'
2080 or self.scheme == 'bundle')
2073 or self.scheme == 'bundle')
2081
2074
2082 def hasscheme(path):
2075 def hasscheme(path):
2083 return bool(url(path).scheme)
2076 return bool(url(path).scheme)
2084
2077
2085 def hasdriveletter(path):
2078 def hasdriveletter(path):
2086 return path and path[1:2] == ':' and path[0:1].isalpha()
2079 return path and path[1:2] == ':' and path[0:1].isalpha()
2087
2080
2088 def urllocalpath(path):
2081 def urllocalpath(path):
2089 return url(path, parsequery=False, parsefragment=False).localpath()
2082 return url(path, parsequery=False, parsefragment=False).localpath()
2090
2083
2091 def hidepassword(u):
2084 def hidepassword(u):
2092 '''hide user credential in a url string'''
2085 '''hide user credential in a url string'''
2093 u = url(u)
2086 u = url(u)
2094 if u.passwd:
2087 if u.passwd:
2095 u.passwd = '***'
2088 u.passwd = '***'
2096 return str(u)
2089 return str(u)
2097
2090
2098 def removeauth(u):
2091 def removeauth(u):
2099 '''remove all authentication information from a url string'''
2092 '''remove all authentication information from a url string'''
2100 u = url(u)
2093 u = url(u)
2101 u.user = u.passwd = None
2094 u.user = u.passwd = None
2102 return str(u)
2095 return str(u)
2103
2096
2104 def isatty(fd):
2097 def isatty(fd):
2105 try:
2098 try:
2106 return fd.isatty()
2099 return fd.isatty()
2107 except AttributeError:
2100 except AttributeError:
2108 return False
2101 return False
2109
2102
2110 timecount = unitcountfn(
2103 timecount = unitcountfn(
2111 (1, 1e3, _('%.0f s')),
2104 (1, 1e3, _('%.0f s')),
2112 (100, 1, _('%.1f s')),
2105 (100, 1, _('%.1f s')),
2113 (10, 1, _('%.2f s')),
2106 (10, 1, _('%.2f s')),
2114 (1, 1, _('%.3f s')),
2107 (1, 1, _('%.3f s')),
2115 (100, 0.001, _('%.1f ms')),
2108 (100, 0.001, _('%.1f ms')),
2116 (10, 0.001, _('%.2f ms')),
2109 (10, 0.001, _('%.2f ms')),
2117 (1, 0.001, _('%.3f ms')),
2110 (1, 0.001, _('%.3f ms')),
2118 (100, 0.000001, _('%.1f us')),
2111 (100, 0.000001, _('%.1f us')),
2119 (10, 0.000001, _('%.2f us')),
2112 (10, 0.000001, _('%.2f us')),
2120 (1, 0.000001, _('%.3f us')),
2113 (1, 0.000001, _('%.3f us')),
2121 (100, 0.000000001, _('%.1f ns')),
2114 (100, 0.000000001, _('%.1f ns')),
2122 (10, 0.000000001, _('%.2f ns')),
2115 (10, 0.000000001, _('%.2f ns')),
2123 (1, 0.000000001, _('%.3f ns')),
2116 (1, 0.000000001, _('%.3f ns')),
2124 )
2117 )
2125
2118
2126 _timenesting = [0]
2119 _timenesting = [0]
2127
2120
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass

    Nested timed calls are indented by two spaces per level (tracked in
    the module-level _timenesting counter). The timing line is written
    to stderr even when the wrapped function raises.
    '''
    # local import keeps the module's top-level import list untouched
    import functools

    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped func
    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            # report in the finally block so failures are timed too
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2152
2145
# Suffix -> multiplier table for sizetoint. Order matters: the bare 'b'
# entry must stay last so 'kb'/'mb'/'gb' are matched before it.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                # strip the suffix, scale the numeric part
                return int(float(spec[:-len(suffix)]) * multiplier)
        # no recognized suffix: plain integer byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2174
2167
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behaviour. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, callable) pairs; kept unsorted until invoked
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort in place by source name so registration order is irrelevant
        self._hooks.sort(key=lambda pair: pair[0])
        return [fn(*args) for _source, fn in self._hooks]
2192
2185
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush stdout first so interleaved output stays readable
        otherf.flush()
    f.write('%s at:\n' % msg)
    # drop the last 'skip' frames plus this function's own frame
    frames = traceback.extract_stack()[:-skip - 1]
    entries = []
    for fname, lineno, funcname, _text in frames:
        entries.append(('%s:%s' % (fname, lineno), funcname))
    if entries:
        # pad file:line column so the 'in <func>' parts line up
        width = max(len(location) for location, _fn in entries)
        for location, funcname in entries:
            f.write(' %-*s in %s\n' % (width, location, funcname))
    f.flush()
2209
2202
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> number of entries somewhere beneath it
        self._dirs = {}
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: ignore entries whose state is 'skip'
            for fname, entry in map.iteritems():
                if entry[0] != skip:
                    self.addpath(fname)
        else:
            for fname in map:
                self.addpath(fname)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # this ancestor (and therefore all shallower ones) is
                # already counted for previous paths; bump it and stop
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                # still referenced by other paths; decrement and stop
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2245
2238
# prefer the C implementation of dirs when the parsers extension module
# provides one; it shadows the pure-Python class defined above
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2248
2241
def finddirs(path):
    '''Yield every ancestor directory of a '/'-separated path,
    deepest first (e.g. 'a/b/c' -> 'a/b', 'a'). A path with no
    '/' yields nothing.'''
    components = path.split('/')
    for cut in range(len(components) - 1, 0, -1):
        yield '/'.join(components[:cut])
2254
2247
# convenient shortcut: 'dst' is a short alias for debugstacktrace, handy
# to type at a debugging prompt or sprinkle temporarily into code
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now