util: drop the compatibility with Python 2.4 unpacker...
Pierre-Yves David
r25209:277a535c default
@@ -1,2261 +1,2256 @@
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding, parsers
18 import error, osutil, encoding, parsers
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib, struct
22 import imp, socket, urllib, struct
23 import gc
23 import gc
24
24
25 if os.name == 'nt':
25 if os.name == 'nt':
26 import windows as platform
26 import windows as platform
27 else:
27 else:
28 import posix as platform
28 import posix as platform
29
29
30 cachestat = platform.cachestat
30 cachestat = platform.cachestat
31 checkexec = platform.checkexec
31 checkexec = platform.checkexec
32 checklink = platform.checklink
32 checklink = platform.checklink
33 copymode = platform.copymode
33 copymode = platform.copymode
34 executablepath = platform.executablepath
34 executablepath = platform.executablepath
35 expandglobs = platform.expandglobs
35 expandglobs = platform.expandglobs
36 explainexit = platform.explainexit
36 explainexit = platform.explainexit
37 findexe = platform.findexe
37 findexe = platform.findexe
38 gethgcmd = platform.gethgcmd
38 gethgcmd = platform.gethgcmd
39 getuser = platform.getuser
39 getuser = platform.getuser
40 groupmembers = platform.groupmembers
40 groupmembers = platform.groupmembers
41 groupname = platform.groupname
41 groupname = platform.groupname
42 hidewindow = platform.hidewindow
42 hidewindow = platform.hidewindow
43 isexec = platform.isexec
43 isexec = platform.isexec
44 isowner = platform.isowner
44 isowner = platform.isowner
45 localpath = platform.localpath
45 localpath = platform.localpath
46 lookupreg = platform.lookupreg
46 lookupreg = platform.lookupreg
47 makedir = platform.makedir
47 makedir = platform.makedir
48 nlinks = platform.nlinks
48 nlinks = platform.nlinks
49 normpath = platform.normpath
49 normpath = platform.normpath
50 normcase = platform.normcase
50 normcase = platform.normcase
51 normcasespec = platform.normcasespec
51 normcasespec = platform.normcasespec
52 normcasefallback = platform.normcasefallback
52 normcasefallback = platform.normcasefallback
53 openhardlinks = platform.openhardlinks
53 openhardlinks = platform.openhardlinks
54 oslink = platform.oslink
54 oslink = platform.oslink
55 parsepatchoutput = platform.parsepatchoutput
55 parsepatchoutput = platform.parsepatchoutput
56 pconvert = platform.pconvert
56 pconvert = platform.pconvert
57 popen = platform.popen
57 popen = platform.popen
58 posixfile = platform.posixfile
58 posixfile = platform.posixfile
59 quotecommand = platform.quotecommand
59 quotecommand = platform.quotecommand
60 readpipe = platform.readpipe
60 readpipe = platform.readpipe
61 rename = platform.rename
61 rename = platform.rename
62 removedirs = platform.removedirs
62 removedirs = platform.removedirs
63 samedevice = platform.samedevice
63 samedevice = platform.samedevice
64 samefile = platform.samefile
64 samefile = platform.samefile
65 samestat = platform.samestat
65 samestat = platform.samestat
66 setbinary = platform.setbinary
66 setbinary = platform.setbinary
67 setflags = platform.setflags
67 setflags = platform.setflags
68 setsignalhandler = platform.setsignalhandler
68 setsignalhandler = platform.setsignalhandler
69 shellquote = platform.shellquote
69 shellquote = platform.shellquote
70 spawndetached = platform.spawndetached
70 spawndetached = platform.spawndetached
71 split = platform.split
71 split = platform.split
72 sshargs = platform.sshargs
72 sshargs = platform.sshargs
73 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
73 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
74 statisexec = platform.statisexec
74 statisexec = platform.statisexec
75 statislink = platform.statislink
75 statislink = platform.statislink
76 termwidth = platform.termwidth
76 termwidth = platform.termwidth
77 testpid = platform.testpid
77 testpid = platform.testpid
78 umask = platform.umask
78 umask = platform.umask
79 unlink = platform.unlink
79 unlink = platform.unlink
80 unlinkpath = platform.unlinkpath
80 unlinkpath = platform.unlinkpath
81 username = platform.username
81 username = platform.username
82
82
83 # Python compatibility
83 # Python compatibility
84
84
85 _notset = object()
85 _notset = object()
86
86
87 def safehasattr(thing, attr):
87 def safehasattr(thing, attr):
88 return getattr(thing, attr, _notset) is not _notset
88 return getattr(thing, attr, _notset) is not _notset
89
89
90 def sha1(s=''):
90 def sha1(s=''):
91 '''
91 '''
92 Low-overhead wrapper around Python's SHA support
92 Low-overhead wrapper around Python's SHA support
93
93
94 >>> f = _fastsha1
94 >>> f = _fastsha1
95 >>> a = sha1()
95 >>> a = sha1()
96 >>> a = f()
96 >>> a = f()
97 >>> a.hexdigest()
97 >>> a.hexdigest()
98 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
98 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
99 '''
99 '''
100
100
101 return _fastsha1(s)
101 return _fastsha1(s)
102
102
103 def _fastsha1(s=''):
103 def _fastsha1(s=''):
104 # This function will import sha1 from hashlib or sha (whichever is
104 # This function will import sha1 from hashlib or sha (whichever is
105 # available) and overwrite itself with it on the first call.
105 # available) and overwrite itself with it on the first call.
106 # Subsequent calls will go directly to the imported function.
106 # Subsequent calls will go directly to the imported function.
107 if sys.version_info >= (2, 5):
107 if sys.version_info >= (2, 5):
108 from hashlib import sha1 as _sha1
108 from hashlib import sha1 as _sha1
109 else:
109 else:
110 from sha import sha as _sha1
110 from sha import sha as _sha1
111 global _fastsha1, sha1
111 global _fastsha1, sha1
112 _fastsha1 = sha1 = _sha1
112 _fastsha1 = sha1 = _sha1
113 return _sha1(s)
113 return _sha1(s)
114
114
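The lazy-import trick above rebinds _fastsha1 (and sha1) to the real hash constructor on first use, so the version check and import happen only once. A minimal sketch of the same idea, using hypothetical names and hashlib as a stand-in for the costly import:

def _expensive(arg):
    # first call: pay for the import, then rebind the module-level name
    # so every later call goes straight to the real implementation
    import hashlib as _mod
    global _expensive
    _expensive = _mod.sha1
    return _expensive(arg)

_expensive('x')   # imports hashlib and rebinds
_expensive('y')   # calls hashlib.sha1 directly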
115 def md5(s=''):
115 def md5(s=''):
116 try:
116 try:
117 from hashlib import md5 as _md5
117 from hashlib import md5 as _md5
118 except ImportError:
118 except ImportError:
119 from md5 import md5 as _md5
119 from md5 import md5 as _md5
120 global md5
120 global md5
121 md5 = _md5
121 md5 = _md5
122 return _md5(s)
122 return _md5(s)
123
123
124 DIGESTS = {
124 DIGESTS = {
125 'md5': md5,
125 'md5': md5,
126 'sha1': sha1,
126 'sha1': sha1,
127 }
127 }
128 # List of digest types from strongest to weakest
128 # List of digest types from strongest to weakest
129 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
129 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
130
130
131 try:
131 try:
132 import hashlib
132 import hashlib
133 DIGESTS.update({
133 DIGESTS.update({
134 'sha512': hashlib.sha512,
134 'sha512': hashlib.sha512,
135 })
135 })
136 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
136 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
137 except ImportError:
137 except ImportError:
138 pass
138 pass
139
139
140 for k in DIGESTS_BY_STRENGTH:
140 for k in DIGESTS_BY_STRENGTH:
141 assert k in DIGESTS
141 assert k in DIGESTS
142
142
143 class digester(object):
143 class digester(object):
144 """helper to compute digests.
144 """helper to compute digests.
145
145
146 This helper can be used to compute one or more digests given their name.
146 This helper can be used to compute one or more digests given their name.
147
147
148 >>> d = digester(['md5', 'sha1'])
148 >>> d = digester(['md5', 'sha1'])
149 >>> d.update('foo')
149 >>> d.update('foo')
150 >>> [k for k in sorted(d)]
150 >>> [k for k in sorted(d)]
151 ['md5', 'sha1']
151 ['md5', 'sha1']
152 >>> d['md5']
152 >>> d['md5']
153 'acbd18db4cc2f85cedef654fccc4a4d8'
153 'acbd18db4cc2f85cedef654fccc4a4d8'
154 >>> d['sha1']
154 >>> d['sha1']
155 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
155 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
156 >>> digester.preferred(['md5', 'sha1'])
156 >>> digester.preferred(['md5', 'sha1'])
157 'sha1'
157 'sha1'
158 """
158 """
159
159
160 def __init__(self, digests, s=''):
160 def __init__(self, digests, s=''):
161 self._hashes = {}
161 self._hashes = {}
162 for k in digests:
162 for k in digests:
163 if k not in DIGESTS:
163 if k not in DIGESTS:
164 raise Abort(_('unknown digest type: %s') % k)
164 raise Abort(_('unknown digest type: %s') % k)
165 self._hashes[k] = DIGESTS[k]()
165 self._hashes[k] = DIGESTS[k]()
166 if s:
166 if s:
167 self.update(s)
167 self.update(s)
168
168
169 def update(self, data):
169 def update(self, data):
170 for h in self._hashes.values():
170 for h in self._hashes.values():
171 h.update(data)
171 h.update(data)
172
172
173 def __getitem__(self, key):
173 def __getitem__(self, key):
174 if key not in DIGESTS:
174 if key not in DIGESTS:
175 raise Abort(_('unknown digest type: %s') % key)
175 raise Abort(_('unknown digest type: %s') % key)
176 return self._hashes[key].hexdigest()
176 return self._hashes[key].hexdigest()
177
177
178 def __iter__(self):
178 def __iter__(self):
179 return iter(self._hashes)
179 return iter(self._hashes)
180
180
181 @staticmethod
181 @staticmethod
182 def preferred(supported):
182 def preferred(supported):
183 """returns the strongest digest type in both supported and DIGESTS."""
183 """returns the strongest digest type in both supported and DIGESTS."""
184
184
185 for k in DIGESTS_BY_STRENGTH:
185 for k in DIGESTS_BY_STRENGTH:
186 if k in supported:
186 if k in supported:
187 return k
187 return k
188 return None
188 return None
189
189
190 class digestchecker(object):
190 class digestchecker(object):
191 """file handle wrapper that additionally checks content against a given
191 """file handle wrapper that additionally checks content against a given
192 size and digests.
192 size and digests.
193
193
194 d = digestchecker(fh, size, {'md5': '...'})
194 d = digestchecker(fh, size, {'md5': '...'})
195
195
196 When multiple digests are given, all of them are validated.
196 When multiple digests are given, all of them are validated.
197 """
197 """
198
198
199 def __init__(self, fh, size, digests):
199 def __init__(self, fh, size, digests):
200 self._fh = fh
200 self._fh = fh
201 self._size = size
201 self._size = size
202 self._got = 0
202 self._got = 0
203 self._digests = dict(digests)
203 self._digests = dict(digests)
204 self._digester = digester(self._digests.keys())
204 self._digester = digester(self._digests.keys())
205
205
206 def read(self, length=-1):
206 def read(self, length=-1):
207 content = self._fh.read(length)
207 content = self._fh.read(length)
208 self._digester.update(content)
208 self._digester.update(content)
209 self._got += len(content)
209 self._got += len(content)
210 return content
210 return content
211
211
212 def validate(self):
212 def validate(self):
213 if self._size != self._got:
213 if self._size != self._got:
214 raise Abort(_('size mismatch: expected %d, got %d') %
214 raise Abort(_('size mismatch: expected %d, got %d') %
215 (self._size, self._got))
215 (self._size, self._got))
216 for k, v in self._digests.items():
216 for k, v in self._digests.items():
217 if v != self._digester[k]:
217 if v != self._digester[k]:
218 # i18n: first parameter is a digest name
218 # i18n: first parameter is a digest name
219 raise Abort(_('%s mismatch: expected %s, got %s') %
219 raise Abort(_('%s mismatch: expected %s, got %s') %
220 (k, v, self._digester[k]))
220 (k, v, self._digester[k]))
221
221
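A sketch of how digester and digestchecker fit together when reading a payload whose size and digests are known up front; the payload and expected values here are made up:

import StringIO    # Python 2; the wrapped object only needs read()

payload = 'some bundle data'
expected = {'sha1': digester(['sha1'], payload)['sha1']}

fh = digestchecker(StringIO.StringIO(payload), len(payload), expected)
while fh.read(4096):
    pass
fh.validate()      # raises Abort on a size or digest mismatch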
222 try:
222 try:
223 buffer = buffer
223 buffer = buffer
224 except NameError:
224 except NameError:
225 if sys.version_info[0] < 3:
225 if sys.version_info[0] < 3:
226 def buffer(sliceable, offset=0):
226 def buffer(sliceable, offset=0):
227 return sliceable[offset:]
227 return sliceable[offset:]
228 else:
228 else:
229 def buffer(sliceable, offset=0):
229 def buffer(sliceable, offset=0):
230 return memoryview(sliceable)[offset:]
230 return memoryview(sliceable)[offset:]
231
231
232 import subprocess
232 import subprocess
233 closefds = os.name == 'posix'
233 closefds = os.name == 'posix'
234
234
235 def unpacker(fmt):
235 def unpacker(fmt):
236 """create a struct unpacker for the specified format"""
236 """create a struct unpacker for the specified format"""
237 -    try:
238 -        # 2.5+
239 -        return struct.Struct(fmt).unpack
240 -    except AttributeError:
241 -        # 2.4
242 -        return lambda buf: struct.unpack(fmt, buf)
243 -
237 +    return struct.Struct(fmt).unpack
238 +
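With the 2.4 fallback gone, unpacker always returns the bound unpack method of a precompiled struct.Struct, so the format string is parsed once rather than on every call. A small usage sketch (format and bytes are arbitrary examples):

readshort = unpacker('>H')             # big-endian unsigned short
assert readshort('\x00\x2a') == (42,)
# cheaper in a loop than calling struct.unpack('>H', data) repeatedly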
244 def popen2(cmd, env=None, newlines=False):
239 def popen2(cmd, env=None, newlines=False):
245 # Setting bufsize to -1 lets the system decide the buffer size.
240 # Setting bufsize to -1 lets the system decide the buffer size.
246 # The default for bufsize is 0, meaning unbuffered. This leads to
241 # The default for bufsize is 0, meaning unbuffered. This leads to
247 # poor performance on Mac OS X: http://bugs.python.org/issue4194
242 # poor performance on Mac OS X: http://bugs.python.org/issue4194
248 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
243 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
249 close_fds=closefds,
244 close_fds=closefds,
250 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
245 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
251 universal_newlines=newlines,
246 universal_newlines=newlines,
252 env=env)
247 env=env)
253 return p.stdin, p.stdout
248 return p.stdin, p.stdout
254
249
255 def popen3(cmd, env=None, newlines=False):
250 def popen3(cmd, env=None, newlines=False):
256 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
251 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
257 return stdin, stdout, stderr
252 return stdin, stdout, stderr
258
253
259 def popen4(cmd, env=None, newlines=False):
254 def popen4(cmd, env=None, newlines=False):
260 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
255 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
261 close_fds=closefds,
256 close_fds=closefds,
262 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
257 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
263 stderr=subprocess.PIPE,
258 stderr=subprocess.PIPE,
264 universal_newlines=newlines,
259 universal_newlines=newlines,
265 env=env)
260 env=env)
266 return p.stdin, p.stdout, p.stderr, p
261 return p.stdin, p.stdout, p.stderr, p
267
262
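A brief sketch of popen2 driving a shell filter (a POSIX command chosen for illustration; error handling omitted):

fin, fout = popen2('tr a-z A-Z')
fin.write('hello\n')
fin.close()                 # EOF lets the filter flush and exit
result = fout.read()        # 'HELLO\n'
fout.close()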
268 def version():
263 def version():
269 """Return version information if available."""
264 """Return version information if available."""
270 try:
265 try:
271 import __version__
266 import __version__
272 return __version__.version
267 return __version__.version
273 except ImportError:
268 except ImportError:
274 return 'unknown'
269 return 'unknown'
275
270
276 # used by parsedate
271 # used by parsedate
277 defaultdateformats = (
272 defaultdateformats = (
278 '%Y-%m-%d %H:%M:%S',
273 '%Y-%m-%d %H:%M:%S',
279 '%Y-%m-%d %I:%M:%S%p',
274 '%Y-%m-%d %I:%M:%S%p',
280 '%Y-%m-%d %H:%M',
275 '%Y-%m-%d %H:%M',
281 '%Y-%m-%d %I:%M%p',
276 '%Y-%m-%d %I:%M%p',
282 '%Y-%m-%d',
277 '%Y-%m-%d',
283 '%m-%d',
278 '%m-%d',
284 '%m/%d',
279 '%m/%d',
285 '%m/%d/%y',
280 '%m/%d/%y',
286 '%m/%d/%Y',
281 '%m/%d/%Y',
287 '%a %b %d %H:%M:%S %Y',
282 '%a %b %d %H:%M:%S %Y',
288 '%a %b %d %I:%M:%S%p %Y',
283 '%a %b %d %I:%M:%S%p %Y',
289 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
284 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
290 '%b %d %H:%M:%S %Y',
285 '%b %d %H:%M:%S %Y',
291 '%b %d %I:%M:%S%p %Y',
286 '%b %d %I:%M:%S%p %Y',
292 '%b %d %H:%M:%S',
287 '%b %d %H:%M:%S',
293 '%b %d %I:%M:%S%p',
288 '%b %d %I:%M:%S%p',
294 '%b %d %H:%M',
289 '%b %d %H:%M',
295 '%b %d %I:%M%p',
290 '%b %d %I:%M%p',
296 '%b %d %Y',
291 '%b %d %Y',
297 '%b %d',
292 '%b %d',
298 '%H:%M:%S',
293 '%H:%M:%S',
299 '%I:%M:%S%p',
294 '%I:%M:%S%p',
300 '%H:%M',
295 '%H:%M',
301 '%I:%M%p',
296 '%I:%M%p',
302 )
297 )
303
298
304 extendeddateformats = defaultdateformats + (
299 extendeddateformats = defaultdateformats + (
305 "%Y",
300 "%Y",
306 "%Y-%m",
301 "%Y-%m",
307 "%b",
302 "%b",
308 "%b %Y",
303 "%b %Y",
309 )
304 )
310
305
311 def cachefunc(func):
306 def cachefunc(func):
312 '''cache the result of function calls'''
307 '''cache the result of function calls'''
313 # XXX doesn't handle keyword args
308 # XXX doesn't handle keyword args
314 if func.func_code.co_argcount == 0:
309 if func.func_code.co_argcount == 0:
315 cache = []
310 cache = []
316 def f():
311 def f():
317 if len(cache) == 0:
312 if len(cache) == 0:
318 cache.append(func())
313 cache.append(func())
319 return cache[0]
314 return cache[0]
320 return f
315 return f
321 cache = {}
316 cache = {}
322 if func.func_code.co_argcount == 1:
317 if func.func_code.co_argcount == 1:
323 # we gain a small amount of time because
318 # we gain a small amount of time because
324 # we don't need to pack/unpack the list
319 # we don't need to pack/unpack the list
325 def f(arg):
320 def f(arg):
326 if arg not in cache:
321 if arg not in cache:
327 cache[arg] = func(arg)
322 cache[arg] = func(arg)
328 return cache[arg]
323 return cache[arg]
329 else:
324 else:
330 def f(*args):
325 def f(*args):
331 if args not in cache:
326 if args not in cache:
332 cache[args] = func(*args)
327 cache[args] = func(*args)
333 return cache[args]
328 return cache[args]
334
329
335 return f
330 return f
336
331
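cachefunc memoizes on positional arguments only, as the XXX note above says; a quick sketch with a made-up function:

calls = []
def slowsquare(n):
    calls.append(n)
    return n * n

fastsquare = cachefunc(slowsquare)
fastsquare(3); fastsquare(3); fastsquare(4)
assert calls == [3, 4]      # the repeated call for 3 hit the cache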
337 class sortdict(dict):
332 class sortdict(dict):
338 '''a simple sorted dictionary'''
333 '''a simple sorted dictionary'''
339 def __init__(self, data=None):
334 def __init__(self, data=None):
340 self._list = []
335 self._list = []
341 if data:
336 if data:
342 self.update(data)
337 self.update(data)
343 def copy(self):
338 def copy(self):
344 return sortdict(self)
339 return sortdict(self)
345 def __setitem__(self, key, val):
340 def __setitem__(self, key, val):
346 if key in self:
341 if key in self:
347 self._list.remove(key)
342 self._list.remove(key)
348 self._list.append(key)
343 self._list.append(key)
349 dict.__setitem__(self, key, val)
344 dict.__setitem__(self, key, val)
350 def __iter__(self):
345 def __iter__(self):
351 return self._list.__iter__()
346 return self._list.__iter__()
352 def update(self, src):
347 def update(self, src):
353 if isinstance(src, dict):
348 if isinstance(src, dict):
354 src = src.iteritems()
349 src = src.iteritems()
355 for k, v in src:
350 for k, v in src:
356 self[k] = v
351 self[k] = v
357 def clear(self):
352 def clear(self):
358 dict.clear(self)
353 dict.clear(self)
359 self._list = []
354 self._list = []
360 def items(self):
355 def items(self):
361 return [(k, self[k]) for k in self._list]
356 return [(k, self[k]) for k in self._list]
362 def __delitem__(self, key):
357 def __delitem__(self, key):
363 dict.__delitem__(self, key)
358 dict.__delitem__(self, key)
364 self._list.remove(key)
359 self._list.remove(key)
365 def pop(self, key, *args, **kwargs):
360 def pop(self, key, *args, **kwargs):
366 dict.pop(self, key, *args, **kwargs)
361 dict.pop(self, key, *args, **kwargs)
367 try:
362 try:
368 self._list.remove(key)
363 self._list.remove(key)
369 except ValueError:
364 except ValueError:
370 pass
365 pass
371 def keys(self):
366 def keys(self):
372 return self._list
367 return self._list
373 def iterkeys(self):
368 def iterkeys(self):
374 return self._list.__iter__()
369 return self._list.__iter__()
375 def iteritems(self):
370 def iteritems(self):
376 for k in self._list:
371 for k in self._list:
377 yield k, self[k]
372 yield k, self[k]
378 def insert(self, index, key, val):
373 def insert(self, index, key, val):
379 self._list.insert(index, key)
374 self._list.insert(index, key)
380 dict.__setitem__(self, key, val)
375 dict.__setitem__(self, key, val)
381
376
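sortdict keeps keys in insertion order, and re-setting an existing key moves it to the back; a brief illustration:

d = sortdict()
d['b'] = 1
d['a'] = 2
d['b'] = 3                  # reassignment moves 'b' behind 'a'
assert d.keys() == ['a', 'b']
assert d.items() == [('a', 2), ('b', 3)]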
382 class lrucachedict(object):
377 class lrucachedict(object):
383 '''cache most recent gets from or sets to this dictionary'''
378 '''cache most recent gets from or sets to this dictionary'''
384 def __init__(self, maxsize):
379 def __init__(self, maxsize):
385 self._cache = {}
380 self._cache = {}
386 self._maxsize = maxsize
381 self._maxsize = maxsize
387 self._order = collections.deque()
382 self._order = collections.deque()
388
383
389 def __getitem__(self, key):
384 def __getitem__(self, key):
390 value = self._cache[key]
385 value = self._cache[key]
391 self._order.remove(key)
386 self._order.remove(key)
392 self._order.append(key)
387 self._order.append(key)
393 return value
388 return value
394
389
395 def __setitem__(self, key, value):
390 def __setitem__(self, key, value):
396 if key not in self._cache:
391 if key not in self._cache:
397 if len(self._cache) >= self._maxsize:
392 if len(self._cache) >= self._maxsize:
398 del self._cache[self._order.popleft()]
393 del self._cache[self._order.popleft()]
399 else:
394 else:
400 self._order.remove(key)
395 self._order.remove(key)
401 self._cache[key] = value
396 self._cache[key] = value
402 self._order.append(key)
397 self._order.append(key)
403
398
404 def __contains__(self, key):
399 def __contains__(self, key):
405 return key in self._cache
400 return key in self._cache
406
401
407 def clear(self):
402 def clear(self):
408 self._cache.clear()
403 self._cache.clear()
409 self._order = collections.deque()
404 self._order = collections.deque()
410
405
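lrucachedict evicts the least recently used key once maxsize is reached, where both gets and sets count as use; a small sketch:

c = lrucachedict(2)
c['a'] = 1
c['b'] = 2
c['a']                      # touch 'a' so 'b' is now the oldest entry
c['c'] = 3                  # inserting a third key evicts 'b'
assert 'b' not in c and 'a' in c and 'c' in c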
411 def lrucachefunc(func):
406 def lrucachefunc(func):
412 '''cache most recent results of function calls'''
407 '''cache most recent results of function calls'''
413 cache = {}
408 cache = {}
414 order = collections.deque()
409 order = collections.deque()
415 if func.func_code.co_argcount == 1:
410 if func.func_code.co_argcount == 1:
416 def f(arg):
411 def f(arg):
417 if arg not in cache:
412 if arg not in cache:
418 if len(cache) > 20:
413 if len(cache) > 20:
419 del cache[order.popleft()]
414 del cache[order.popleft()]
420 cache[arg] = func(arg)
415 cache[arg] = func(arg)
421 else:
416 else:
422 order.remove(arg)
417 order.remove(arg)
423 order.append(arg)
418 order.append(arg)
424 return cache[arg]
419 return cache[arg]
425 else:
420 else:
426 def f(*args):
421 def f(*args):
427 if args not in cache:
422 if args not in cache:
428 if len(cache) > 20:
423 if len(cache) > 20:
429 del cache[order.popleft()]
424 del cache[order.popleft()]
430 cache[args] = func(*args)
425 cache[args] = func(*args)
431 else:
426 else:
432 order.remove(args)
427 order.remove(args)
433 order.append(args)
428 order.append(args)
434 return cache[args]
429 return cache[args]
435
430
436 return f
431 return f
437
432
438 class propertycache(object):
433 class propertycache(object):
439 def __init__(self, func):
434 def __init__(self, func):
440 self.func = func
435 self.func = func
441 self.name = func.__name__
436 self.name = func.__name__
442 def __get__(self, obj, type=None):
437 def __get__(self, obj, type=None):
443 result = self.func(obj)
438 result = self.func(obj)
444 self.cachevalue(obj, result)
439 self.cachevalue(obj, result)
445 return result
440 return result
446
441
447 def cachevalue(self, obj, value):
442 def cachevalue(self, obj, value):
448 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
443 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
449 obj.__dict__[self.name] = value
444 obj.__dict__[self.name] = value
450
445
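propertycache computes an attribute once and stashes the result in the instance __dict__, so later lookups never reach the descriptor; a sketch with a made-up class:

class repoinfo(object):
    computed = 0
    @propertycache
    def heads(self):
        repoinfo.computed += 1
        return ['tip']      # stand-in for an expensive computation

r = repoinfo()
r.heads; r.heads
assert repoinfo.computed == 1   # second access read r.__dict__['heads']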
451 def pipefilter(s, cmd):
446 def pipefilter(s, cmd):
452 '''filter string S through command CMD, returning its output'''
447 '''filter string S through command CMD, returning its output'''
453 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
448 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
454 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
449 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
455 pout, perr = p.communicate(s)
450 pout, perr = p.communicate(s)
456 return pout
451 return pout
457
452
458 def tempfilter(s, cmd):
453 def tempfilter(s, cmd):
459 '''filter string S through a pair of temporary files with CMD.
454 '''filter string S through a pair of temporary files with CMD.
460 CMD is used as a template to create the real command to be run,
455 CMD is used as a template to create the real command to be run,
461 with the strings INFILE and OUTFILE replaced by the real names of
456 with the strings INFILE and OUTFILE replaced by the real names of
462 the temporary files generated.'''
457 the temporary files generated.'''
463 inname, outname = None, None
458 inname, outname = None, None
464 try:
459 try:
465 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
460 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
466 fp = os.fdopen(infd, 'wb')
461 fp = os.fdopen(infd, 'wb')
467 fp.write(s)
462 fp.write(s)
468 fp.close()
463 fp.close()
469 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
464 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
470 os.close(outfd)
465 os.close(outfd)
471 cmd = cmd.replace('INFILE', inname)
466 cmd = cmd.replace('INFILE', inname)
472 cmd = cmd.replace('OUTFILE', outname)
467 cmd = cmd.replace('OUTFILE', outname)
473 code = os.system(cmd)
468 code = os.system(cmd)
474 if sys.platform == 'OpenVMS' and code & 1:
469 if sys.platform == 'OpenVMS' and code & 1:
475 code = 0
470 code = 0
476 if code:
471 if code:
477 raise Abort(_("command '%s' failed: %s") %
472 raise Abort(_("command '%s' failed: %s") %
478 (cmd, explainexit(code)))
473 (cmd, explainexit(code)))
479 fp = open(outname, 'rb')
474 fp = open(outname, 'rb')
480 r = fp.read()
475 r = fp.read()
481 fp.close()
476 fp.close()
482 return r
477 return r
483 finally:
478 finally:
484 try:
479 try:
485 if inname:
480 if inname:
486 os.unlink(inname)
481 os.unlink(inname)
487 except OSError:
482 except OSError:
488 pass
483 pass
489 try:
484 try:
490 if outname:
485 if outname:
491 os.unlink(outname)
486 os.unlink(outname)
492 except OSError:
487 except OSError:
493 pass
488 pass
494
489
495 filtertable = {
490 filtertable = {
496 'tempfile:': tempfilter,
491 'tempfile:': tempfilter,
497 'pipe:': pipefilter,
492 'pipe:': pipefilter,
498 }
493 }
499
494
500 def filter(s, cmd):
495 def filter(s, cmd):
501 "filter a string through a command that transforms its input to its output"
496 "filter a string through a command that transforms its input to its output"
502 for name, fn in filtertable.iteritems():
497 for name, fn in filtertable.iteritems():
503 if cmd.startswith(name):
498 if cmd.startswith(name):
504 return fn(s, cmd[len(name):].lstrip())
499 return fn(s, cmd[len(name):].lstrip())
505 return pipefilter(s, cmd)
500 return pipefilter(s, cmd)
506
501
507 def binary(s):
502 def binary(s):
508 """return true if a string is binary data"""
503 """return true if a string is binary data"""
509 return bool(s and '\0' in s)
504 return bool(s and '\0' in s)
510
505
511 def increasingchunks(source, min=1024, max=65536):
506 def increasingchunks(source, min=1024, max=65536):
512 '''return no less than min bytes per chunk while data remains,
507 '''return no less than min bytes per chunk while data remains,
513 doubling min after each chunk until it reaches max'''
508 doubling min after each chunk until it reaches max'''
514 def log2(x):
509 def log2(x):
515 if not x:
510 if not x:
516 return 0
511 return 0
517 i = 0
512 i = 0
518 while x:
513 while x:
519 x >>= 1
514 x >>= 1
520 i += 1
515 i += 1
521 return i - 1
516 return i - 1
522
517
523 buf = []
518 buf = []
524 blen = 0
519 blen = 0
525 for chunk in source:
520 for chunk in source:
526 buf.append(chunk)
521 buf.append(chunk)
527 blen += len(chunk)
522 blen += len(chunk)
528 if blen >= min:
523 if blen >= min:
529 if min < max:
524 if min < max:
530 min = min << 1
525 min = min << 1
531 nmin = 1 << log2(blen)
526 nmin = 1 << log2(blen)
532 if nmin > min:
527 if nmin > min:
533 min = nmin
528 min = nmin
534 if min > max:
529 if min > max:
535 min = max
530 min = max
536 yield ''.join(buf)
531 yield ''.join(buf)
537 blen = 0
532 blen = 0
538 buf = []
533 buf = []
539 if buf:
534 if buf:
540 yield ''.join(buf)
535 yield ''.join(buf)
541
536
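increasingchunks coalesces many small reads into progressively larger yields; a sketch of the growth pattern (sizes chosen for illustration):

pieces = ['x' * 100] * 100      # 10,000 bytes arriving in 100-byte bits
sizes = [len(c) for c in increasingchunks(pieces, min=1024, max=4096)]
assert sizes == [1100, 2100, 4100, 2700]
# each chunk is at least the current minimum, which roughly doubles per
# chunk until it reaches max; the leftover bytes are flushed at the end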
542 Abort = error.Abort
537 Abort = error.Abort
543
538
544 def always(fn):
539 def always(fn):
545 return True
540 return True
546
541
547 def never(fn):
542 def never(fn):
548 return False
543 return False
549
544
550 def nogc(func):
545 def nogc(func):
551 """disable garbage collector
546 """disable garbage collector
552
547
553 Python's garbage collector triggers a GC each time a certain number of
548 Python's garbage collector triggers a GC each time a certain number of
554 container objects (the number being defined by gc.get_threshold()) are
549 container objects (the number being defined by gc.get_threshold()) are
555 allocated even when marked not to be tracked by the collector. Tracking has
550 allocated even when marked not to be tracked by the collector. Tracking has
556 no effect on when GCs are triggered, only on what objects the GC looks
551 no effect on when GCs are triggered, only on what objects the GC looks
557 into. As a workaround, disable GC while building complex (huge)
552 into. As a workaround, disable GC while building complex (huge)
558 containers.
553 containers.
559
554
560 This garbage collector issue has been fixed in 2.7.
555 This garbage collector issue has been fixed in 2.7.
561 """
556 """
562 def wrapper(*args, **kwargs):
557 def wrapper(*args, **kwargs):
563 gcenabled = gc.isenabled()
558 gcenabled = gc.isenabled()
564 gc.disable()
559 gc.disable()
565 try:
560 try:
566 return func(*args, **kwargs)
561 return func(*args, **kwargs)
567 finally:
562 finally:
568 if gcenabled:
563 if gcenabled:
569 gc.enable()
564 gc.enable()
570 return wrapper
565 return wrapper
571
566
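nogc is intended for code paths that allocate huge container structures; a sketch of decorating such a builder (the function is made up):

@nogc
def buildindex(entries):
    # with the collector paused, allocating millions of small objects
    # avoids repeated whole-heap collections driven by allocation counts
    return dict((e, len(e)) for e in entries)

index = buildindex(['a', 'bb', 'ccc'])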
572 def pathto(root, n1, n2):
567 def pathto(root, n1, n2):
573 '''return the relative path from one place to another.
568 '''return the relative path from one place to another.
574 root should use os.sep to separate directories
569 root should use os.sep to separate directories
575 n1 should use os.sep to separate directories
570 n1 should use os.sep to separate directories
576 n2 should use "/" to separate directories
571 n2 should use "/" to separate directories
577 returns an os.sep-separated path.
572 returns an os.sep-separated path.
578
573
579 If n1 is a relative path, it's assumed it's
574 If n1 is a relative path, it's assumed it's
580 relative to root.
575 relative to root.
581 n2 should always be relative to root.
576 n2 should always be relative to root.
582 '''
577 '''
583 if not n1:
578 if not n1:
584 return localpath(n2)
579 return localpath(n2)
585 if os.path.isabs(n1):
580 if os.path.isabs(n1):
586 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
581 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
587 return os.path.join(root, localpath(n2))
582 return os.path.join(root, localpath(n2))
588 n2 = '/'.join((pconvert(root), n2))
583 n2 = '/'.join((pconvert(root), n2))
589 a, b = splitpath(n1), n2.split('/')
584 a, b = splitpath(n1), n2.split('/')
590 a.reverse()
585 a.reverse()
591 b.reverse()
586 b.reverse()
592 while a and b and a[-1] == b[-1]:
587 while a and b and a[-1] == b[-1]:
593 a.pop()
588 a.pop()
594 b.pop()
589 b.pop()
595 b.reverse()
590 b.reverse()
596 return os.sep.join((['..'] * len(a)) + b) or '.'
591 return os.sep.join((['..'] * len(a)) + b) or '.'
597
592
598 def mainfrozen():
593 def mainfrozen():
599 """return True if we are a frozen executable.
594 """return True if we are a frozen executable.
600
595
601 The code supports py2exe (most common, Windows only) and tools/freeze
596 The code supports py2exe (most common, Windows only) and tools/freeze
602 (portable, not much used).
597 (portable, not much used).
603 """
598 """
604 return (safehasattr(sys, "frozen") or # new py2exe
599 return (safehasattr(sys, "frozen") or # new py2exe
605 safehasattr(sys, "importers") or # old py2exe
600 safehasattr(sys, "importers") or # old py2exe
606 imp.is_frozen("__main__")) # tools/freeze
601 imp.is_frozen("__main__")) # tools/freeze
607
602
608 # the location of data files matching the source code
603 # the location of data files matching the source code
609 if mainfrozen():
604 if mainfrozen():
610 # executable version (py2exe) doesn't support __file__
605 # executable version (py2exe) doesn't support __file__
611 datapath = os.path.dirname(sys.executable)
606 datapath = os.path.dirname(sys.executable)
612 else:
607 else:
613 datapath = os.path.dirname(__file__)
608 datapath = os.path.dirname(__file__)
614
609
615 i18n.setdatapath(datapath)
610 i18n.setdatapath(datapath)
616
611
617 _hgexecutable = None
612 _hgexecutable = None
618
613
619 def hgexecutable():
614 def hgexecutable():
620 """return location of the 'hg' executable.
615 """return location of the 'hg' executable.
621
616
622 Defaults to $HG or 'hg' in the search path.
617 Defaults to $HG or 'hg' in the search path.
623 """
618 """
624 if _hgexecutable is None:
619 if _hgexecutable is None:
625 hg = os.environ.get('HG')
620 hg = os.environ.get('HG')
626 mainmod = sys.modules['__main__']
621 mainmod = sys.modules['__main__']
627 if hg:
622 if hg:
628 _sethgexecutable(hg)
623 _sethgexecutable(hg)
629 elif mainfrozen():
624 elif mainfrozen():
630 _sethgexecutable(sys.executable)
625 _sethgexecutable(sys.executable)
631 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
626 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
632 _sethgexecutable(mainmod.__file__)
627 _sethgexecutable(mainmod.__file__)
633 else:
628 else:
634 exe = findexe('hg') or os.path.basename(sys.argv[0])
629 exe = findexe('hg') or os.path.basename(sys.argv[0])
635 _sethgexecutable(exe)
630 _sethgexecutable(exe)
636 return _hgexecutable
631 return _hgexecutable
637
632
638 def _sethgexecutable(path):
633 def _sethgexecutable(path):
639 """set location of the 'hg' executable"""
634 """set location of the 'hg' executable"""
640 global _hgexecutable
635 global _hgexecutable
641 _hgexecutable = path
636 _hgexecutable = path
642
637
643 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
638 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
644 '''enhanced shell command execution.
639 '''enhanced shell command execution.
645 run with environment maybe modified, maybe in different dir.
640 run with environment maybe modified, maybe in different dir.
646
641
647 if command fails and onerr is None, return status, else raise onerr
642 if command fails and onerr is None, return status, else raise onerr
648 object as exception.
643 object as exception.
649
644
650 if out is specified, it is assumed to be a file-like object that has a
645 if out is specified, it is assumed to be a file-like object that has a
651 write() method. stdout and stderr will be redirected to out.'''
646 write() method. stdout and stderr will be redirected to out.'''
652 try:
647 try:
653 sys.stdout.flush()
648 sys.stdout.flush()
654 except Exception:
649 except Exception:
655 pass
650 pass
656 def py2shell(val):
651 def py2shell(val):
657 'convert python object into string that is useful to shell'
652 'convert python object into string that is useful to shell'
658 if val is None or val is False:
653 if val is None or val is False:
659 return '0'
654 return '0'
660 if val is True:
655 if val is True:
661 return '1'
656 return '1'
662 return str(val)
657 return str(val)
663 origcmd = cmd
658 origcmd = cmd
664 cmd = quotecommand(cmd)
659 cmd = quotecommand(cmd)
665 if sys.platform == 'plan9' and (sys.version_info[0] == 2
660 if sys.platform == 'plan9' and (sys.version_info[0] == 2
666 and sys.version_info[1] < 7):
661 and sys.version_info[1] < 7):
667 # subprocess kludge to work around issues in half-baked Python
662 # subprocess kludge to work around issues in half-baked Python
668 # ports, notably bichued/python:
663 # ports, notably bichued/python:
669 if not cwd is None:
664 if not cwd is None:
670 os.chdir(cwd)
665 os.chdir(cwd)
671 rc = os.system(cmd)
666 rc = os.system(cmd)
672 else:
667 else:
673 env = dict(os.environ)
668 env = dict(os.environ)
674 env.update((k, py2shell(v)) for k, v in environ.iteritems())
669 env.update((k, py2shell(v)) for k, v in environ.iteritems())
675 env['HG'] = hgexecutable()
670 env['HG'] = hgexecutable()
676 if out is None or out == sys.__stdout__:
671 if out is None or out == sys.__stdout__:
677 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
672 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
678 env=env, cwd=cwd)
673 env=env, cwd=cwd)
679 else:
674 else:
680 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
675 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
681 env=env, cwd=cwd, stdout=subprocess.PIPE,
676 env=env, cwd=cwd, stdout=subprocess.PIPE,
682 stderr=subprocess.STDOUT)
677 stderr=subprocess.STDOUT)
683 while True:
678 while True:
684 line = proc.stdout.readline()
679 line = proc.stdout.readline()
685 if not line:
680 if not line:
686 break
681 break
687 out.write(line)
682 out.write(line)
688 proc.wait()
683 proc.wait()
689 rc = proc.returncode
684 rc = proc.returncode
690 if sys.platform == 'OpenVMS' and rc & 1:
685 if sys.platform == 'OpenVMS' and rc & 1:
691 rc = 0
686 rc = 0
692 if rc and onerr:
687 if rc and onerr:
693 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
688 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
694 explainexit(rc)[0])
689 explainexit(rc)[0])
695 if errprefix:
690 if errprefix:
696 errmsg = '%s: %s' % (errprefix, errmsg)
691 errmsg = '%s: %s' % (errprefix, errmsg)
697 raise onerr(errmsg)
692 raise onerr(errmsg)
698 return rc
693 return rc
699
694
700 def checksignature(func):
695 def checksignature(func):
701 '''wrap a function with code to check for calling errors'''
696 '''wrap a function with code to check for calling errors'''
702 def check(*args, **kwargs):
697 def check(*args, **kwargs):
703 try:
698 try:
704 return func(*args, **kwargs)
699 return func(*args, **kwargs)
705 except TypeError:
700 except TypeError:
706 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
701 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
707 raise error.SignatureError
702 raise error.SignatureError
708 raise
703 raise
709
704
710 return check
705 return check
711
706
712 def copyfile(src, dest, hardlink=False):
707 def copyfile(src, dest, hardlink=False):
713 "copy a file, preserving mode and atime/mtime"
708 "copy a file, preserving mode and atime/mtime"
714 if os.path.lexists(dest):
709 if os.path.lexists(dest):
715 unlink(dest)
710 unlink(dest)
716 # hardlinks are problematic on CIFS, quietly ignore this flag
711 # hardlinks are problematic on CIFS, quietly ignore this flag
717 # until we find a way to work around it cleanly (issue4546)
712 # until we find a way to work around it cleanly (issue4546)
718 if False and hardlink:
713 if False and hardlink:
719 try:
714 try:
720 oslink(src, dest)
715 oslink(src, dest)
721 return
716 return
722 except (IOError, OSError):
717 except (IOError, OSError):
723 pass # fall back to normal copy
718 pass # fall back to normal copy
724 if os.path.islink(src):
719 if os.path.islink(src):
725 os.symlink(os.readlink(src), dest)
720 os.symlink(os.readlink(src), dest)
726 else:
721 else:
727 try:
722 try:
728 shutil.copyfile(src, dest)
723 shutil.copyfile(src, dest)
729 shutil.copymode(src, dest)
724 shutil.copymode(src, dest)
730 except shutil.Error, inst:
725 except shutil.Error, inst:
731 raise Abort(str(inst))
726 raise Abort(str(inst))
732
727
733 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
728 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
734 """Copy a directory tree using hardlinks if possible."""
729 """Copy a directory tree using hardlinks if possible."""
735 num = 0
730 num = 0
736
731
737 if hardlink is None:
732 if hardlink is None:
738 hardlink = (os.stat(src).st_dev ==
733 hardlink = (os.stat(src).st_dev ==
739 os.stat(os.path.dirname(dst)).st_dev)
734 os.stat(os.path.dirname(dst)).st_dev)
740 if hardlink:
735 if hardlink:
741 topic = _('linking')
736 topic = _('linking')
742 else:
737 else:
743 topic = _('copying')
738 topic = _('copying')
744
739
745 if os.path.isdir(src):
740 if os.path.isdir(src):
746 os.mkdir(dst)
741 os.mkdir(dst)
747 for name, kind in osutil.listdir(src):
742 for name, kind in osutil.listdir(src):
748 srcname = os.path.join(src, name)
743 srcname = os.path.join(src, name)
749 dstname = os.path.join(dst, name)
744 dstname = os.path.join(dst, name)
750 def nprog(t, pos):
745 def nprog(t, pos):
751 if pos is not None:
746 if pos is not None:
752 return progress(t, pos + num)
747 return progress(t, pos + num)
753 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
748 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
754 num += n
749 num += n
755 else:
750 else:
756 if hardlink:
751 if hardlink:
757 try:
752 try:
758 oslink(src, dst)
753 oslink(src, dst)
759 except (IOError, OSError):
754 except (IOError, OSError):
760 hardlink = False
755 hardlink = False
761 shutil.copy(src, dst)
756 shutil.copy(src, dst)
762 else:
757 else:
763 shutil.copy(src, dst)
758 shutil.copy(src, dst)
764 num += 1
759 num += 1
765 progress(topic, num)
760 progress(topic, num)
766 progress(topic, None)
761 progress(topic, None)
767
762
768 return hardlink, num
763 return hardlink, num
769
764
770 _winreservednames = '''con prn aux nul
765 _winreservednames = '''con prn aux nul
771 com1 com2 com3 com4 com5 com6 com7 com8 com9
766 com1 com2 com3 com4 com5 com6 com7 com8 com9
772 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
767 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
773 _winreservedchars = ':*?"<>|'
768 _winreservedchars = ':*?"<>|'
774 def checkwinfilename(path):
769 def checkwinfilename(path):
775 r'''Check that the base-relative path is a valid filename on Windows.
770 r'''Check that the base-relative path is a valid filename on Windows.
776 Returns None if the path is ok, or a UI string describing the problem.
771 Returns None if the path is ok, or a UI string describing the problem.
777
772
778 >>> checkwinfilename("just/a/normal/path")
773 >>> checkwinfilename("just/a/normal/path")
779 >>> checkwinfilename("foo/bar/con.xml")
774 >>> checkwinfilename("foo/bar/con.xml")
780 "filename contains 'con', which is reserved on Windows"
775 "filename contains 'con', which is reserved on Windows"
781 >>> checkwinfilename("foo/con.xml/bar")
776 >>> checkwinfilename("foo/con.xml/bar")
782 "filename contains 'con', which is reserved on Windows"
777 "filename contains 'con', which is reserved on Windows"
783 >>> checkwinfilename("foo/bar/xml.con")
778 >>> checkwinfilename("foo/bar/xml.con")
784 >>> checkwinfilename("foo/bar/AUX/bla.txt")
779 >>> checkwinfilename("foo/bar/AUX/bla.txt")
785 "filename contains 'AUX', which is reserved on Windows"
780 "filename contains 'AUX', which is reserved on Windows"
786 >>> checkwinfilename("foo/bar/bla:.txt")
781 >>> checkwinfilename("foo/bar/bla:.txt")
787 "filename contains ':', which is reserved on Windows"
782 "filename contains ':', which is reserved on Windows"
788 >>> checkwinfilename("foo/bar/b\07la.txt")
783 >>> checkwinfilename("foo/bar/b\07la.txt")
789 "filename contains '\\x07', which is invalid on Windows"
784 "filename contains '\\x07', which is invalid on Windows"
790 >>> checkwinfilename("foo/bar/bla ")
785 >>> checkwinfilename("foo/bar/bla ")
791 "filename ends with ' ', which is not allowed on Windows"
786 "filename ends with ' ', which is not allowed on Windows"
792 >>> checkwinfilename("../bar")
787 >>> checkwinfilename("../bar")
793 >>> checkwinfilename("foo\\")
788 >>> checkwinfilename("foo\\")
794 "filename ends with '\\', which is invalid on Windows"
789 "filename ends with '\\', which is invalid on Windows"
795 >>> checkwinfilename("foo\\/bar")
790 >>> checkwinfilename("foo\\/bar")
796 "directory name ends with '\\', which is invalid on Windows"
791 "directory name ends with '\\', which is invalid on Windows"
797 '''
792 '''
798 if path.endswith('\\'):
793 if path.endswith('\\'):
799 return _("filename ends with '\\', which is invalid on Windows")
794 return _("filename ends with '\\', which is invalid on Windows")
800 if '\\/' in path:
795 if '\\/' in path:
801 return _("directory name ends with '\\', which is invalid on Windows")
796 return _("directory name ends with '\\', which is invalid on Windows")
802 for n in path.replace('\\', '/').split('/'):
797 for n in path.replace('\\', '/').split('/'):
803 if not n:
798 if not n:
804 continue
799 continue
805 for c in n:
800 for c in n:
806 if c in _winreservedchars:
801 if c in _winreservedchars:
807 return _("filename contains '%s', which is reserved "
802 return _("filename contains '%s', which is reserved "
808 "on Windows") % c
803 "on Windows") % c
809 if ord(c) <= 31:
804 if ord(c) <= 31:
810 return _("filename contains %r, which is invalid "
805 return _("filename contains %r, which is invalid "
811 "on Windows") % c
806 "on Windows") % c
812 base = n.split('.')[0]
807 base = n.split('.')[0]
813 if base and base.lower() in _winreservednames:
808 if base and base.lower() in _winreservednames:
814 return _("filename contains '%s', which is reserved "
809 return _("filename contains '%s', which is reserved "
815 "on Windows") % base
810 "on Windows") % base
816 t = n[-1]
811 t = n[-1]
817 if t in '. ' and n not in '..':
812 if t in '. ' and n not in '..':
818 return _("filename ends with '%s', which is not allowed "
813 return _("filename ends with '%s', which is not allowed "
819 "on Windows") % t
814 "on Windows") % t
820
815
821 if os.name == 'nt':
816 if os.name == 'nt':
822 checkosfilename = checkwinfilename
817 checkosfilename = checkwinfilename
823 else:
818 else:
824 checkosfilename = platform.checkosfilename
819 checkosfilename = platform.checkosfilename
825
820
826 def makelock(info, pathname):
821 def makelock(info, pathname):
827 try:
822 try:
828 return os.symlink(info, pathname)
823 return os.symlink(info, pathname)
829 except OSError, why:
824 except OSError, why:
830 if why.errno == errno.EEXIST:
825 if why.errno == errno.EEXIST:
831 raise
826 raise
832 except AttributeError: # no symlink in os
827 except AttributeError: # no symlink in os
833 pass
828 pass
834
829
835 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
830 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
836 os.write(ld, info)
831 os.write(ld, info)
837 os.close(ld)
832 os.close(ld)
838
833
839 def readlock(pathname):
834 def readlock(pathname):
840 try:
835 try:
841 return os.readlink(pathname)
836 return os.readlink(pathname)
842 except OSError, why:
837 except OSError, why:
843 if why.errno not in (errno.EINVAL, errno.ENOSYS):
838 if why.errno not in (errno.EINVAL, errno.ENOSYS):
844 raise
839 raise
845 except AttributeError: # no symlink in os
840 except AttributeError: # no symlink in os
846 pass
841 pass
847 fp = posixfile(pathname)
842 fp = posixfile(pathname)
848 r = fp.read()
843 r = fp.read()
849 fp.close()
844 fp.close()
850 return r
845 return r
851
846
852 def fstat(fp):
847 def fstat(fp):
853 '''stat file object that may not have fileno method.'''
848 '''stat file object that may not have fileno method.'''
854 try:
849 try:
855 return os.fstat(fp.fileno())
850 return os.fstat(fp.fileno())
856 except AttributeError:
851 except AttributeError:
857 return os.stat(fp.name)
852 return os.stat(fp.name)
858
853
859 # File system features
854 # File system features
860
855
861 def checkcase(path):
856 def checkcase(path):
862 """
857 """
863 Return true if the given path is on a case-sensitive filesystem
858 Return true if the given path is on a case-sensitive filesystem
864
859
865 Requires a path (like /foo/.hg) ending with a foldable final
860 Requires a path (like /foo/.hg) ending with a foldable final
866 directory component.
861 directory component.
867 """
862 """
868 s1 = os.lstat(path)
863 s1 = os.lstat(path)
869 d, b = os.path.split(path)
864 d, b = os.path.split(path)
870 b2 = b.upper()
865 b2 = b.upper()
871 if b == b2:
866 if b == b2:
872 b2 = b.lower()
867 b2 = b.lower()
873 if b == b2:
868 if b == b2:
874 return True # no evidence against case sensitivity
869 return True # no evidence against case sensitivity
875 p2 = os.path.join(d, b2)
870 p2 = os.path.join(d, b2)
876 try:
871 try:
877 s2 = os.lstat(p2)
872 s2 = os.lstat(p2)
878 if s2 == s1:
873 if s2 == s1:
879 return False
874 return False
880 return True
875 return True
881 except OSError:
876 except OSError:
882 return True
877 return True
883
878
884 try:
879 try:
885 import re2
880 import re2
886 _re2 = None
881 _re2 = None
887 except ImportError:
882 except ImportError:
888 _re2 = False
883 _re2 = False
889
884
890 class _re(object):
885 class _re(object):
891 def _checkre2(self):
886 def _checkre2(self):
892 global _re2
887 global _re2
893 try:
888 try:
894 # check if match works, see issue3964
889 # check if match works, see issue3964
895 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
890 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
896 except ImportError:
891 except ImportError:
897 _re2 = False
892 _re2 = False
898
893
899 def compile(self, pat, flags=0):
894 def compile(self, pat, flags=0):
900 '''Compile a regular expression, using re2 if possible
895 '''Compile a regular expression, using re2 if possible
901
896
902 For best performance, use only re2-compatible regexp features. The
897 For best performance, use only re2-compatible regexp features. The
903 only flags from the re module that are re2-compatible are
898 only flags from the re module that are re2-compatible are
904 IGNORECASE and MULTILINE.'''
899 IGNORECASE and MULTILINE.'''
905 if _re2 is None:
900 if _re2 is None:
906 self._checkre2()
901 self._checkre2()
907 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
902 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
908 if flags & remod.IGNORECASE:
903 if flags & remod.IGNORECASE:
909 pat = '(?i)' + pat
904 pat = '(?i)' + pat
910 if flags & remod.MULTILINE:
905 if flags & remod.MULTILINE:
911 pat = '(?m)' + pat
906 pat = '(?m)' + pat
912 try:
907 try:
913 return re2.compile(pat)
908 return re2.compile(pat)
914 except re2.error:
909 except re2.error:
915 pass
910 pass
916 return remod.compile(pat, flags)
911 return remod.compile(pat, flags)
917
912
918 @propertycache
913 @propertycache
919 def escape(self):
914 def escape(self):
920 '''Return the version of escape corresponding to self.compile.
915 '''Return the version of escape corresponding to self.compile.
921
916
922 This is imperfect because whether re2 or re is used for a particular
917 This is imperfect because whether re2 or re is used for a particular
923 function depends on the flags, etc, but it's the best we can do.
918 function depends on the flags, etc, but it's the best we can do.
924 '''
919 '''
925 global _re2
920 global _re2
926 if _re2 is None:
921 if _re2 is None:
927 self._checkre2()
922 self._checkre2()
928 if _re2:
923 if _re2:
929 return re2.escape
924 return re2.escape
930 else:
925 else:
931 return remod.escape
926 return remod.escape
932
927
933 re = _re()
928 re = _re()
934
929
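Callers compile patterns through this module-level re object so they transparently get re2 when it is installed and the flags allow it; a small sketch:

pat = re.compile(r'[0-9a-f]{12}', remod.IGNORECASE)
assert pat.match('1e55a1d8C3a9')
# falls back to the stdlib engine (imported here as remod) when re2 is
# missing, rejects the pattern, or an unsupported flag is passed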
935 _fspathcache = {}
930 _fspathcache = {}
936 def fspath(name, root):
931 def fspath(name, root):
937 '''Get name in the case stored in the filesystem
932 '''Get name in the case stored in the filesystem
938
933
939 The name should be relative to root, and be normcase-ed for efficiency.
934 The name should be relative to root, and be normcase-ed for efficiency.
940
935
941 Note that this function is unnecessary, and should not be
936 Note that this function is unnecessary, and should not be
942 called, for case-sensitive filesystems (simply because it's expensive).
937 called, for case-sensitive filesystems (simply because it's expensive).
943
938
944 The root should be normcase-ed, too.
939 The root should be normcase-ed, too.
945 '''
940 '''
946 def _makefspathcacheentry(dir):
941 def _makefspathcacheentry(dir):
947 return dict((normcase(n), n) for n in os.listdir(dir))
942 return dict((normcase(n), n) for n in os.listdir(dir))
948
943
949 seps = os.sep
944 seps = os.sep
950 if os.altsep:
945 if os.altsep:
951 seps = seps + os.altsep
946 seps = seps + os.altsep
952 # Protect backslashes. This gets silly very quickly.
947 # Protect backslashes. This gets silly very quickly.
953 seps.replace('\\','\\\\')
948 seps.replace('\\','\\\\')
954 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
949 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
955 dir = os.path.normpath(root)
950 dir = os.path.normpath(root)
956 result = []
951 result = []
957 for part, sep in pattern.findall(name):
952 for part, sep in pattern.findall(name):
958 if sep:
953 if sep:
959 result.append(sep)
954 result.append(sep)
960 continue
955 continue
961
956
962 if dir not in _fspathcache:
957 if dir not in _fspathcache:
963 _fspathcache[dir] = _makefspathcacheentry(dir)
958 _fspathcache[dir] = _makefspathcacheentry(dir)
964 contents = _fspathcache[dir]
959 contents = _fspathcache[dir]
965
960
966 found = contents.get(part)
961 found = contents.get(part)
967 if not found:
962 if not found:
968 # retry "once per directory" per "dirstate.walk" which
963 # retry "once per directory" per "dirstate.walk" which
969 # may take place for each patch of "hg qpush", for example
964 # may take place for each patch of "hg qpush", for example
970 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
965 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
971 found = contents.get(part)
966 found = contents.get(part)
972
967
973 result.append(found or part)
968 result.append(found or part)
974 dir = os.path.join(dir, part)
969 dir = os.path.join(dir, part)
975
970
976 return ''.join(result)
971 return ''.join(result)
977
972
978 def checknlink(testfile):
973 def checknlink(testfile):
979 '''check whether hardlink count reporting works properly'''
974 '''check whether hardlink count reporting works properly'''
980
975
981 # testfile may be open, so we need a separate file for checking to
976 # testfile may be open, so we need a separate file for checking to
982 # work around issue2543 (or testfile may get lost on Samba shares)
977 # work around issue2543 (or testfile may get lost on Samba shares)
983 f1 = testfile + ".hgtmp1"
978 f1 = testfile + ".hgtmp1"
984 if os.path.lexists(f1):
979 if os.path.lexists(f1):
985 return False
980 return False
986 try:
981 try:
987 posixfile(f1, 'w').close()
982 posixfile(f1, 'w').close()
988 except IOError:
983 except IOError:
989 return False
984 return False
990
985
991 f2 = testfile + ".hgtmp2"
986 f2 = testfile + ".hgtmp2"
992 fd = None
987 fd = None
993 try:
988 try:
994 oslink(f1, f2)
989 oslink(f1, f2)
995 # nlinks() may behave differently for files on Windows shares if
990 # nlinks() may behave differently for files on Windows shares if
996 # the file is open.
991 # the file is open.
997 fd = posixfile(f2)
992 fd = posixfile(f2)
998 return nlinks(f2) > 1
993 return nlinks(f2) > 1
999 except OSError:
994 except OSError:
1000 return False
995 return False
1001 finally:
996 finally:
1002 if fd is not None:
997 if fd is not None:
1003 fd.close()
998 fd.close()
1004 for f in (f1, f2):
999 for f in (f1, f2):
1005 try:
1000 try:
1006 os.unlink(f)
1001 os.unlink(f)
1007 except OSError:
1002 except OSError:
1008 pass
1003 pass
1009
1004
1010 def endswithsep(path):
1005 def endswithsep(path):
1011 '''Check path ends with os.sep or os.altsep.'''
1006 '''Check path ends with os.sep or os.altsep.'''
1012 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1007 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1013
1008
1014 def splitpath(path):
1009 def splitpath(path):
1015 '''Split path by os.sep.
1010 '''Split path by os.sep.
1016 Note that this function does not use os.altsep because it is
1011 Note that this function does not use os.altsep because it is
1017 intended as a simple alternative to "xxx.split(os.sep)".
1012 intended as a simple alternative to "xxx.split(os.sep)".
1018 It is recommended to use os.path.normpath() before using this
1013 It is recommended to use os.path.normpath() before using this
1019 function if needed.'''
1014 function if needed.'''
1020 return path.split(os.sep)
1015 return path.split(os.sep)
1021
1016
1022 def gui():
1017 def gui():
1023 '''Are we running in a GUI?'''
1018 '''Are we running in a GUI?'''
1024 if sys.platform == 'darwin':
1019 if sys.platform == 'darwin':
1025 if 'SSH_CONNECTION' in os.environ:
1020 if 'SSH_CONNECTION' in os.environ:
1026 # handle SSH access to a box where the user is logged in
1021 # handle SSH access to a box where the user is logged in
1027 return False
1022 return False
1028 elif getattr(osutil, 'isgui', None):
1023 elif getattr(osutil, 'isgui', None):
1029 # check if a CoreGraphics session is available
1024 # check if a CoreGraphics session is available
1030 return osutil.isgui()
1025 return osutil.isgui()
1031 else:
1026 else:
1032 # pure build; use a safe default
1027 # pure build; use a safe default
1033 return True
1028 return True
1034 else:
1029 else:
1035 return os.name == "nt" or os.environ.get("DISPLAY")
1030 return os.name == "nt" or os.environ.get("DISPLAY")
1036
1031
1037 def mktempcopy(name, emptyok=False, createmode=None):
1032 def mktempcopy(name, emptyok=False, createmode=None):
1038 """Create a temporary file with the same contents from name
1033 """Create a temporary file with the same contents from name
1039
1034
1040 The permission bits are copied from the original file.
1035 The permission bits are copied from the original file.
1041
1036
1042 If the temporary file is going to be truncated immediately, you
1037 If the temporary file is going to be truncated immediately, you
1043 can use emptyok=True as an optimization.
1038 can use emptyok=True as an optimization.
1044
1039
1045 Returns the name of the temporary file.
1040 Returns the name of the temporary file.
1046 """
1041 """
1047 d, fn = os.path.split(name)
1042 d, fn = os.path.split(name)
1048 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1043 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1049 os.close(fd)
1044 os.close(fd)
1050 # Temporary files are created with mode 0600, which is usually not
1045 # Temporary files are created with mode 0600, which is usually not
1051 # what we want. If the original file already exists, just copy
1046 # what we want. If the original file already exists, just copy
1052 # its mode. Otherwise, manually obey umask.
1047 # its mode. Otherwise, manually obey umask.
1053 copymode(name, temp, createmode)
1048 copymode(name, temp, createmode)
1054 if emptyok:
1049 if emptyok:
1055 return temp
1050 return temp
1056 try:
1051 try:
1057 try:
1052 try:
1058 ifp = posixfile(name, "rb")
1053 ifp = posixfile(name, "rb")
1059 except IOError, inst:
1054 except IOError, inst:
1060 if inst.errno == errno.ENOENT:
1055 if inst.errno == errno.ENOENT:
1061 return temp
1056 return temp
1062 if not getattr(inst, 'filename', None):
1057 if not getattr(inst, 'filename', None):
1063 inst.filename = name
1058 inst.filename = name
1064 raise
1059 raise
1065 ofp = posixfile(temp, "wb")
1060 ofp = posixfile(temp, "wb")
1066 for chunk in filechunkiter(ifp):
1061 for chunk in filechunkiter(ifp):
1067 ofp.write(chunk)
1062 ofp.write(chunk)
1068 ifp.close()
1063 ifp.close()
1069 ofp.close()
1064 ofp.close()
1070 except: # re-raises
1065 except: # re-raises
1071 try: os.unlink(temp)
1066 try: os.unlink(temp)
1072 except OSError: pass
1067 except OSError: pass
1073 raise
1068 raise
1074 return temp
1069 return temp
1075
1070
1076 class atomictempfile(object):
1071 class atomictempfile(object):
1077 '''writable file object that atomically updates a file
1072 '''writable file object that atomically updates a file
1078
1073
1079 All writes will go to a temporary copy of the original file. Call
1074 All writes will go to a temporary copy of the original file. Call
1080 close() when you are done writing, and atomictempfile will rename
1075 close() when you are done writing, and atomictempfile will rename
1081 the temporary copy to the original name, making the changes
1076 the temporary copy to the original name, making the changes
1082 visible. If the object is destroyed without being closed, all your
1077 visible. If the object is destroyed without being closed, all your
1083 writes are discarded.
1078 writes are discarded.
1084 '''
1079 '''
1085 def __init__(self, name, mode='w+b', createmode=None):
1080 def __init__(self, name, mode='w+b', createmode=None):
1086 self.__name = name # permanent name
1081 self.__name = name # permanent name
1087 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1082 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1088 createmode=createmode)
1083 createmode=createmode)
1089 self._fp = posixfile(self._tempname, mode)
1084 self._fp = posixfile(self._tempname, mode)
1090
1085
1091 # delegated methods
1086 # delegated methods
1092 self.write = self._fp.write
1087 self.write = self._fp.write
1093 self.seek = self._fp.seek
1088 self.seek = self._fp.seek
1094 self.tell = self._fp.tell
1089 self.tell = self._fp.tell
1095 self.fileno = self._fp.fileno
1090 self.fileno = self._fp.fileno
1096
1091
1097 def close(self):
1092 def close(self):
1098 if not self._fp.closed:
1093 if not self._fp.closed:
1099 self._fp.close()
1094 self._fp.close()
1100 rename(self._tempname, localpath(self.__name))
1095 rename(self._tempname, localpath(self.__name))
1101
1096
1102 def discard(self):
1097 def discard(self):
1103 if not self._fp.closed:
1098 if not self._fp.closed:
1104 try:
1099 try:
1105 os.unlink(self._tempname)
1100 os.unlink(self._tempname)
1106 except OSError:
1101 except OSError:
1107 pass
1102 pass
1108 self._fp.close()
1103 self._fp.close()
1109
1104
1110 def __del__(self):
1105 def __del__(self):
1111 if safehasattr(self, '_fp'): # constructor actually did something
1106 if safehasattr(self, '_fp'): # constructor actually did something
1112 self.discard()
1107 self.discard()
1113
1108
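The write-then-close contract described in the class docstring is easiest to see in a short usage sketch (the 'data.txt' name and the mercurial.util import path are illustrative assumptions):

    from mercurial import util

    f = util.atomictempfile('data.txt')
    try:
        f.write('all or nothing\n')
    except Exception:
        f.discard()   # leaves any existing 'data.txt' untouched
        raise
    else:
        f.close()     # atomically renames the temporary copy over 'data.txt'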
1114 def makedirs(name, mode=None, notindexed=False):
1109 def makedirs(name, mode=None, notindexed=False):
1115 """recursive directory creation with parent mode inheritance"""
1110 """recursive directory creation with parent mode inheritance"""
1116 try:
1111 try:
1117 makedir(name, notindexed)
1112 makedir(name, notindexed)
1118 except OSError, err:
1113 except OSError, err:
1119 if err.errno == errno.EEXIST:
1114 if err.errno == errno.EEXIST:
1120 return
1115 return
1121 if err.errno != errno.ENOENT or not name:
1116 if err.errno != errno.ENOENT or not name:
1122 raise
1117 raise
1123 parent = os.path.dirname(os.path.abspath(name))
1118 parent = os.path.dirname(os.path.abspath(name))
1124 if parent == name:
1119 if parent == name:
1125 raise
1120 raise
1126 makedirs(parent, mode, notindexed)
1121 makedirs(parent, mode, notindexed)
1127 makedir(name, notindexed)
1122 makedir(name, notindexed)
1128 if mode is not None:
1123 if mode is not None:
1129 os.chmod(name, mode)
1124 os.chmod(name, mode)
1130
1125
1131 def ensuredirs(name, mode=None, notindexed=False):
1126 def ensuredirs(name, mode=None, notindexed=False):
1132 """race-safe recursive directory creation
1127 """race-safe recursive directory creation
1133
1128
1134 Newly created directories are marked as "not to be indexed by
1129 Newly created directories are marked as "not to be indexed by
1135 the content indexing service", if ``notindexed`` is specified
1130 the content indexing service", if ``notindexed`` is specified
1136 for "write" mode access.
1131 for "write" mode access.
1137 """
1132 """
1138 if os.path.isdir(name):
1133 if os.path.isdir(name):
1139 return
1134 return
1140 parent = os.path.dirname(os.path.abspath(name))
1135 parent = os.path.dirname(os.path.abspath(name))
1141 if parent != name:
1136 if parent != name:
1142 ensuredirs(parent, mode, notindexed)
1137 ensuredirs(parent, mode, notindexed)
1143 try:
1138 try:
1144 makedir(name, notindexed)
1139 makedir(name, notindexed)
1145 except OSError, err:
1140 except OSError, err:
1146 if err.errno == errno.EEXIST and os.path.isdir(name):
1141 if err.errno == errno.EEXIST and os.path.isdir(name):
1147 # someone else seems to have won a directory creation race
1142 # someone else seems to have won a directory creation race
1148 return
1143 return
1149 raise
1144 raise
1150 if mode is not None:
1145 if mode is not None:
1151 os.chmod(name, mode)
1146 os.chmod(name, mode)
1152
1147
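A minimal sketch of the race-safe behaviour (the path is illustrative): repeated or concurrent calls for the same directory all succeed, because a directory that already exists is treated as a win in the creation race.

    from mercurial import util

    util.ensuredirs('cache/store/data')   # creates the missing parents too
    util.ensuredirs('cache/store/data')   # second call is a harmless no-op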
1153 def readfile(path):
1148 def readfile(path):
1154 fp = open(path, 'rb')
1149 fp = open(path, 'rb')
1155 try:
1150 try:
1156 return fp.read()
1151 return fp.read()
1157 finally:
1152 finally:
1158 fp.close()
1153 fp.close()
1159
1154
1160 def writefile(path, text):
1155 def writefile(path, text):
1161 fp = open(path, 'wb')
1156 fp = open(path, 'wb')
1162 try:
1157 try:
1163 fp.write(text)
1158 fp.write(text)
1164 finally:
1159 finally:
1165 fp.close()
1160 fp.close()
1166
1161
1167 def appendfile(path, text):
1162 def appendfile(path, text):
1168 fp = open(path, 'ab')
1163 fp = open(path, 'ab')
1169 try:
1164 try:
1170 fp.write(text)
1165 fp.write(text)
1171 finally:
1166 finally:
1172 fp.close()
1167 fp.close()
1173
1168
1174 class chunkbuffer(object):
1169 class chunkbuffer(object):
1175 """Allow arbitrary sized chunks of data to be efficiently read from an
1170 """Allow arbitrary sized chunks of data to be efficiently read from an
1176 iterator over chunks of arbitrary size."""
1171 iterator over chunks of arbitrary size."""
1177
1172
1178 def __init__(self, in_iter):
1173 def __init__(self, in_iter):
1179 """in_iter is the iterator that's iterating over the input chunks.
1174 """in_iter is the iterator that's iterating over the input chunks.
1180 targetsize is how big a buffer to try to maintain."""
1175 targetsize is how big a buffer to try to maintain."""
1181 def splitbig(chunks):
1176 def splitbig(chunks):
1182 for chunk in chunks:
1177 for chunk in chunks:
1183 if len(chunk) > 2**20:
1178 if len(chunk) > 2**20:
1184 pos = 0
1179 pos = 0
1185 while pos < len(chunk):
1180 while pos < len(chunk):
1186 end = pos + 2 ** 18
1181 end = pos + 2 ** 18
1187 yield chunk[pos:end]
1182 yield chunk[pos:end]
1188 pos = end
1183 pos = end
1189 else:
1184 else:
1190 yield chunk
1185 yield chunk
1191 self.iter = splitbig(in_iter)
1186 self.iter = splitbig(in_iter)
1192 self._queue = collections.deque()
1187 self._queue = collections.deque()
1193
1188
1194 def read(self, l=None):
1189 def read(self, l=None):
1195 """Read L bytes of data from the iterator of chunks of data.
1190 """Read L bytes of data from the iterator of chunks of data.
1196 Returns less than L bytes if the iterator runs dry.
1191 Returns less than L bytes if the iterator runs dry.
1197
1192
1198 If the size parameter (l) is omitted, read everything."""
1193 If the size parameter (l) is omitted, read everything."""
1199 left = l
1194 left = l
1200 buf = []
1195 buf = []
1201 queue = self._queue
1196 queue = self._queue
1202 while left is None or left > 0:
1197 while left is None or left > 0:
1203 # refill the queue
1198 # refill the queue
1204 if not queue:
1199 if not queue:
1205 target = 2**18
1200 target = 2**18
1206 for chunk in self.iter:
1201 for chunk in self.iter:
1207 queue.append(chunk)
1202 queue.append(chunk)
1208 target -= len(chunk)
1203 target -= len(chunk)
1209 if target <= 0:
1204 if target <= 0:
1210 break
1205 break
1211 if not queue:
1206 if not queue:
1212 break
1207 break
1213
1208
1214 chunk = queue.popleft()
1209 chunk = queue.popleft()
1215 if left is not None:
1210 if left is not None:
1216 left -= len(chunk)
1211 left -= len(chunk)
1217 if left is not None and left < 0:
1212 if left is not None and left < 0:
1218 queue.appendleft(chunk[left:])
1213 queue.appendleft(chunk[left:])
1219 buf.append(chunk[:left])
1214 buf.append(chunk[:left])
1220 else:
1215 else:
1221 buf.append(chunk)
1216 buf.append(chunk)
1222
1217
1223 return ''.join(buf)
1218 return ''.join(buf)
1224
1219
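A small sketch of how chunkbuffer presents unevenly sized chunks as a file-like object with read(n) semantics:

    from mercurial import util

    buf = util.chunkbuffer(iter(['abc', 'defgh', 'ij']))
    buf.read(4)    # 'abcd' - spans the first two input chunks
    buf.read()     # 'efghij' - omitting the size reads everything left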
1225 def filechunkiter(f, size=65536, limit=None):
1220 def filechunkiter(f, size=65536, limit=None):
1226 """Create a generator that produces the data in the file size
1221 """Create a generator that produces the data in the file size
1227 (default 65536) bytes at a time, up to optional limit (default is
1222 (default 65536) bytes at a time, up to optional limit (default is
1228 to read all data). Chunks may be less than size bytes if the
1223 to read all data). Chunks may be less than size bytes if the
1229 chunk is the last chunk in the file, or the file is a socket or
1224 chunk is the last chunk in the file, or the file is a socket or
1230 some other type of file that sometimes reads less data than is
1225 some other type of file that sometimes reads less data than is
1231 requested."""
1226 requested."""
1232 assert size >= 0
1227 assert size >= 0
1233 assert limit is None or limit >= 0
1228 assert limit is None or limit >= 0
1234 while True:
1229 while True:
1235 if limit is None:
1230 if limit is None:
1236 nbytes = size
1231 nbytes = size
1237 else:
1232 else:
1238 nbytes = min(limit, size)
1233 nbytes = min(limit, size)
1239 s = nbytes and f.read(nbytes)
1234 s = nbytes and f.read(nbytes)
1240 if not s:
1235 if not s:
1241 break
1236 break
1242 if limit:
1237 if limit:
1243 limit -= len(s)
1238 limit -= len(s)
1244 yield s
1239 yield s
1245
1240
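A short sketch of streaming a file in bounded chunks instead of reading it into memory at once (the file name is illustrative):

    from mercurial import util

    fp = open('bigfile.bin', 'rb')
    try:
        total = 0
        for chunk in util.filechunkiter(fp, size=32768):
            total += len(chunk)   # each chunk is at most 32768 bytes
    finally:
        fp.close()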
1246 def makedate(timestamp=None):
1241 def makedate(timestamp=None):
1247 '''Return a unix timestamp (or the current time) as a (unixtime,
1242 '''Return a unix timestamp (or the current time) as a (unixtime,
1248 offset) tuple based off the local timezone.'''
1243 offset) tuple based off the local timezone.'''
1249 if timestamp is None:
1244 if timestamp is None:
1250 timestamp = time.time()
1245 timestamp = time.time()
1251 if timestamp < 0:
1246 if timestamp < 0:
1252 hint = _("check your clock")
1247 hint = _("check your clock")
1253 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1248 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1254 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1249 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1255 datetime.datetime.fromtimestamp(timestamp))
1250 datetime.datetime.fromtimestamp(timestamp))
1256 tz = delta.days * 86400 + delta.seconds
1251 tz = delta.days * 86400 + delta.seconds
1257 return timestamp, tz
1252 return timestamp, tz
1258
1253
1259 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1254 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1260 """represent a (unixtime, offset) tuple as a localized time.
1255 """represent a (unixtime, offset) tuple as a localized time.
1261 unixtime is seconds since the epoch, and offset is the time zone's
1256 unixtime is seconds since the epoch, and offset is the time zone's
1262 number of seconds away from UTC. The time zone is emitted through the
1257 number of seconds away from UTC. The time zone is emitted through the
1263 %1/%2 (or %z) placeholders in format; leave them out to omit it."""
1258 %1/%2 (or %z) placeholders in format; leave them out to omit it."""
1264 t, tz = date or makedate()
1259 t, tz = date or makedate()
1265 if t < 0:
1260 if t < 0:
1266 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1261 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1267 tz = 0
1262 tz = 0
1268 if "%1" in format or "%2" in format or "%z" in format:
1263 if "%1" in format or "%2" in format or "%z" in format:
1269 sign = (tz > 0) and "-" or "+"
1264 sign = (tz > 0) and "-" or "+"
1270 minutes = abs(tz) // 60
1265 minutes = abs(tz) // 60
1271 format = format.replace("%z", "%1%2")
1266 format = format.replace("%z", "%1%2")
1272 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1267 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1273 format = format.replace("%2", "%02d" % (minutes % 60))
1268 format = format.replace("%2", "%02d" % (minutes % 60))
1274 try:
1269 try:
1275 t = time.gmtime(float(t) - tz)
1270 t = time.gmtime(float(t) - tz)
1276 except ValueError:
1271 except ValueError:
1277 # time was out of range
1272 # time was out of range
1278 t = time.gmtime(sys.maxint)
1273 t = time.gmtime(sys.maxint)
1279 s = time.strftime(format, t)
1274 s = time.strftime(format, t)
1280 return s
1275 return s
1281
1276
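A quick sketch of the (unixtime, offset) convention used by makedate() and datestr(): offset is the zone's distance from UTC in seconds (negative east of UTC), and %1/%2 in the format expand to the signed hour/minute offset.

    from mercurial import util

    when = (1431000000, -7200)                    # 2015-05-07 12:40:00 UTC in a UTC+02:00 zone
    util.datestr(when)                            # 'Thu May 07 14:40:00 2015 +0200'
    util.datestr(when, format='%Y-%m-%d')         # '2015-05-07'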
1282 def shortdate(date=None):
1277 def shortdate(date=None):
1283 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1278 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1284 return datestr(date, format='%Y-%m-%d')
1279 return datestr(date, format='%Y-%m-%d')
1285
1280
1286 def strdate(string, format, defaults=[]):
1281 def strdate(string, format, defaults=[]):
1287 """parse a localized time string and return a (unixtime, offset) tuple.
1282 """parse a localized time string and return a (unixtime, offset) tuple.
1288 if the string cannot be parsed, ValueError is raised."""
1283 if the string cannot be parsed, ValueError is raised."""
1289 def timezone(string):
1284 def timezone(string):
1290 tz = string.split()[-1]
1285 tz = string.split()[-1]
1291 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1286 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1292 sign = (tz[0] == "+") and 1 or -1
1287 sign = (tz[0] == "+") and 1 or -1
1293 hours = int(tz[1:3])
1288 hours = int(tz[1:3])
1294 minutes = int(tz[3:5])
1289 minutes = int(tz[3:5])
1295 return -sign * (hours * 60 + minutes) * 60
1290 return -sign * (hours * 60 + minutes) * 60
1296 if tz == "GMT" or tz == "UTC":
1291 if tz == "GMT" or tz == "UTC":
1297 return 0
1292 return 0
1298 return None
1293 return None
1299
1294
1300 # NOTE: unixtime = localunixtime + offset
1295 # NOTE: unixtime = localunixtime + offset
1301 offset, date = timezone(string), string
1296 offset, date = timezone(string), string
1302 if offset is not None:
1297 if offset is not None:
1303 date = " ".join(string.split()[:-1])
1298 date = " ".join(string.split()[:-1])
1304
1299
1305 # add missing elements from defaults
1300 # add missing elements from defaults
1306 usenow = False # default to using biased defaults
1301 usenow = False # default to using biased defaults
1307 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1302 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1308 found = [True for p in part if ("%"+p) in format]
1303 found = [True for p in part if ("%"+p) in format]
1309 if not found:
1304 if not found:
1310 date += "@" + defaults[part][usenow]
1305 date += "@" + defaults[part][usenow]
1311 format += "@%" + part[0]
1306 format += "@%" + part[0]
1312 else:
1307 else:
1313 # We've found a specific time element, less specific time
1308 # We've found a specific time element, less specific time
1314 # elements are relative to today
1309 # elements are relative to today
1315 usenow = True
1310 usenow = True
1316
1311
1317 timetuple = time.strptime(date, format)
1312 timetuple = time.strptime(date, format)
1318 localunixtime = int(calendar.timegm(timetuple))
1313 localunixtime = int(calendar.timegm(timetuple))
1319 if offset is None:
1314 if offset is None:
1320 # local timezone
1315 # local timezone
1321 unixtime = int(time.mktime(timetuple))
1316 unixtime = int(time.mktime(timetuple))
1322 offset = unixtime - localunixtime
1317 offset = unixtime - localunixtime
1323 else:
1318 else:
1324 unixtime = localunixtime + offset
1319 unixtime = localunixtime + offset
1325 return unixtime, offset
1320 return unixtime, offset
1326
1321
1327 def parsedate(date, formats=None, bias={}):
1322 def parsedate(date, formats=None, bias={}):
1328 """parse a localized date/time and return a (unixtime, offset) tuple.
1323 """parse a localized date/time and return a (unixtime, offset) tuple.
1329
1324
1330 The date may be a "unixtime offset" string or in one of the specified
1325 The date may be a "unixtime offset" string or in one of the specified
1331 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1326 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1332
1327
1333 >>> parsedate(' today ') == parsedate(\
1328 >>> parsedate(' today ') == parsedate(\
1334 datetime.date.today().strftime('%b %d'))
1329 datetime.date.today().strftime('%b %d'))
1335 True
1330 True
1336 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1331 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1337 datetime.timedelta(days=1)\
1332 datetime.timedelta(days=1)\
1338 ).strftime('%b %d'))
1333 ).strftime('%b %d'))
1339 True
1334 True
1340 >>> now, tz = makedate()
1335 >>> now, tz = makedate()
1341 >>> strnow, strtz = parsedate('now')
1336 >>> strnow, strtz = parsedate('now')
1342 >>> (strnow - now) < 1
1337 >>> (strnow - now) < 1
1343 True
1338 True
1344 >>> tz == strtz
1339 >>> tz == strtz
1345 True
1340 True
1346 """
1341 """
1347 if not date:
1342 if not date:
1348 return 0, 0
1343 return 0, 0
1349 if isinstance(date, tuple) and len(date) == 2:
1344 if isinstance(date, tuple) and len(date) == 2:
1350 return date
1345 return date
1351 if not formats:
1346 if not formats:
1352 formats = defaultdateformats
1347 formats = defaultdateformats
1353 date = date.strip()
1348 date = date.strip()
1354
1349
1355 if date == 'now' or date == _('now'):
1350 if date == 'now' or date == _('now'):
1356 return makedate()
1351 return makedate()
1357 if date == 'today' or date == _('today'):
1352 if date == 'today' or date == _('today'):
1358 date = datetime.date.today().strftime('%b %d')
1353 date = datetime.date.today().strftime('%b %d')
1359 elif date == 'yesterday' or date == _('yesterday'):
1354 elif date == 'yesterday' or date == _('yesterday'):
1360 date = (datetime.date.today() -
1355 date = (datetime.date.today() -
1361 datetime.timedelta(days=1)).strftime('%b %d')
1356 datetime.timedelta(days=1)).strftime('%b %d')
1362
1357
1363 try:
1358 try:
1364 when, offset = map(int, date.split(' '))
1359 when, offset = map(int, date.split(' '))
1365 except ValueError:
1360 except ValueError:
1366 # fill out defaults
1361 # fill out defaults
1367 now = makedate()
1362 now = makedate()
1368 defaults = {}
1363 defaults = {}
1369 for part in ("d", "mb", "yY", "HI", "M", "S"):
1364 for part in ("d", "mb", "yY", "HI", "M", "S"):
1370 # this piece is for rounding the specific end of unknowns
1365 # this piece is for rounding the specific end of unknowns
1371 b = bias.get(part)
1366 b = bias.get(part)
1372 if b is None:
1367 if b is None:
1373 if part[0] in "HMS":
1368 if part[0] in "HMS":
1374 b = "00"
1369 b = "00"
1375 else:
1370 else:
1376 b = "0"
1371 b = "0"
1377
1372
1378 # this piece is for matching the generic end to today's date
1373 # this piece is for matching the generic end to today's date
1379 n = datestr(now, "%" + part[0])
1374 n = datestr(now, "%" + part[0])
1380
1375
1381 defaults[part] = (b, n)
1376 defaults[part] = (b, n)
1382
1377
1383 for format in formats:
1378 for format in formats:
1384 try:
1379 try:
1385 when, offset = strdate(date, format, defaults)
1380 when, offset = strdate(date, format, defaults)
1386 except (ValueError, OverflowError):
1381 except (ValueError, OverflowError):
1387 pass
1382 pass
1388 else:
1383 else:
1389 break
1384 break
1390 else:
1385 else:
1391 raise Abort(_('invalid date: %r') % date)
1386 raise Abort(_('invalid date: %r') % date)
1392 # validate explicit (probably user-specified) date and
1387 # validate explicit (probably user-specified) date and
1393 # time zone offset. values must fit in signed 32 bits for
1388 # time zone offset. values must fit in signed 32 bits for
1394 # current 32-bit linux runtimes. timezones go from UTC-12
1389 # current 32-bit linux runtimes. timezones go from UTC-12
1395 # to UTC+14
1390 # to UTC+14
1396 if abs(when) > 0x7fffffff:
1391 if abs(when) > 0x7fffffff:
1397 raise Abort(_('date exceeds 32 bits: %d') % when)
1392 raise Abort(_('date exceeds 32 bits: %d') % when)
1398 if when < 0:
1393 if when < 0:
1399 raise Abort(_('negative date value: %d') % when)
1394 raise Abort(_('negative date value: %d') % when)
1400 if offset < -50400 or offset > 43200:
1395 if offset < -50400 or offset > 43200:
1401 raise Abort(_('impossible time zone offset: %d') % offset)
1396 raise Abort(_('impossible time zone offset: %d') % offset)
1402 return when, offset
1397 return when, offset
1403
1398
1404 def matchdate(date):
1399 def matchdate(date):
1405 """Return a function that matches a given date match specifier
1400 """Return a function that matches a given date match specifier
1406
1401
1407 Formats include:
1402 Formats include:
1408
1403
1409 '{date}' match a given date to the accuracy provided
1404 '{date}' match a given date to the accuracy provided
1410
1405
1411 '<{date}' on or before a given date
1406 '<{date}' on or before a given date
1412
1407
1413 '>{date}' on or after a given date
1408 '>{date}' on or after a given date
1414
1409
1415 >>> p1 = parsedate("10:29:59")
1410 >>> p1 = parsedate("10:29:59")
1416 >>> p2 = parsedate("10:30:00")
1411 >>> p2 = parsedate("10:30:00")
1417 >>> p3 = parsedate("10:30:59")
1412 >>> p3 = parsedate("10:30:59")
1418 >>> p4 = parsedate("10:31:00")
1413 >>> p4 = parsedate("10:31:00")
1419 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1414 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1420 >>> f = matchdate("10:30")
1415 >>> f = matchdate("10:30")
1421 >>> f(p1[0])
1416 >>> f(p1[0])
1422 False
1417 False
1423 >>> f(p2[0])
1418 >>> f(p2[0])
1424 True
1419 True
1425 >>> f(p3[0])
1420 >>> f(p3[0])
1426 True
1421 True
1427 >>> f(p4[0])
1422 >>> f(p4[0])
1428 False
1423 False
1429 >>> f(p5[0])
1424 >>> f(p5[0])
1430 False
1425 False
1431 """
1426 """
1432
1427
1433 def lower(date):
1428 def lower(date):
1434 d = {'mb': "1", 'd': "1"}
1429 d = {'mb': "1", 'd': "1"}
1435 return parsedate(date, extendeddateformats, d)[0]
1430 return parsedate(date, extendeddateformats, d)[0]
1436
1431
1437 def upper(date):
1432 def upper(date):
1438 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1433 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1439 for days in ("31", "30", "29"):
1434 for days in ("31", "30", "29"):
1440 try:
1435 try:
1441 d["d"] = days
1436 d["d"] = days
1442 return parsedate(date, extendeddateformats, d)[0]
1437 return parsedate(date, extendeddateformats, d)[0]
1443 except Abort:
1438 except Abort:
1444 pass
1439 pass
1445 d["d"] = "28"
1440 d["d"] = "28"
1446 return parsedate(date, extendeddateformats, d)[0]
1441 return parsedate(date, extendeddateformats, d)[0]
1447
1442
1448 date = date.strip()
1443 date = date.strip()
1449
1444
1450 if not date:
1445 if not date:
1451 raise Abort(_("dates cannot consist entirely of whitespace"))
1446 raise Abort(_("dates cannot consist entirely of whitespace"))
1452 elif date[0] == "<":
1447 elif date[0] == "<":
1453 if not date[1:]:
1448 if not date[1:]:
1454 raise Abort(_("invalid day spec, use '<DATE'"))
1449 raise Abort(_("invalid day spec, use '<DATE'"))
1455 when = upper(date[1:])
1450 when = upper(date[1:])
1456 return lambda x: x <= when
1451 return lambda x: x <= when
1457 elif date[0] == ">":
1452 elif date[0] == ">":
1458 if not date[1:]:
1453 if not date[1:]:
1459 raise Abort(_("invalid day spec, use '>DATE'"))
1454 raise Abort(_("invalid day spec, use '>DATE'"))
1460 when = lower(date[1:])
1455 when = lower(date[1:])
1461 return lambda x: x >= when
1456 return lambda x: x >= when
1462 elif date[0] == "-":
1457 elif date[0] == "-":
1463 try:
1458 try:
1464 days = int(date[1:])
1459 days = int(date[1:])
1465 except ValueError:
1460 except ValueError:
1466 raise Abort(_("invalid day spec: %s") % date[1:])
1461 raise Abort(_("invalid day spec: %s") % date[1:])
1467 if days < 0:
1462 if days < 0:
1468 raise Abort(_('%s must be nonnegative (see "hg help dates")')
1463 raise Abort(_('%s must be nonnegative (see "hg help dates")')
1469 % date[1:])
1464 % date[1:])
1470 when = makedate()[0] - days * 3600 * 24
1465 when = makedate()[0] - days * 3600 * 24
1471 return lambda x: x >= when
1466 return lambda x: x >= when
1472 elif " to " in date:
1467 elif " to " in date:
1473 a, b = date.split(" to ")
1468 a, b = date.split(" to ")
1474 start, stop = lower(a), upper(b)
1469 start, stop = lower(a), upper(b)
1475 return lambda x: x >= start and x <= stop
1470 return lambda x: x >= start and x <= stop
1476 else:
1471 else:
1477 start, stop = lower(date), upper(date)
1472 start, stop = lower(date), upper(date)
1478 return lambda x: x >= start and x <= stop
1473 return lambda x: x >= start and x <= stop
1479
1474
1480 def shortuser(user):
1475 def shortuser(user):
1481 """Return a short representation of a user name or email address."""
1476 """Return a short representation of a user name or email address."""
1482 f = user.find('@')
1477 f = user.find('@')
1483 if f >= 0:
1478 if f >= 0:
1484 user = user[:f]
1479 user = user[:f]
1485 f = user.find('<')
1480 f = user.find('<')
1486 if f >= 0:
1481 if f >= 0:
1487 user = user[f + 1:]
1482 user = user[f + 1:]
1488 f = user.find(' ')
1483 f = user.find(' ')
1489 if f >= 0:
1484 if f >= 0:
1490 user = user[:f]
1485 user = user[:f]
1491 f = user.find('.')
1486 f = user.find('.')
1492 if f >= 0:
1487 if f >= 0:
1493 user = user[:f]
1488 user = user[:f]
1494 return user
1489 return user
1495
1490
1496 def emailuser(user):
1491 def emailuser(user):
1497 """Return the user portion of an email address."""
1492 """Return the user portion of an email address."""
1498 f = user.find('@')
1493 f = user.find('@')
1499 if f >= 0:
1494 if f >= 0:
1500 user = user[:f]
1495 user = user[:f]
1501 f = user.find('<')
1496 f = user.find('<')
1502 if f >= 0:
1497 if f >= 0:
1503 user = user[f + 1:]
1498 user = user[f + 1:]
1504 return user
1499 return user
1505
1500
1506 def email(author):
1501 def email(author):
1507 '''get email of author.'''
1502 '''get email of author.'''
1508 r = author.find('>')
1503 r = author.find('>')
1509 if r == -1:
1504 if r == -1:
1510 r = None
1505 r = None
1511 return author[author.find('<') + 1:r]
1506 return author[author.find('<') + 1:r]
1512
1507
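A minimal sketch of the three author helpers above on a typical changeset author string:

    from mercurial import util

    author = 'Jane Doe <jane.doe@example.com>'
    util.email(author)       # 'jane.doe@example.com'
    util.emailuser(author)   # 'jane.doe'
    util.shortuser(author)   # 'jane'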
1513 def ellipsis(text, maxlength=400):
1508 def ellipsis(text, maxlength=400):
1514 """Trim string to at most maxlength (default: 400) columns in display."""
1509 """Trim string to at most maxlength (default: 400) columns in display."""
1515 return encoding.trim(text, maxlength, ellipsis='...')
1510 return encoding.trim(text, maxlength, ellipsis='...')
1516
1511
1517 def unitcountfn(*unittable):
1512 def unitcountfn(*unittable):
1518 '''return a function that renders a readable count of some quantity'''
1513 '''return a function that renders a readable count of some quantity'''
1519
1514
1520 def go(count):
1515 def go(count):
1521 for multiplier, divisor, format in unittable:
1516 for multiplier, divisor, format in unittable:
1522 if count >= divisor * multiplier:
1517 if count >= divisor * multiplier:
1523 return format % (count / float(divisor))
1518 return format % (count / float(divisor))
1524 return unittable[-1][2] % count
1519 return unittable[-1][2] % count
1525
1520
1526 return go
1521 return go
1527
1522
1528 bytecount = unitcountfn(
1523 bytecount = unitcountfn(
1529 (100, 1 << 30, _('%.0f GB')),
1524 (100, 1 << 30, _('%.0f GB')),
1530 (10, 1 << 30, _('%.1f GB')),
1525 (10, 1 << 30, _('%.1f GB')),
1531 (1, 1 << 30, _('%.2f GB')),
1526 (1, 1 << 30, _('%.2f GB')),
1532 (100, 1 << 20, _('%.0f MB')),
1527 (100, 1 << 20, _('%.0f MB')),
1533 (10, 1 << 20, _('%.1f MB')),
1528 (10, 1 << 20, _('%.1f MB')),
1534 (1, 1 << 20, _('%.2f MB')),
1529 (1, 1 << 20, _('%.2f MB')),
1535 (100, 1 << 10, _('%.0f KB')),
1530 (100, 1 << 10, _('%.0f KB')),
1536 (10, 1 << 10, _('%.1f KB')),
1531 (10, 1 << 10, _('%.1f KB')),
1537 (1, 1 << 10, _('%.2f KB')),
1532 (1, 1 << 10, _('%.2f KB')),
1538 (1, 1, _('%.0f bytes')),
1533 (1, 1, _('%.0f bytes')),
1539 )
1534 )
1540
1535
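A small sketch of how the unittable drives bytecount(): rows are scanned top-down and the first row whose threshold (multiplier * divisor) fits is used, which keeps roughly three significant digits (results assume the default, untranslated locale):

    from mercurial import util

    util.bytecount(512)               # '512 bytes'
    util.bytecount(2048)              # '2.00 KB'
    util.bytecount(150 * (1 << 20))   # '150 MB'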
1541 def uirepr(s):
1536 def uirepr(s):
1542 # Avoid double backslash in Windows path repr()
1537 # Avoid double backslash in Windows path repr()
1543 return repr(s).replace('\\\\', '\\')
1538 return repr(s).replace('\\\\', '\\')
1544
1539
1545 # delay import of textwrap
1540 # delay import of textwrap
1546 def MBTextWrapper(**kwargs):
1541 def MBTextWrapper(**kwargs):
1547 class tw(textwrap.TextWrapper):
1542 class tw(textwrap.TextWrapper):
1548 """
1543 """
1549 Extend TextWrapper for width-awareness.
1544 Extend TextWrapper for width-awareness.
1550
1545
1551 Neither the number of 'bytes' in any encoding nor the number of
1546 Neither the number of 'bytes' in any encoding nor the number of
1552 'characters' is appropriate for calculating the terminal columns of a string.
1547 'characters' is appropriate for calculating the terminal columns of a string.
1553 
1548 
1554 The original TextWrapper implementation uses the built-in 'len()' directly,
1549 The original TextWrapper implementation uses the built-in 'len()' directly,
1555 so overriding is needed to use the width information of each character.
1550 so overriding is needed to use the width information of each character.
1556 
1551 
1557 In addition, characters classified as 'ambiguous' width are
1552 In addition, characters classified as 'ambiguous' width are
1558 treated as wide in East Asian locales, but as narrow elsewhere.
1553 treated as wide in East Asian locales, but as narrow elsewhere.
1559 
1554 
1560 This requires a user decision to determine the width of such characters.
1555 This requires a user decision to determine the width of such characters.
1561 """
1556 """
1562 def __init__(self, **kwargs):
1557 def __init__(self, **kwargs):
1563 textwrap.TextWrapper.__init__(self, **kwargs)
1558 textwrap.TextWrapper.__init__(self, **kwargs)
1564
1559
1565 # for compatibility between 2.4 and 2.6
1560 # for compatibility between 2.4 and 2.6
1566 if getattr(self, 'drop_whitespace', None) is None:
1561 if getattr(self, 'drop_whitespace', None) is None:
1567 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1562 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1568
1563
1569 def _cutdown(self, ucstr, space_left):
1564 def _cutdown(self, ucstr, space_left):
1570 l = 0
1565 l = 0
1571 colwidth = encoding.ucolwidth
1566 colwidth = encoding.ucolwidth
1572 for i in xrange(len(ucstr)):
1567 for i in xrange(len(ucstr)):
1573 l += colwidth(ucstr[i])
1568 l += colwidth(ucstr[i])
1574 if space_left < l:
1569 if space_left < l:
1575 return (ucstr[:i], ucstr[i:])
1570 return (ucstr[:i], ucstr[i:])
1576 return ucstr, ''
1571 return ucstr, ''
1577
1572
1578 # overriding of base class
1573 # overriding of base class
1579 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1574 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1580 space_left = max(width - cur_len, 1)
1575 space_left = max(width - cur_len, 1)
1581
1576
1582 if self.break_long_words:
1577 if self.break_long_words:
1583 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1578 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1584 cur_line.append(cut)
1579 cur_line.append(cut)
1585 reversed_chunks[-1] = res
1580 reversed_chunks[-1] = res
1586 elif not cur_line:
1581 elif not cur_line:
1587 cur_line.append(reversed_chunks.pop())
1582 cur_line.append(reversed_chunks.pop())
1588
1583
1589 # this overriding code is imported from TextWrapper of python 2.6
1584 # this overriding code is imported from TextWrapper of python 2.6
1590 # to calculate the columns of a string by 'encoding.ucolwidth()'
1585 # to calculate the columns of a string by 'encoding.ucolwidth()'
1591 def _wrap_chunks(self, chunks):
1586 def _wrap_chunks(self, chunks):
1592 colwidth = encoding.ucolwidth
1587 colwidth = encoding.ucolwidth
1593
1588
1594 lines = []
1589 lines = []
1595 if self.width <= 0:
1590 if self.width <= 0:
1596 raise ValueError("invalid width %r (must be > 0)" % self.width)
1591 raise ValueError("invalid width %r (must be > 0)" % self.width)
1597
1592
1598 # Arrange in reverse order so items can be efficiently popped
1593 # Arrange in reverse order so items can be efficiently popped
1599 # from a stack of chunks.
1594 # from a stack of chunks.
1600 chunks.reverse()
1595 chunks.reverse()
1601
1596
1602 while chunks:
1597 while chunks:
1603
1598
1604 # Start the list of chunks that will make up the current line.
1599 # Start the list of chunks that will make up the current line.
1605 # cur_len is just the length of all the chunks in cur_line.
1600 # cur_len is just the length of all the chunks in cur_line.
1606 cur_line = []
1601 cur_line = []
1607 cur_len = 0
1602 cur_len = 0
1608
1603
1609 # Figure out which static string will prefix this line.
1604 # Figure out which static string will prefix this line.
1610 if lines:
1605 if lines:
1611 indent = self.subsequent_indent
1606 indent = self.subsequent_indent
1612 else:
1607 else:
1613 indent = self.initial_indent
1608 indent = self.initial_indent
1614
1609
1615 # Maximum width for this line.
1610 # Maximum width for this line.
1616 width = self.width - len(indent)
1611 width = self.width - len(indent)
1617
1612
1618 # First chunk on line is whitespace -- drop it, unless this
1613 # First chunk on line is whitespace -- drop it, unless this
1619 # is the very beginning of the text (i.e. no lines started yet).
1614 # is the very beginning of the text (i.e. no lines started yet).
1620 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1615 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1621 del chunks[-1]
1616 del chunks[-1]
1622
1617
1623 while chunks:
1618 while chunks:
1624 l = colwidth(chunks[-1])
1619 l = colwidth(chunks[-1])
1625
1620
1626 # Can at least squeeze this chunk onto the current line.
1621 # Can at least squeeze this chunk onto the current line.
1627 if cur_len + l <= width:
1622 if cur_len + l <= width:
1628 cur_line.append(chunks.pop())
1623 cur_line.append(chunks.pop())
1629 cur_len += l
1624 cur_len += l
1630
1625
1631 # Nope, this line is full.
1626 # Nope, this line is full.
1632 else:
1627 else:
1633 break
1628 break
1634
1629
1635 # The current line is full, and the next chunk is too big to
1630 # The current line is full, and the next chunk is too big to
1636 # fit on *any* line (not just this one).
1631 # fit on *any* line (not just this one).
1637 if chunks and colwidth(chunks[-1]) > width:
1632 if chunks and colwidth(chunks[-1]) > width:
1638 self._handle_long_word(chunks, cur_line, cur_len, width)
1633 self._handle_long_word(chunks, cur_line, cur_len, width)
1639
1634
1640 # If the last chunk on this line is all whitespace, drop it.
1635 # If the last chunk on this line is all whitespace, drop it.
1641 if (self.drop_whitespace and
1636 if (self.drop_whitespace and
1642 cur_line and cur_line[-1].strip() == ''):
1637 cur_line and cur_line[-1].strip() == ''):
1643 del cur_line[-1]
1638 del cur_line[-1]
1644
1639
1645 # Convert current line back to a string and store it in list
1640 # Convert current line back to a string and store it in list
1646 # of all lines (return value).
1641 # of all lines (return value).
1647 if cur_line:
1642 if cur_line:
1648 lines.append(indent + ''.join(cur_line))
1643 lines.append(indent + ''.join(cur_line))
1649
1644
1650 return lines
1645 return lines
1651
1646
1652 global MBTextWrapper
1647 global MBTextWrapper
1653 MBTextWrapper = tw
1648 MBTextWrapper = tw
1654 return tw(**kwargs)
1649 return tw(**kwargs)
1655
1650
1656 def wrap(line, width, initindent='', hangindent=''):
1651 def wrap(line, width, initindent='', hangindent=''):
1657 maxindent = max(len(hangindent), len(initindent))
1652 maxindent = max(len(hangindent), len(initindent))
1658 if width <= maxindent:
1653 if width <= maxindent:
1659 # adjust for weird terminal size
1654 # adjust for weird terminal size
1660 width = max(78, maxindent + 1)
1655 width = max(78, maxindent + 1)
1661 line = line.decode(encoding.encoding, encoding.encodingmode)
1656 line = line.decode(encoding.encoding, encoding.encodingmode)
1662 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1657 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1663 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1658 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1664 wrapper = MBTextWrapper(width=width,
1659 wrapper = MBTextWrapper(width=width,
1665 initial_indent=initindent,
1660 initial_indent=initindent,
1666 subsequent_indent=hangindent)
1661 subsequent_indent=hangindent)
1667 return wrapper.fill(line).encode(encoding.encoding)
1662 return wrapper.fill(line).encode(encoding.encoding)
1668
1663
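A short usage sketch of width-aware wrapping with a hanging indent (the text and widths are illustrative); wrap() counts terminal columns, so East Asian wide characters consume two columns each:

    from mercurial import util

    para = 'a fairly long option description that should fold onto two lines'
    text = util.wrap(para, width=30, initindent='  ', hangindent='      ')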
1669 def iterlines(iterator):
1664 def iterlines(iterator):
1670 for chunk in iterator:
1665 for chunk in iterator:
1671 for line in chunk.splitlines():
1666 for line in chunk.splitlines():
1672 yield line
1667 yield line
1673
1668
1674 def expandpath(path):
1669 def expandpath(path):
1675 return os.path.expanduser(os.path.expandvars(path))
1670 return os.path.expanduser(os.path.expandvars(path))
1676
1671
1677 def hgcmd():
1672 def hgcmd():
1678 """Return the command used to execute current hg
1673 """Return the command used to execute current hg
1679
1674
1680 This is different from hgexecutable() because on Windows we want
1675 This is different from hgexecutable() because on Windows we want
1681 to avoid things like batch files that open new shell windows, so we
1676 to avoid things like batch files that open new shell windows, so we
1682 get either the python call or the current executable.
1677 get either the python call or the current executable.
1683 """
1678 """
1684 if mainfrozen():
1679 if mainfrozen():
1685 return [sys.executable]
1680 return [sys.executable]
1686 return gethgcmd()
1681 return gethgcmd()
1687
1682
1688 def rundetached(args, condfn):
1683 def rundetached(args, condfn):
1689 """Execute the argument list in a detached process.
1684 """Execute the argument list in a detached process.
1690
1685
1691 condfn is a callable which is called repeatedly and should return
1686 condfn is a callable which is called repeatedly and should return
1692 True once the child process is known to have started successfully.
1687 True once the child process is known to have started successfully.
1693 At this point, the child process PID is returned. If the child
1688 At this point, the child process PID is returned. If the child
1694 process fails to start or finishes before condfn() evaluates to
1689 process fails to start or finishes before condfn() evaluates to
1695 True, return -1.
1690 True, return -1.
1696 """
1691 """
1697 # Windows case is easier because the child process is either
1692 # Windows case is easier because the child process is either
1698 # successfully starting and validating the condition or exiting
1693 # successfully starting and validating the condition or exiting
1699 # on failure. We just poll on its PID. On Unix, if the child
1694 # on failure. We just poll on its PID. On Unix, if the child
1700 # process fails to start, it will be left in a zombie state until
1695 # process fails to start, it will be left in a zombie state until
1701 # the parent waits on it, which we cannot do since we expect a long
1696 # the parent waits on it, which we cannot do since we expect a long
1702 # running process on success. Instead we listen for SIGCHLD telling
1697 # running process on success. Instead we listen for SIGCHLD telling
1703 # us our child process terminated.
1698 # us our child process terminated.
1704 terminated = set()
1699 terminated = set()
1705 def handler(signum, frame):
1700 def handler(signum, frame):
1706 terminated.add(os.wait())
1701 terminated.add(os.wait())
1707 prevhandler = None
1702 prevhandler = None
1708 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1703 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1709 if SIGCHLD is not None:
1704 if SIGCHLD is not None:
1710 prevhandler = signal.signal(SIGCHLD, handler)
1705 prevhandler = signal.signal(SIGCHLD, handler)
1711 try:
1706 try:
1712 pid = spawndetached(args)
1707 pid = spawndetached(args)
1713 while not condfn():
1708 while not condfn():
1714 if ((pid in terminated or not testpid(pid))
1709 if ((pid in terminated or not testpid(pid))
1715 and not condfn()):
1710 and not condfn()):
1716 return -1
1711 return -1
1717 time.sleep(0.1)
1712 time.sleep(0.1)
1718 return pid
1713 return pid
1719 finally:
1714 finally:
1720 if prevhandler is not None:
1715 if prevhandler is not None:
1721 signal.signal(signal.SIGCHLD, prevhandler)
1716 signal.signal(signal.SIGCHLD, prevhandler)
1722
1717
1723 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1718 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1724 """Return the result of interpolating items in the mapping into string s.
1719 """Return the result of interpolating items in the mapping into string s.
1725
1720
1726 prefix is a single character string, or a two character string with
1721 prefix is a single character string, or a two character string with
1727 a backslash as the first character if the prefix needs to be escaped in
1722 a backslash as the first character if the prefix needs to be escaped in
1728 a regular expression.
1723 a regular expression.
1729
1724
1730 fn is an optional function that will be applied to the replacement text
1725 fn is an optional function that will be applied to the replacement text
1731 just before replacement.
1726 just before replacement.
1732
1727
1733 escape_prefix is an optional flag that allows using doubled prefix for
1728 escape_prefix is an optional flag that allows using doubled prefix for
1734 its escaping.
1729 its escaping.
1735 """
1730 """
1736 fn = fn or (lambda s: s)
1731 fn = fn or (lambda s: s)
1737 patterns = '|'.join(mapping.keys())
1732 patterns = '|'.join(mapping.keys())
1738 if escape_prefix:
1733 if escape_prefix:
1739 patterns += '|' + prefix
1734 patterns += '|' + prefix
1740 if len(prefix) > 1:
1735 if len(prefix) > 1:
1741 prefix_char = prefix[1:]
1736 prefix_char = prefix[1:]
1742 else:
1737 else:
1743 prefix_char = prefix
1738 prefix_char = prefix
1744 mapping[prefix_char] = prefix_char
1739 mapping[prefix_char] = prefix_char
1745 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1740 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1746 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1741 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1747
1742
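A minimal sketch of interpolate() with a $-style prefix: placeholders are expanded from the mapping, and with escape_prefix=True a doubled prefix yields a literal one (the prefix must already be escaped for use in a regular expression, hence the backslash):

    from mercurial import util

    mapping = {'user': 'alice', 'rev': '42'}
    s = 'checked in by $user as r$rev; cost: $$5'
    util.interpolate(r'\$', mapping, s, escape_prefix=True)
    # -> 'checked in by alice as r42; cost: $5'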
1748 def getport(port):
1743 def getport(port):
1749 """Return the port for a given network service.
1744 """Return the port for a given network service.
1750
1745
1751 If port is an integer, it's returned as is. If it's a string, it's
1746 If port is an integer, it's returned as is. If it's a string, it's
1752 looked up using socket.getservbyname(). If there's no matching
1747 looked up using socket.getservbyname(). If there's no matching
1753 service, util.Abort is raised.
1748 service, util.Abort is raised.
1754 """
1749 """
1755 try:
1750 try:
1756 return int(port)
1751 return int(port)
1757 except ValueError:
1752 except ValueError:
1758 pass
1753 pass
1759
1754
1760 try:
1755 try:
1761 return socket.getservbyname(port)
1756 return socket.getservbyname(port)
1762 except socket.error:
1757 except socket.error:
1763 raise Abort(_("no port number associated with service '%s'") % port)
1758 raise Abort(_("no port number associated with service '%s'") % port)
1764
1759
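A quick sketch of getport(): integers and integer strings pass straight through, while service names are resolved via socket.getservbyname():

    from mercurial import util

    util.getport(8000)     # 8000
    util.getport('8000')   # 8000
    util.getport('http')   # 80 on most systems; raises Abort for unknown services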
1765 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1760 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1766 '0': False, 'no': False, 'false': False, 'off': False,
1761 '0': False, 'no': False, 'false': False, 'off': False,
1767 'never': False}
1762 'never': False}
1768
1763
1769 def parsebool(s):
1764 def parsebool(s):
1770 """Parse s into a boolean.
1765 """Parse s into a boolean.
1771
1766
1772 If s is not a valid boolean, returns None.
1767 If s is not a valid boolean, returns None.
1773 """
1768 """
1774 return _booleans.get(s.lower(), None)
1769 return _booleans.get(s.lower(), None)
1775
1770
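A small sketch of the tri-state behaviour of parsebool(): recognised strings map to True or False, and anything else yields None so callers can report an invalid setting:

    from mercurial import util

    util.parsebool('on')      # True
    util.parsebool('Never')   # False (lookup is case-insensitive)
    util.parsebool('maybe')   # None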
1776 _hexdig = '0123456789ABCDEFabcdef'
1771 _hexdig = '0123456789ABCDEFabcdef'
1777 _hextochr = dict((a + b, chr(int(a + b, 16)))
1772 _hextochr = dict((a + b, chr(int(a + b, 16)))
1778 for a in _hexdig for b in _hexdig)
1773 for a in _hexdig for b in _hexdig)
1779
1774
1780 def _urlunquote(s):
1775 def _urlunquote(s):
1781 """Decode HTTP/HTML % encoding.
1776 """Decode HTTP/HTML % encoding.
1782
1777
1783 >>> _urlunquote('abc%20def')
1778 >>> _urlunquote('abc%20def')
1784 'abc def'
1779 'abc def'
1785 """
1780 """
1786 res = s.split('%')
1781 res = s.split('%')
1787 # fastpath
1782 # fastpath
1788 if len(res) == 1:
1783 if len(res) == 1:
1789 return s
1784 return s
1790 s = res[0]
1785 s = res[0]
1791 for item in res[1:]:
1786 for item in res[1:]:
1792 try:
1787 try:
1793 s += _hextochr[item[:2]] + item[2:]
1788 s += _hextochr[item[:2]] + item[2:]
1794 except KeyError:
1789 except KeyError:
1795 s += '%' + item
1790 s += '%' + item
1796 except UnicodeDecodeError:
1791 except UnicodeDecodeError:
1797 s += unichr(int(item[:2], 16)) + item[2:]
1792 s += unichr(int(item[:2], 16)) + item[2:]
1798 return s
1793 return s
1799
1794
1800 class url(object):
1795 class url(object):
1801 r"""Reliable URL parser.
1796 r"""Reliable URL parser.
1802
1797
1803 This parses URLs and provides attributes for the following
1798 This parses URLs and provides attributes for the following
1804 components:
1799 components:
1805
1800
1806 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1801 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1807
1802
1808 Missing components are set to None. The only exception is
1803 Missing components are set to None. The only exception is
1809 fragment, which is set to '' if present but empty.
1804 fragment, which is set to '' if present but empty.
1810
1805
1811 If parsefragment is False, fragment is included in query. If
1806 If parsefragment is False, fragment is included in query. If
1812 parsequery is False, query is included in path. If both are
1807 parsequery is False, query is included in path. If both are
1813 False, both fragment and query are included in path.
1808 False, both fragment and query are included in path.
1814
1809
1815 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1810 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1816
1811
1817 Note that for backward compatibility reasons, bundle URLs do not
1812 Note that for backward compatibility reasons, bundle URLs do not
1818 take host names. That means 'bundle://../' has a path of '../'.
1813 take host names. That means 'bundle://../' has a path of '../'.
1819
1814
1820 Examples:
1815 Examples:
1821
1816
1822 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1817 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1823 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1818 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1824 >>> url('ssh://[::1]:2200//home/joe/repo')
1819 >>> url('ssh://[::1]:2200//home/joe/repo')
1825 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1820 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1826 >>> url('file:///home/joe/repo')
1821 >>> url('file:///home/joe/repo')
1827 <url scheme: 'file', path: '/home/joe/repo'>
1822 <url scheme: 'file', path: '/home/joe/repo'>
1828 >>> url('file:///c:/temp/foo/')
1823 >>> url('file:///c:/temp/foo/')
1829 <url scheme: 'file', path: 'c:/temp/foo/'>
1824 <url scheme: 'file', path: 'c:/temp/foo/'>
1830 >>> url('bundle:foo')
1825 >>> url('bundle:foo')
1831 <url scheme: 'bundle', path: 'foo'>
1826 <url scheme: 'bundle', path: 'foo'>
1832 >>> url('bundle://../foo')
1827 >>> url('bundle://../foo')
1833 <url scheme: 'bundle', path: '../foo'>
1828 <url scheme: 'bundle', path: '../foo'>
1834 >>> url(r'c:\foo\bar')
1829 >>> url(r'c:\foo\bar')
1835 <url path: 'c:\\foo\\bar'>
1830 <url path: 'c:\\foo\\bar'>
1836 >>> url(r'\\blah\blah\blah')
1831 >>> url(r'\\blah\blah\blah')
1837 <url path: '\\\\blah\\blah\\blah'>
1832 <url path: '\\\\blah\\blah\\blah'>
1838 >>> url(r'\\blah\blah\blah#baz')
1833 >>> url(r'\\blah\blah\blah#baz')
1839 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1834 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1840 >>> url(r'file:///C:\users\me')
1835 >>> url(r'file:///C:\users\me')
1841 <url scheme: 'file', path: 'C:\\users\\me'>
1836 <url scheme: 'file', path: 'C:\\users\\me'>
1842
1837
1843 Authentication credentials:
1838 Authentication credentials:
1844
1839
1845 >>> url('ssh://joe:xyz@x/repo')
1840 >>> url('ssh://joe:xyz@x/repo')
1846 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1841 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1847 >>> url('ssh://joe@x/repo')
1842 >>> url('ssh://joe@x/repo')
1848 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1843 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1849
1844
1850 Query strings and fragments:
1845 Query strings and fragments:
1851
1846
1852 >>> url('http://host/a?b#c')
1847 >>> url('http://host/a?b#c')
1853 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1848 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1854 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1849 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1855 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1850 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1856 """
1851 """
1857
1852
1858 _safechars = "!~*'()+"
1853 _safechars = "!~*'()+"
1859 _safepchars = "/!~*'()+:\\"
1854 _safepchars = "/!~*'()+:\\"
1860 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1855 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1861
1856
1862 def __init__(self, path, parsequery=True, parsefragment=True):
1857 def __init__(self, path, parsequery=True, parsefragment=True):
1863 # We slowly chomp away at path until we have only the path left
1858 # We slowly chomp away at path until we have only the path left
1864 self.scheme = self.user = self.passwd = self.host = None
1859 self.scheme = self.user = self.passwd = self.host = None
1865 self.port = self.path = self.query = self.fragment = None
1860 self.port = self.path = self.query = self.fragment = None
1866 self._localpath = True
1861 self._localpath = True
1867 self._hostport = ''
1862 self._hostport = ''
1868 self._origpath = path
1863 self._origpath = path
1869
1864
1870 if parsefragment and '#' in path:
1865 if parsefragment and '#' in path:
1871 path, self.fragment = path.split('#', 1)
1866 path, self.fragment = path.split('#', 1)
1872 if not path:
1867 if not path:
1873 path = None
1868 path = None
1874
1869
1875 # special case for Windows drive letters and UNC paths
1870 # special case for Windows drive letters and UNC paths
1876 if hasdriveletter(path) or path.startswith(r'\\'):
1871 if hasdriveletter(path) or path.startswith(r'\\'):
1877 self.path = path
1872 self.path = path
1878 return
1873 return
1879
1874
1880 # For compatibility reasons, we can't handle bundle paths as
1875 # For compatibility reasons, we can't handle bundle paths as
1881 # normal URLs
1876 # normal URLs
1882 if path.startswith('bundle:'):
1877 if path.startswith('bundle:'):
1883 self.scheme = 'bundle'
1878 self.scheme = 'bundle'
1884 path = path[7:]
1879 path = path[7:]
1885 if path.startswith('//'):
1880 if path.startswith('//'):
1886 path = path[2:]
1881 path = path[2:]
1887 self.path = path
1882 self.path = path
1888 return
1883 return
1889
1884
1890 if self._matchscheme(path):
1885 if self._matchscheme(path):
1891 parts = path.split(':', 1)
1886 parts = path.split(':', 1)
1892 if parts[0]:
1887 if parts[0]:
1893 self.scheme, path = parts
1888 self.scheme, path = parts
1894 self._localpath = False
1889 self._localpath = False
1895
1890
1896 if not path:
1891 if not path:
1897 path = None
1892 path = None
1898 if self._localpath:
1893 if self._localpath:
1899 self.path = ''
1894 self.path = ''
1900 return
1895 return
1901 else:
1896 else:
1902 if self._localpath:
1897 if self._localpath:
1903 self.path = path
1898 self.path = path
1904 return
1899 return
1905
1900
1906 if parsequery and '?' in path:
1901 if parsequery and '?' in path:
1907 path, self.query = path.split('?', 1)
1902 path, self.query = path.split('?', 1)
1908 if not path:
1903 if not path:
1909 path = None
1904 path = None
1910 if not self.query:
1905 if not self.query:
1911 self.query = None
1906 self.query = None
1912
1907
1913 # // is required to specify a host/authority
1908 # // is required to specify a host/authority
1914 if path and path.startswith('//'):
1909 if path and path.startswith('//'):
1915 parts = path[2:].split('/', 1)
1910 parts = path[2:].split('/', 1)
1916 if len(parts) > 1:
1911 if len(parts) > 1:
1917 self.host, path = parts
1912 self.host, path = parts
1918 else:
1913 else:
1919 self.host = parts[0]
1914 self.host = parts[0]
1920 path = None
1915 path = None
1921 if not self.host:
1916 if not self.host:
1922 self.host = None
1917 self.host = None
1923 # path of file:///d is /d
1918 # path of file:///d is /d
1924 # path of file:///d:/ is d:/, not /d:/
1919 # path of file:///d:/ is d:/, not /d:/
1925 if path and not hasdriveletter(path):
1920 if path and not hasdriveletter(path):
1926 path = '/' + path
1921 path = '/' + path
1927
1922
1928 if self.host and '@' in self.host:
1923 if self.host and '@' in self.host:
1929 self.user, self.host = self.host.rsplit('@', 1)
1924 self.user, self.host = self.host.rsplit('@', 1)
1930 if ':' in self.user:
1925 if ':' in self.user:
1931 self.user, self.passwd = self.user.split(':', 1)
1926 self.user, self.passwd = self.user.split(':', 1)
1932 if not self.host:
1927 if not self.host:
1933 self.host = None
1928 self.host = None
1934
1929
1935 # Don't split on colons in IPv6 addresses without ports
1930 # Don't split on colons in IPv6 addresses without ports
1936 if (self.host and ':' in self.host and
1931 if (self.host and ':' in self.host and
1937 not (self.host.startswith('[') and self.host.endswith(']'))):
1932 not (self.host.startswith('[') and self.host.endswith(']'))):
1938 self._hostport = self.host
1933 self._hostport = self.host
1939 self.host, self.port = self.host.rsplit(':', 1)
1934 self.host, self.port = self.host.rsplit(':', 1)
1940 if not self.host:
1935 if not self.host:
1941 self.host = None
1936 self.host = None
1942
1937
1943 if (self.host and self.scheme == 'file' and
1938 if (self.host and self.scheme == 'file' and
1944 self.host not in ('localhost', '127.0.0.1', '[::1]')):
1939 self.host not in ('localhost', '127.0.0.1', '[::1]')):
1945 raise Abort(_('file:// URLs can only refer to localhost'))
1940 raise Abort(_('file:// URLs can only refer to localhost'))
1946
1941
1947 self.path = path
1942 self.path = path
1948
1943
1949 # leave the query string escaped
1944 # leave the query string escaped
1950 for a in ('user', 'passwd', 'host', 'port',
1945 for a in ('user', 'passwd', 'host', 'port',
1951 'path', 'fragment'):
1946 'path', 'fragment'):
1952 v = getattr(self, a)
1947 v = getattr(self, a)
1953 if v is not None:
1948 if v is not None:
1954 setattr(self, a, _urlunquote(v))
1949 setattr(self, a, _urlunquote(v))
1955
1950
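    # Editor's note -- an illustrative trace, not part of the original module:
    # feeding 'ssh://user:pw@[::1]:2222/repo#tip' through the chomping above
    # would leave scheme='ssh', user='user', passwd='pw', host='[::1]',
    # port='2222', path='repo' and fragment='tip'; the final rsplit on ':'
    # peels the port off while keeping the bracketed IPv6 literal intact.
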
    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

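    # Illustrative sketch (editor's addition, hypothetical values): for
    # url('http://joe:secret@example.com/repo'), authinfo() would return
    # ('http://example.com/repo',
    #  (None, ('http://example.com/repo', 'example.com'), 'joe', 'secret'));
    # the first element is the URL with credentials dropped, the second is
    # the realm/uri/user/password tuple handed to urllib2's password manager.
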
    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')

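    # Editor's note (illustrative, not in the original source): with the
    # parsing above, url('file:///tmp/foo').localpath() yields '/tmp/foo'
    # while url('file:///c:/tmp/foo').localpath() yields 'c:/tmp/foo'; for a
    # plain local path, localpath() simply hands back the original string,
    # which islocal() then reports as something posixfile can open.
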
def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()

def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

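# Editor's illustrative examples (not part of the original module):
# hasdriveletter('c:\\data') and hasdriveletter('C:/data') are truthy while
# hasdriveletter('/tmp') is falsy; urllocalpath('file:///tmp/foo') should
# give '/tmp/foo', with '?' and '#' treated as literal path characters
# because parsequery and parsefragment are disabled.
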
def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return str(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)

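# Editor's sketch (hypothetical values, not in the original source): given
# 'http://joe:secret@example.com/repo', hidepassword() would return
# 'http://joe:***@example.com/repo' and removeauth() 'http://example.com/repo',
# both rebuilt through url.__str__ above.
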
def isatty(fd):
    try:
        return fd.isatty()
    except AttributeError:
        return False

timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

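# Editor's note (illustrative, assuming unitcountfn picks the first row
# whose threshold is met): timecount(12.5) would render as '12.50 s' and
# timecount(0.0025) as '2.500 ms'.
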
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper

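# Editor's note (illustrative, hypothetical timings): nested @timed
# functions indent their report by two spaces per nesting level, and the
# innermost call reports first, so a call chain might print lines such as
#   inner: 980.0 us
# followed by
# outer: 1.234 ms
# on stderr once the outermost call returns.
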
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behaviour. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results

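# Editor's illustrative sketch (hypothetical names, not part of the original
# module): a typical use of the hooks class defined above.
#
#     summaryhooks = hooks()
#     summaryhooks.add('extA', lambda ui, repo: ui.note('A\n'))
#     summaryhooks.add('extB', lambda ui, repo: ui.note('B\n'))
#     summaryhooks(ui, repo)  # 'extA' runs before 'extB' (lexicographic order)
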
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require a ui
    object. It is not meant for production code, but it is very convenient
    while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    entries = [('%s:%s' % (fn, ln), func)
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (fnmax, fnln, func))
    f.flush()

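# Editor's note (illustrative, hypothetical file names and line numbers):
# calling debugstacktrace('here') from deep inside a command would emit
# something like
#   here at:
#    mercurial/dispatch.py:910 in _runcommand
#    mercurial/commands.py:123 in summary
# on stderr, with the file:line column padded to a common width.
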
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs

if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs

def finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

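# Editor's illustrative note (not part of the original module): finddirs
# walks ancestors from the deepest one up, so finddirs('a/b/c') yields
# 'a/b' then 'a'; with the pure-Python fallback above, dirs({'a/b/c': 1,
# 'a/d': 1}) therefore contains 'a' and 'a/b' but not the files themselves.
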
# convenient shortcut
dst = debugstacktrace