util: drop any() and all() polyfills
Augie Fackler
r25152:ac2e66f4 default
@@ -1,2276 +1,2261 b''
# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""

import i18n
_ = i18n._
import error, osutil, encoding, parsers
import errno, shutil, sys, tempfile, traceback
import re as remod
import os, time, datetime, calendar, textwrap, signal, collections
import imp, socket, urllib, struct
import gc

if os.name == 'nt':
    import windows as platform
else:
    import posix as platform

cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username

# Python compatibility

_notset = object()

def safehasattr(thing, attr):
    return getattr(thing, attr, _notset) is not _notset

def sha1(s=''):
    '''
    Low-overhead wrapper around Python's SHA support

    >>> f = _fastsha1
    >>> a = sha1()
    >>> a = f()
    >>> a.hexdigest()
    'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    '''

    return _fastsha1(s)

def _fastsha1(s=''):
    # This function will import sha1 from hashlib or sha (whichever is
    # available) and overwrite itself with it on the first call.
    # Subsequent calls will go directly to the imported function.
    if sys.version_info >= (2, 5):
        from hashlib import sha1 as _sha1
    else:
        from sha import sha as _sha1
    global _fastsha1, sha1
    _fastsha1 = sha1 = _sha1
    return _sha1(s)

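# Illustrative sketch (not part of util.py): the "replace yourself on first
# call" pattern above means the first sha1() call pays the import cost and
# rebinds the module-level names, so later calls go straight to hashlib.
# The _demo_* helper name is hypothetical.
def _demo_fastsha1():
    # first call triggers the lazy import and rebinds sha1/_fastsha1
    first = sha1('foo').hexdigest()
    # subsequent calls hit the rebound hashlib constructor directly
    second = sha1('foo').hexdigest()
    assert first == second
    return first
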
def md5(s=''):
    try:
        from hashlib import md5 as _md5
    except ImportError:
        from md5 import md5 as _md5
    global md5
    md5 = _md5
    return _md5(s)

DIGESTS = {
    'md5': md5,
    'sha1': sha1,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha1', 'md5']

try:
    import hashlib
    DIGESTS.update({
        'sha512': hashlib.sha512,
    })
    DIGESTS_BY_STRENGTH.insert(0, 'sha512')
except ImportError:
    pass

for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None

class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))

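# Illustrative sketch (not part of util.py): wrap a readable file object in
# digestchecker to verify both length and checksum while consuming it. Here
# fh is assumed to be a file-like object whose content is the three bytes
# 'foo'; the md5 value is the one from the digester doctest above.
def _demo_digestchecker(fh):
    expected = {'md5': 'acbd18db4cc2f85cedef654fccc4a4d8'}
    checked = digestchecker(fh, 3, expected)
    while checked.read(4096):
        pass
    # raises Abort if the size or any digest does not match
    checked.validate()
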
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]

import subprocess
closefds = os.name == 'posix'

def unpacker(fmt):
    """create a struct unpacker for the specified format"""
    try:
        # 2.5+
        return struct.Struct(fmt).unpack
    except AttributeError:
        # 2.4
        return lambda buf: struct.unpack(fmt, buf)

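# Illustrative sketch (not part of util.py): unpacker() returns a callable
# bound to one struct format, so a fixed-width record is parsed the same way
# on Python 2.4 and 2.5+. The ">HH" layout below is a made-up example.
def _demo_unpacker():
    # two big-endian 16-bit fields
    unpack = unpacker('>HH')
    width, height = unpack('\x00\x02\x00\x03')
    assert (width, height) == (2, 3)
    return width, height
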
def popen2(cmd, env=None, newlines=False):
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout

def popen3(cmd, env=None, newlines=False):
    stdin, stdout, stderr, p = popen4(cmd, env, newlines)
    return stdin, stdout, stderr

def popen4(cmd, env=None, newlines=False):
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr, p

def version():
    """Return version information if available."""
    try:
        import __version__
        return __version__.version
    except ImportError:
        return 'unknown'

# used by parsedate
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)

def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    if func.func_code.co_argcount == 0:
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f

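# Illustrative sketch (not part of util.py): cachefunc memoizes by positional
# arguments, so a pure function is only evaluated once per distinct input.
def _demo_cachefunc():
    calls = []
    def square(x):
        calls.append(x)
        return x * x
    cached = cachefunc(square)
    results = (cached(3), cached(3), cached(4))
    # square() ran only once for 3 and once for 4
    assert results == (9, 9, 16) and calls == [3, 4]
    return results
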
class sortdict(dict):
    '''a simple sorted dictionary'''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            pass
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)

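# Illustrative sketch (not part of util.py): sortdict keeps keys in insertion
# order, and re-setting a key moves it to the back, unlike a plain dict on
# Python 2.
def _demo_sortdict():
    d = sortdict([('b', 1), ('a', 2)])
    d['c'] = 3
    d['b'] = 4          # 'b' moves to the back
    assert d.keys() == ['a', 'c', 'b']
    return d.items()
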
class lrucachedict(object):
    '''cache most recent gets from or sets to this dictionary'''
    def __init__(self, maxsize):
        self._cache = {}
        self._maxsize = maxsize
        self._order = collections.deque()

    def __getitem__(self, key):
        value = self._cache[key]
        self._order.remove(key)
        self._order.append(key)
        return value

    def __setitem__(self, key, value):
        if key not in self._cache:
            if len(self._cache) >= self._maxsize:
                del self._cache[self._order.popleft()]
        else:
            self._order.remove(key)
        self._cache[key] = value
        self._order.append(key)

    def __contains__(self, key):
        return key in self._cache

    def clear(self):
        self._cache.clear()
        self._order = collections.deque()

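# Illustrative sketch (not part of util.py): lrucachedict evicts the least
# recently touched key once maxsize entries are cached.
def _demo_lrucachedict():
    cache = lrucachedict(2)
    cache['a'] = 1
    cache['b'] = 2
    cache['a']            # touch 'a' so 'b' becomes the eviction candidate
    cache['c'] = 3        # evicts 'b'
    assert 'a' in cache and 'c' in cache and 'b' not in cache
    return cache
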
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.func_code.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f

class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value

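# Illustrative sketch (not part of util.py): propertycache computes an
# attribute once, then stores the result in the instance __dict__, so the
# descriptor is bypassed on later lookups. The demo class is hypothetical.
class _demopropertycache(object):
    def __init__(self):
        self.computed = 0
    @propertycache
    def expensive(self):
        self.computed += 1
        return 42

def _demo_propertycache():
    obj = _demopropertycache()
    assert (obj.expensive, obj.expensive) == (42, 42)
    assert obj.computed == 1    # the function body ran only once
    return obj.expensive
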
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout

def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)

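# Illustrative sketch (not part of util.py): filter() dispatches on the
# command prefix, so 'pipe:' streams through stdin/stdout while 'tempfile:'
# substitutes the INFILE/OUTFILE placeholders. The commands below assume a
# POSIX shell with tr(1) available.
def _demo_filter():
    upper = filter('banana\n', 'pipe: tr a-z A-Z')
    same = filter('banana\n', 'tempfile: tr a-z A-Z <INFILE >OUTFILE')
    assert upper == same == 'BANANA\n'
    return upper
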
def binary(s):
    """return true if a string is binary data"""
    return bool(s and '\0' in s)

def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)

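# Illustrative sketch (not part of util.py): increasingchunks() rebuffers an
# iterable of small strings into progressively larger chunks, which keeps
# write() call counts down when streaming lots of tiny pieces. all() here is
# the builtin; this changeset drops the old any()/all() polyfills.
def _demo_increasingchunks():
    source = ('x' * 100 for _ in xrange(100))
    chunks = list(increasingchunks(source, min=256, max=1024))
    assert ''.join(chunks) == 'x' * 10000
    # every chunk except possibly the last honors the current minimum size
    assert all(len(c) >= 256 for c in chunks[:-1])
    return [len(c) for c in chunks]
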
Abort = error.Abort

def always(fn):
    return True

def never(fn):
    return False

def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7.
561 """
561 """
562 def wrapper(*args, **kwargs):
562 def wrapper(*args, **kwargs):
563 gcenabled = gc.isenabled()
563 gcenabled = gc.isenabled()
564 gc.disable()
564 gc.disable()
565 try:
565 try:
566 return func(*args, **kwargs)
566 return func(*args, **kwargs)
567 finally:
567 finally:
568 if gcenabled:
568 if gcenabled:
569 gc.enable()
569 gc.enable()
570 return wrapper
570 return wrapper
571
571
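# Illustrative sketch (not part of util.py): nogc is used as a decorator so
# that cyclic-GC passes cannot fire in the middle of building a large
# container, while the previous collector state is restored afterwards.
@nogc
def _demo_nogc(n):
    # build a big dict without the collector interrupting the loop
    return dict((i, str(i)) for i in xrange(n))
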
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'

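# Illustrative sketch (not part of util.py): pathto() answers "how do I get
# from directory n1 to file n2", both taken relative to the repository root.
# The paths below are hypothetical.
def _demo_pathto():
    # from tests/unit back up to docs/readme.txt inside the same root
    rel = pathto('/repo', os.sep.join(['tests', 'unit']), 'docs/readme.txt')
    assert rel == os.path.join('..', '..', 'docs', 'readme.txt')
    return rel
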
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze

# the location of data files matching the source code
if mainfrozen():
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable

def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path

def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or out == sys.__stdout__:
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
        if sys.platform == 'OpenVMS' and rc & 1:
            rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc

def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check

def copyfile(src, dest, hardlink=False):
    "copy a file, preserving mode and atime/mtime"
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error, inst:
            raise Abort(str(inst))

def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num

_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t

if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename

def makelock(info, pathname):
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    try:
        return os.readlink(pathname)
    except OSError, why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r

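# Illustrative sketch (not part of util.py): makelock() prefers a symlink
# whose target carries the lock holder's info and falls back to a regular
# file where symlinks are unavailable; readlock() reverses either encoding.
# lockname is a hypothetical path, e.g. inside a throwaway temp directory.
def _demo_makelock(lockname):
    makelock('pid:12345', lockname)
    try:
        return readlock(lockname)   # -> 'pid:12345'
    finally:
        os.unlink(lockname)
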
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)

# File system features

def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True

try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False

class _re(object):
    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

re = _re()

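# Illustrative sketch (not part of util.py): callers go through util.re so
# the faster re2 engine is used transparently when it is installed and the
# pattern/flags allow it, with a silent fallback to the stdlib re module.
def _demo_re():
    pat = re.compile(r'issue(\d+)', remod.IGNORECASE)
    m = pat.match('Issue4546')
    assert m and m.group(1) == '4546'
    return re.escape('a+b')     # -> 'a\\+b'
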
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    seps.replace('\\','\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patch of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)

def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass

def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)

def splitpath(path):
    '''Split path by os.sep.
1016 Note that this function does not use os.altsep because it is
1016 Note that this function does not use os.altsep because it is
1017 meant as a simple alternative to "xxx.split(os.sep)".
1017 meant as a simple alternative to "xxx.split(os.sep)".
1018 It is recommended to apply os.path.normpath() before using this
1018 It is recommended to apply os.path.normpath() before using this
1019 function, if needed.'''
1019 function, if needed.'''
1020 return path.split(os.sep)
1020 return path.split(os.sep)
1021
1021
1022 def gui():
1022 def gui():
1023 '''Are we running in a GUI?'''
1023 '''Are we running in a GUI?'''
1024 if sys.platform == 'darwin':
1024 if sys.platform == 'darwin':
1025 if 'SSH_CONNECTION' in os.environ:
1025 if 'SSH_CONNECTION' in os.environ:
1026 # handle SSH access to a box where the user is logged in
1026 # handle SSH access to a box where the user is logged in
1027 return False
1027 return False
1028 elif getattr(osutil, 'isgui', None):
1028 elif getattr(osutil, 'isgui', None):
1029 # check if a CoreGraphics session is available
1029 # check if a CoreGraphics session is available
1030 return osutil.isgui()
1030 return osutil.isgui()
1031 else:
1031 else:
1032 # pure build; use a safe default
1032 # pure build; use a safe default
1033 return True
1033 return True
1034 else:
1034 else:
1035 return os.name == "nt" or os.environ.get("DISPLAY")
1035 return os.name == "nt" or os.environ.get("DISPLAY")
1036
1036
1037 def mktempcopy(name, emptyok=False, createmode=None):
1037 def mktempcopy(name, emptyok=False, createmode=None):
1038 """Create a temporary file with the same contents as name
1038 """Create a temporary file with the same contents as name
1039
1039
1040 The permission bits are copied from the original file.
1040 The permission bits are copied from the original file.
1041
1041
1042 If the temporary file is going to be truncated immediately, you
1042 If the temporary file is going to be truncated immediately, you
1043 can use emptyok=True as an optimization.
1043 can use emptyok=True as an optimization.
1044
1044
1045 Returns the name of the temporary file.
1045 Returns the name of the temporary file.
1046 """
1046 """
1047 d, fn = os.path.split(name)
1047 d, fn = os.path.split(name)
1048 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1048 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1049 os.close(fd)
1049 os.close(fd)
1050 # Temporary files are created with mode 0600, which is usually not
1050 # Temporary files are created with mode 0600, which is usually not
1051 # what we want. If the original file already exists, just copy
1051 # what we want. If the original file already exists, just copy
1052 # its mode. Otherwise, manually obey umask.
1052 # its mode. Otherwise, manually obey umask.
1053 copymode(name, temp, createmode)
1053 copymode(name, temp, createmode)
1054 if emptyok:
1054 if emptyok:
1055 return temp
1055 return temp
1056 try:
1056 try:
1057 try:
1057 try:
1058 ifp = posixfile(name, "rb")
1058 ifp = posixfile(name, "rb")
1059 except IOError, inst:
1059 except IOError, inst:
1060 if inst.errno == errno.ENOENT:
1060 if inst.errno == errno.ENOENT:
1061 return temp
1061 return temp
1062 if not getattr(inst, 'filename', None):
1062 if not getattr(inst, 'filename', None):
1063 inst.filename = name
1063 inst.filename = name
1064 raise
1064 raise
1065 ofp = posixfile(temp, "wb")
1065 ofp = posixfile(temp, "wb")
1066 for chunk in filechunkiter(ifp):
1066 for chunk in filechunkiter(ifp):
1067 ofp.write(chunk)
1067 ofp.write(chunk)
1068 ifp.close()
1068 ifp.close()
1069 ofp.close()
1069 ofp.close()
1070 except: # re-raises
1070 except: # re-raises
1071 try: os.unlink(temp)
1071 try: os.unlink(temp)
1072 except OSError: pass
1072 except OSError: pass
1073 raise
1073 raise
1074 return temp
1074 return temp
1075
1075
1076 class atomictempfile(object):
1076 class atomictempfile(object):
1077 '''writable file object that atomically updates a file
1077 '''writable file object that atomically updates a file
1078
1078
1079 All writes will go to a temporary copy of the original file. Call
1079 All writes will go to a temporary copy of the original file. Call
1080 close() when you are done writing, and atomictempfile will rename
1080 close() when you are done writing, and atomictempfile will rename
1081 the temporary copy to the original name, making the changes
1081 the temporary copy to the original name, making the changes
1082 visible. If the object is destroyed without being closed, all your
1082 visible. If the object is destroyed without being closed, all your
1083 writes are discarded.
1083 writes are discarded.
1084 '''
1084 '''
1085 def __init__(self, name, mode='w+b', createmode=None):
1085 def __init__(self, name, mode='w+b', createmode=None):
1086 self.__name = name # permanent name
1086 self.__name = name # permanent name
1087 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1087 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1088 createmode=createmode)
1088 createmode=createmode)
1089 self._fp = posixfile(self._tempname, mode)
1089 self._fp = posixfile(self._tempname, mode)
1090
1090
1091 # delegated methods
1091 # delegated methods
1092 self.write = self._fp.write
1092 self.write = self._fp.write
1093 self.seek = self._fp.seek
1093 self.seek = self._fp.seek
1094 self.tell = self._fp.tell
1094 self.tell = self._fp.tell
1095 self.fileno = self._fp.fileno
1095 self.fileno = self._fp.fileno
1096
1096
1097 def close(self):
1097 def close(self):
1098 if not self._fp.closed:
1098 if not self._fp.closed:
1099 self._fp.close()
1099 self._fp.close()
1100 rename(self._tempname, localpath(self.__name))
1100 rename(self._tempname, localpath(self.__name))
1101
1101
1102 def discard(self):
1102 def discard(self):
1103 if not self._fp.closed:
1103 if not self._fp.closed:
1104 try:
1104 try:
1105 os.unlink(self._tempname)
1105 os.unlink(self._tempname)
1106 except OSError:
1106 except OSError:
1107 pass
1107 pass
1108 self._fp.close()
1108 self._fp.close()
1109
1109
1110 def __del__(self):
1110 def __del__(self):
1111 if safehasattr(self, '_fp'): # constructor actually did something
1111 if safehasattr(self, '_fp'): # constructor actually did something
1112 self.discard()
1112 self.discard()
1113
1113
1114 def makedirs(name, mode=None, notindexed=False):
1114 def makedirs(name, mode=None, notindexed=False):
1115 """recursive directory creation with parent mode inheritance"""
1115 """recursive directory creation with parent mode inheritance"""
1116 try:
1116 try:
1117 makedir(name, notindexed)
1117 makedir(name, notindexed)
1118 except OSError, err:
1118 except OSError, err:
1119 if err.errno == errno.EEXIST:
1119 if err.errno == errno.EEXIST:
1120 return
1120 return
1121 if err.errno != errno.ENOENT or not name:
1121 if err.errno != errno.ENOENT or not name:
1122 raise
1122 raise
1123 parent = os.path.dirname(os.path.abspath(name))
1123 parent = os.path.dirname(os.path.abspath(name))
1124 if parent == name:
1124 if parent == name:
1125 raise
1125 raise
1126 makedirs(parent, mode, notindexed)
1126 makedirs(parent, mode, notindexed)
1127 makedir(name, notindexed)
1127 makedir(name, notindexed)
1128 if mode is not None:
1128 if mode is not None:
1129 os.chmod(name, mode)
1129 os.chmod(name, mode)
1130
1130
1131 def ensuredirs(name, mode=None, notindexed=False):
1131 def ensuredirs(name, mode=None, notindexed=False):
1132 """race-safe recursive directory creation
1132 """race-safe recursive directory creation
1133
1133
1134 Newly created directories are marked as "not to be indexed by
1134 Newly created directories are marked as "not to be indexed by
1135 the content indexing service", if ``notindexed`` is specified
1135 the content indexing service", if ``notindexed`` is specified
1136 for "write" mode access.
1136 for "write" mode access.
1137 """
1137 """
1138 if os.path.isdir(name):
1138 if os.path.isdir(name):
1139 return
1139 return
1140 parent = os.path.dirname(os.path.abspath(name))
1140 parent = os.path.dirname(os.path.abspath(name))
1141 if parent != name:
1141 if parent != name:
1142 ensuredirs(parent, mode, notindexed)
1142 ensuredirs(parent, mode, notindexed)
1143 try:
1143 try:
1144 makedir(name, notindexed)
1144 makedir(name, notindexed)
1145 except OSError, err:
1145 except OSError, err:
1146 if err.errno == errno.EEXIST and os.path.isdir(name):
1146 if err.errno == errno.EEXIST and os.path.isdir(name):
1147 # someone else seems to have won a directory creation race
1147 # someone else seems to have won a directory creation race
1148 return
1148 return
1149 raise
1149 raise
1150 if mode is not None:
1150 if mode is not None:
1151 os.chmod(name, mode)
1151 os.chmod(name, mode)
1152
1152
1153 def readfile(path):
1153 def readfile(path):
1154 fp = open(path, 'rb')
1154 fp = open(path, 'rb')
1155 try:
1155 try:
1156 return fp.read()
1156 return fp.read()
1157 finally:
1157 finally:
1158 fp.close()
1158 fp.close()
1159
1159
1160 def writefile(path, text):
1160 def writefile(path, text):
1161 fp = open(path, 'wb')
1161 fp = open(path, 'wb')
1162 try:
1162 try:
1163 fp.write(text)
1163 fp.write(text)
1164 finally:
1164 finally:
1165 fp.close()
1165 fp.close()
1166
1166
1167 def appendfile(path, text):
1167 def appendfile(path, text):
1168 fp = open(path, 'ab')
1168 fp = open(path, 'ab')
1169 try:
1169 try:
1170 fp.write(text)
1170 fp.write(text)
1171 finally:
1171 finally:
1172 fp.close()
1172 fp.close()
1173
1173
1174 class chunkbuffer(object):
1174 class chunkbuffer(object):
1175 """Allow arbitrary sized chunks of data to be efficiently read from an
1175 """Allow arbitrary sized chunks of data to be efficiently read from an
1176 iterator over chunks of arbitrary size."""
1176 iterator over chunks of arbitrary size."""
1177
1177
1178 def __init__(self, in_iter):
1178 def __init__(self, in_iter):
1179 """in_iter is the iterator that's iterating over the input chunks.
1179 """in_iter is the iterator that's iterating over the input chunks.
1180 Chunks larger than 1 MB are split into 256 kB pieces internally."""
1180 Chunks larger than 1 MB are split into 256 kB pieces internally."""
1181 def splitbig(chunks):
1181 def splitbig(chunks):
1182 for chunk in chunks:
1182 for chunk in chunks:
1183 if len(chunk) > 2**20:
1183 if len(chunk) > 2**20:
1184 pos = 0
1184 pos = 0
1185 while pos < len(chunk):
1185 while pos < len(chunk):
1186 end = pos + 2 ** 18
1186 end = pos + 2 ** 18
1187 yield chunk[pos:end]
1187 yield chunk[pos:end]
1188 pos = end
1188 pos = end
1189 else:
1189 else:
1190 yield chunk
1190 yield chunk
1191 self.iter = splitbig(in_iter)
1191 self.iter = splitbig(in_iter)
1192 self._queue = collections.deque()
1192 self._queue = collections.deque()
1193
1193
1194 def read(self, l=None):
1194 def read(self, l=None):
1195 """Read L bytes of data from the iterator of chunks of data.
1195 """Read L bytes of data from the iterator of chunks of data.
1196 Returns less than L bytes if the iterator runs dry.
1196 Returns less than L bytes if the iterator runs dry.
1197
1197
1198 If l is omitted, read everything."""
1198 If l is omitted, read everything."""
1199 left = l
1199 left = l
1200 buf = []
1200 buf = []
1201 queue = self._queue
1201 queue = self._queue
1202 while left is None or left > 0:
1202 while left is None or left > 0:
1203 # refill the queue
1203 # refill the queue
1204 if not queue:
1204 if not queue:
1205 target = 2**18
1205 target = 2**18
1206 for chunk in self.iter:
1206 for chunk in self.iter:
1207 queue.append(chunk)
1207 queue.append(chunk)
1208 target -= len(chunk)
1208 target -= len(chunk)
1209 if target <= 0:
1209 if target <= 0:
1210 break
1210 break
1211 if not queue:
1211 if not queue:
1212 break
1212 break
1213
1213
1214 chunk = queue.popleft()
1214 chunk = queue.popleft()
1215 if left is not None:
1215 if left is not None:
1216 left -= len(chunk)
1216 left -= len(chunk)
1217 if left is not None and left < 0:
1217 if left is not None and left < 0:
1218 queue.appendleft(chunk[left:])
1218 queue.appendleft(chunk[left:])
1219 buf.append(chunk[:left])
1219 buf.append(chunk[:left])
1220 else:
1220 else:
1221 buf.append(chunk)
1221 buf.append(chunk)
1222
1222
1223 return ''.join(buf)
1223 return ''.join(buf)
1224
1224
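# Editorial sketch (not part of util.py or this changeset): chunkbuffer
# re-slices an iterator of unevenly sized chunks into reads of whatever size
# the caller asks for; _chunkbufferdemo is a hypothetical helper shown only
# for illustration.
def _chunkbufferdemo():
    buf = chunkbuffer(iter(['abc', 'defgh', 'i']))
    assert buf.read(4) == 'abcd'  # spans the first two input chunks
    assert buf.read() == 'efghi'  # no size given: drain everything left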
1225 def filechunkiter(f, size=65536, limit=None):
1225 def filechunkiter(f, size=65536, limit=None):
1226 """Create a generator that produces the data in the file, size
1226 """Create a generator that produces the data in the file, size
1227 (default 65536) bytes at a time, up to optional limit (default is
1227 (default 65536) bytes at a time, up to optional limit (default is
1228 to read all data). Chunks may be less than size bytes if the
1228 to read all data). Chunks may be less than size bytes if the
1229 chunk is the last chunk in the file, or the file is a socket or
1229 chunk is the last chunk in the file, or the file is a socket or
1230 some other type of file that sometimes reads less data than is
1230 some other type of file that sometimes reads less data than is
1231 requested."""
1231 requested."""
1232 assert size >= 0
1232 assert size >= 0
1233 assert limit is None or limit >= 0
1233 assert limit is None or limit >= 0
1234 while True:
1234 while True:
1235 if limit is None:
1235 if limit is None:
1236 nbytes = size
1236 nbytes = size
1237 else:
1237 else:
1238 nbytes = min(limit, size)
1238 nbytes = min(limit, size)
1239 s = nbytes and f.read(nbytes)
1239 s = nbytes and f.read(nbytes)
1240 if not s:
1240 if not s:
1241 break
1241 break
1242 if limit:
1242 if limit:
1243 limit -= len(s)
1243 limit -= len(s)
1244 yield s
1244 yield s
1245
1245
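# Editorial sketch (not part of util.py or this changeset): filechunkiter()
# streams a file in bounded chunks, so it pairs naturally with chunkbuffer()
# above; _copyfirstdemo is a hypothetical helper shown only to make the
# calling convention concrete.
def _copyfirstdemo(srcpath, dstfp, nbytes):
    fp = posixfile(srcpath, 'rb')
    try:
        # yield at most nbytes bytes in total, 64k at a time
        for chunk in filechunkiter(fp, size=65536, limit=nbytes):
            dstfp.write(chunk)
    finally:
        fp.close()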
1246 def makedate(timestamp=None):
1246 def makedate(timestamp=None):
1247 '''Return a unix timestamp (or the current time) as a (unixtime,
1247 '''Return a unix timestamp (or the current time) as a (unixtime,
1248 offset) tuple based on the local timezone.'''
1248 offset) tuple based on the local timezone.'''
1249 if timestamp is None:
1249 if timestamp is None:
1250 timestamp = time.time()
1250 timestamp = time.time()
1251 if timestamp < 0:
1251 if timestamp < 0:
1252 hint = _("check your clock")
1252 hint = _("check your clock")
1253 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1253 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1254 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1254 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1255 datetime.datetime.fromtimestamp(timestamp))
1255 datetime.datetime.fromtimestamp(timestamp))
1256 tz = delta.days * 86400 + delta.seconds
1256 tz = delta.days * 86400 + delta.seconds
1257 return timestamp, tz
1257 return timestamp, tz
1258
1258
1259 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1259 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1260 """represent a (unixtime, offset) tuple as a localized time.
1260 """represent a (unixtime, offset) tuple as a localized time.
1261 unixtime is seconds since the epoch, and offset is the time zone's
1261 unixtime is seconds since the epoch, and offset is the time zone's
1262 number of seconds away from UTC. The %1, %2 and %z codes in the
1262 number of seconds away from UTC. The %1, %2 and %z codes in the
1263 format string expand to the time zone offset."""
1263 format string expand to the time zone offset."""
1264 t, tz = date or makedate()
1264 t, tz = date or makedate()
1265 if t < 0:
1265 if t < 0:
1266 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1266 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1267 tz = 0
1267 tz = 0
1268 if "%1" in format or "%2" in format or "%z" in format:
1268 if "%1" in format or "%2" in format or "%z" in format:
1269 sign = (tz > 0) and "-" or "+"
1269 sign = (tz > 0) and "-" or "+"
1270 minutes = abs(tz) // 60
1270 minutes = abs(tz) // 60
1271 format = format.replace("%z", "%1%2")
1271 format = format.replace("%z", "%1%2")
1272 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1272 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1273 format = format.replace("%2", "%02d" % (minutes % 60))
1273 format = format.replace("%2", "%02d" % (minutes % 60))
1274 try:
1274 try:
1275 t = time.gmtime(float(t) - tz)
1275 t = time.gmtime(float(t) - tz)
1276 except ValueError:
1276 except ValueError:
1277 # time was out of range
1277 # time was out of range
1278 t = time.gmtime(sys.maxint)
1278 t = time.gmtime(sys.maxint)
1279 s = time.strftime(format, t)
1279 s = time.strftime(format, t)
1280 return s
1280 return s
1281
1281
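# Editorial sketch (not part of util.py or this changeset): the offset in a
# (unixtime, offset) tuple is the number of seconds to add to the local wall
# clock to reach UTC (so UTC+2 yields -7200), and datestr()'s %1/%2 codes
# render it with the conventional sign, e.g. "+0200"; _datestrdemo is a
# hypothetical helper shown only for illustration.
def _datestrdemo():
    assert datestr((0, -7200), '%Y-%m-%d %H:%M %1%2') == '1970-01-01 02:00 +0200'
    assert datestr((0, 0), '%Y-%m-%d %H:%M %1%2') == '1970-01-01 00:00 +0000'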
1282 def shortdate(date=None):
1282 def shortdate(date=None):
1283 """turn (timestamp, tzoff) tuple into an ISO 8601 date."""
1283 """turn (timestamp, tzoff) tuple into an ISO 8601 date."""
1284 return datestr(date, format='%Y-%m-%d')
1284 return datestr(date, format='%Y-%m-%d')
1285
1285
1286 def strdate(string, format, defaults=[]):
1286 def strdate(string, format, defaults=[]):
1287 """parse a localized time string and return a (unixtime, offset) tuple.
1287 """parse a localized time string and return a (unixtime, offset) tuple.
1288 if the string cannot be parsed, ValueError is raised."""
1288 if the string cannot be parsed, ValueError is raised."""
1289 def timezone(string):
1289 def timezone(string):
1290 tz = string.split()[-1]
1290 tz = string.split()[-1]
1291 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1291 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1292 sign = (tz[0] == "+") and 1 or -1
1292 sign = (tz[0] == "+") and 1 or -1
1293 hours = int(tz[1:3])
1293 hours = int(tz[1:3])
1294 minutes = int(tz[3:5])
1294 minutes = int(tz[3:5])
1295 return -sign * (hours * 60 + minutes) * 60
1295 return -sign * (hours * 60 + minutes) * 60
1296 if tz == "GMT" or tz == "UTC":
1296 if tz == "GMT" or tz == "UTC":
1297 return 0
1297 return 0
1298 return None
1298 return None
1299
1299
1300 # NOTE: unixtime = localunixtime + offset
1300 # NOTE: unixtime = localunixtime + offset
1301 offset, date = timezone(string), string
1301 offset, date = timezone(string), string
1302 if offset is not None:
1302 if offset is not None:
1303 date = " ".join(string.split()[:-1])
1303 date = " ".join(string.split()[:-1])
1304
1304
1305 # add missing elements from defaults
1305 # add missing elements from defaults
1306 usenow = False # default to using biased defaults
1306 usenow = False # default to using biased defaults
1307 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1307 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1308 found = [True for p in part if ("%"+p) in format]
1308 found = [True for p in part if ("%"+p) in format]
1309 if not found:
1309 if not found:
1310 date += "@" + defaults[part][usenow]
1310 date += "@" + defaults[part][usenow]
1311 format += "@%" + part[0]
1311 format += "@%" + part[0]
1312 else:
1312 else:
1313 # We've found a specific time element, less specific time
1313 # We've found a specific time element, less specific time
1314 # elements are relative to today
1314 # elements are relative to today
1315 usenow = True
1315 usenow = True
1316
1316
1317 timetuple = time.strptime(date, format)
1317 timetuple = time.strptime(date, format)
1318 localunixtime = int(calendar.timegm(timetuple))
1318 localunixtime = int(calendar.timegm(timetuple))
1319 if offset is None:
1319 if offset is None:
1320 # local timezone
1320 # local timezone
1321 unixtime = int(time.mktime(timetuple))
1321 unixtime = int(time.mktime(timetuple))
1322 offset = unixtime - localunixtime
1322 offset = unixtime - localunixtime
1323 else:
1323 else:
1324 unixtime = localunixtime + offset
1324 unixtime = localunixtime + offset
1325 return unixtime, offset
1325 return unixtime, offset
1326
1326
1327 def parsedate(date, formats=None, bias={}):
1327 def parsedate(date, formats=None, bias={}):
1328 """parse a localized date/time and return a (unixtime, offset) tuple.
1328 """parse a localized date/time and return a (unixtime, offset) tuple.
1329
1329
1330 The date may be a "unixtime offset" string or in one of the specified
1330 The date may be a "unixtime offset" string or in one of the specified
1331 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1331 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1332
1332
1333 >>> parsedate(' today ') == parsedate(\
1333 >>> parsedate(' today ') == parsedate(\
1334 datetime.date.today().strftime('%b %d'))
1334 datetime.date.today().strftime('%b %d'))
1335 True
1335 True
1336 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1336 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1337 datetime.timedelta(days=1)\
1337 datetime.timedelta(days=1)\
1338 ).strftime('%b %d'))
1338 ).strftime('%b %d'))
1339 True
1339 True
1340 >>> now, tz = makedate()
1340 >>> now, tz = makedate()
1341 >>> strnow, strtz = parsedate('now')
1341 >>> strnow, strtz = parsedate('now')
1342 >>> (strnow - now) < 1
1342 >>> (strnow - now) < 1
1343 True
1343 True
1344 >>> tz == strtz
1344 >>> tz == strtz
1345 True
1345 True
1346 """
1346 """
1347 if not date:
1347 if not date:
1348 return 0, 0
1348 return 0, 0
1349 if isinstance(date, tuple) and len(date) == 2:
1349 if isinstance(date, tuple) and len(date) == 2:
1350 return date
1350 return date
1351 if not formats:
1351 if not formats:
1352 formats = defaultdateformats
1352 formats = defaultdateformats
1353 date = date.strip()
1353 date = date.strip()
1354
1354
1355 if date == 'now' or date == _('now'):
1355 if date == 'now' or date == _('now'):
1356 return makedate()
1356 return makedate()
1357 if date == 'today' or date == _('today'):
1357 if date == 'today' or date == _('today'):
1358 date = datetime.date.today().strftime('%b %d')
1358 date = datetime.date.today().strftime('%b %d')
1359 elif date == 'yesterday' or date == _('yesterday'):
1359 elif date == 'yesterday' or date == _('yesterday'):
1360 date = (datetime.date.today() -
1360 date = (datetime.date.today() -
1361 datetime.timedelta(days=1)).strftime('%b %d')
1361 datetime.timedelta(days=1)).strftime('%b %d')
1362
1362
1363 try:
1363 try:
1364 when, offset = map(int, date.split(' '))
1364 when, offset = map(int, date.split(' '))
1365 except ValueError:
1365 except ValueError:
1366 # fill out defaults
1366 # fill out defaults
1367 now = makedate()
1367 now = makedate()
1368 defaults = {}
1368 defaults = {}
1369 for part in ("d", "mb", "yY", "HI", "M", "S"):
1369 for part in ("d", "mb", "yY", "HI", "M", "S"):
1370 # this piece is for rounding the specific end of unknowns
1370 # this piece is for rounding the specific end of unknowns
1371 b = bias.get(part)
1371 b = bias.get(part)
1372 if b is None:
1372 if b is None:
1373 if part[0] in "HMS":
1373 if part[0] in "HMS":
1374 b = "00"
1374 b = "00"
1375 else:
1375 else:
1376 b = "0"
1376 b = "0"
1377
1377
1378 # this piece is for matching the generic end to today's date
1378 # this piece is for matching the generic end to today's date
1379 n = datestr(now, "%" + part[0])
1379 n = datestr(now, "%" + part[0])
1380
1380
1381 defaults[part] = (b, n)
1381 defaults[part] = (b, n)
1382
1382
1383 for format in formats:
1383 for format in formats:
1384 try:
1384 try:
1385 when, offset = strdate(date, format, defaults)
1385 when, offset = strdate(date, format, defaults)
1386 except (ValueError, OverflowError):
1386 except (ValueError, OverflowError):
1387 pass
1387 pass
1388 else:
1388 else:
1389 break
1389 break
1390 else:
1390 else:
1391 raise Abort(_('invalid date: %r') % date)
1391 raise Abort(_('invalid date: %r') % date)
1392 # validate explicit (probably user-specified) date and
1392 # validate explicit (probably user-specified) date and
1393 # time zone offset. values must fit in signed 32 bits for
1393 # time zone offset. values must fit in signed 32 bits for
1394 # current 32-bit linux runtimes. timezones go from UTC-12
1394 # current 32-bit linux runtimes. timezones go from UTC-12
1395 # to UTC+14
1395 # to UTC+14
1396 if abs(when) > 0x7fffffff:
1396 if abs(when) > 0x7fffffff:
1397 raise Abort(_('date exceeds 32 bits: %d') % when)
1397 raise Abort(_('date exceeds 32 bits: %d') % when)
1398 if when < 0:
1398 if when < 0:
1399 raise Abort(_('negative date value: %d') % when)
1399 raise Abort(_('negative date value: %d') % when)
1400 if offset < -50400 or offset > 43200:
1400 if offset < -50400 or offset > 43200:
1401 raise Abort(_('impossible time zone offset: %d') % offset)
1401 raise Abort(_('impossible time zone offset: %d') % offset)
1402 return when, offset
1402 return when, offset
1403
1403
1404 def matchdate(date):
1404 def matchdate(date):
1405 """Return a function that matches a given date match specifier
1405 """Return a function that matches a given date match specifier
1406
1406
1407 Formats include:
1407 Formats include:
1408
1408
1409 '{date}' match a given date to the accuracy provided
1409 '{date}' match a given date to the accuracy provided
1410
1410
1411 '<{date}' on or before a given date
1411 '<{date}' on or before a given date
1412
1412
1413 '>{date}' on or after a given date
1413 '>{date}' on or after a given date
1414
1414
1415 >>> p1 = parsedate("10:29:59")
1415 >>> p1 = parsedate("10:29:59")
1416 >>> p2 = parsedate("10:30:00")
1416 >>> p2 = parsedate("10:30:00")
1417 >>> p3 = parsedate("10:30:59")
1417 >>> p3 = parsedate("10:30:59")
1418 >>> p4 = parsedate("10:31:00")
1418 >>> p4 = parsedate("10:31:00")
1419 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1419 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1420 >>> f = matchdate("10:30")
1420 >>> f = matchdate("10:30")
1421 >>> f(p1[0])
1421 >>> f(p1[0])
1422 False
1422 False
1423 >>> f(p2[0])
1423 >>> f(p2[0])
1424 True
1424 True
1425 >>> f(p3[0])
1425 >>> f(p3[0])
1426 True
1426 True
1427 >>> f(p4[0])
1427 >>> f(p4[0])
1428 False
1428 False
1429 >>> f(p5[0])
1429 >>> f(p5[0])
1430 False
1430 False
1431 """
1431 """
1432
1432
1433 def lower(date):
1433 def lower(date):
1434 d = {'mb': "1", 'd': "1"}
1434 d = {'mb': "1", 'd': "1"}
1435 return parsedate(date, extendeddateformats, d)[0]
1435 return parsedate(date, extendeddateformats, d)[0]
1436
1436
1437 def upper(date):
1437 def upper(date):
1438 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1438 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1439 for days in ("31", "30", "29"):
1439 for days in ("31", "30", "29"):
1440 try:
1440 try:
1441 d["d"] = days
1441 d["d"] = days
1442 return parsedate(date, extendeddateformats, d)[0]
1442 return parsedate(date, extendeddateformats, d)[0]
1443 except Abort:
1443 except Abort:
1444 pass
1444 pass
1445 d["d"] = "28"
1445 d["d"] = "28"
1446 return parsedate(date, extendeddateformats, d)[0]
1446 return parsedate(date, extendeddateformats, d)[0]
1447
1447
1448 date = date.strip()
1448 date = date.strip()
1449
1449
1450 if not date:
1450 if not date:
1451 raise Abort(_("dates cannot consist entirely of whitespace"))
1451 raise Abort(_("dates cannot consist entirely of whitespace"))
1452 elif date[0] == "<":
1452 elif date[0] == "<":
1453 if not date[1:]:
1453 if not date[1:]:
1454 raise Abort(_("invalid day spec, use '<DATE'"))
1454 raise Abort(_("invalid day spec, use '<DATE'"))
1455 when = upper(date[1:])
1455 when = upper(date[1:])
1456 return lambda x: x <= when
1456 return lambda x: x <= when
1457 elif date[0] == ">":
1457 elif date[0] == ">":
1458 if not date[1:]:
1458 if not date[1:]:
1459 raise Abort(_("invalid day spec, use '>DATE'"))
1459 raise Abort(_("invalid day spec, use '>DATE'"))
1460 when = lower(date[1:])
1460 when = lower(date[1:])
1461 return lambda x: x >= when
1461 return lambda x: x >= when
1462 elif date[0] == "-":
1462 elif date[0] == "-":
1463 try:
1463 try:
1464 days = int(date[1:])
1464 days = int(date[1:])
1465 except ValueError:
1465 except ValueError:
1466 raise Abort(_("invalid day spec: %s") % date[1:])
1466 raise Abort(_("invalid day spec: %s") % date[1:])
1467 if days < 0:
1467 if days < 0:
1468 raise Abort(_('%s must be nonnegative (see "hg help dates")')
1468 raise Abort(_('%s must be nonnegative (see "hg help dates")')
1469 % date[1:])
1469 % date[1:])
1470 when = makedate()[0] - days * 3600 * 24
1470 when = makedate()[0] - days * 3600 * 24
1471 return lambda x: x >= when
1471 return lambda x: x >= when
1472 elif " to " in date:
1472 elif " to " in date:
1473 a, b = date.split(" to ")
1473 a, b = date.split(" to ")
1474 start, stop = lower(a), upper(b)
1474 start, stop = lower(a), upper(b)
1475 return lambda x: x >= start and x <= stop
1475 return lambda x: x >= start and x <= stop
1476 else:
1476 else:
1477 start, stop = lower(date), upper(date)
1477 start, stop = lower(date), upper(date)
1478 return lambda x: x >= start and x <= stop
1478 return lambda x: x >= start and x <= stop
1479
1479
1480 def shortuser(user):
1480 def shortuser(user):
1481 """Return a short representation of a user name or email address."""
1481 """Return a short representation of a user name or email address."""
1482 f = user.find('@')
1482 f = user.find('@')
1483 if f >= 0:
1483 if f >= 0:
1484 user = user[:f]
1484 user = user[:f]
1485 f = user.find('<')
1485 f = user.find('<')
1486 if f >= 0:
1486 if f >= 0:
1487 user = user[f + 1:]
1487 user = user[f + 1:]
1488 f = user.find(' ')
1488 f = user.find(' ')
1489 if f >= 0:
1489 if f >= 0:
1490 user = user[:f]
1490 user = user[:f]
1491 f = user.find('.')
1491 f = user.find('.')
1492 if f >= 0:
1492 if f >= 0:
1493 user = user[:f]
1493 user = user[:f]
1494 return user
1494 return user
1495
1495
1496 def emailuser(user):
1496 def emailuser(user):
1497 """Return the user portion of an email address."""
1497 """Return the user portion of an email address."""
1498 f = user.find('@')
1498 f = user.find('@')
1499 if f >= 0:
1499 if f >= 0:
1500 user = user[:f]
1500 user = user[:f]
1501 f = user.find('<')
1501 f = user.find('<')
1502 if f >= 0:
1502 if f >= 0:
1503 user = user[f + 1:]
1503 user = user[f + 1:]
1504 return user
1504 return user
1505
1505
1506 def email(author):
1506 def email(author):
1507 '''get email of author.'''
1507 '''get email of author.'''
1508 r = author.find('>')
1508 r = author.find('>')
1509 if r == -1:
1509 if r == -1:
1510 r = None
1510 r = None
1511 return author[author.find('<') + 1:r]
1511 return author[author.find('<') + 1:r]
1512
1512
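# Editorial sketch (not part of util.py or this changeset): the three helpers
# above trim an author string with increasing aggressiveness; _authordemo is a
# hypothetical helper shown only for illustration.
def _authordemo():
    author = 'Joe User <joe.user@example.com>'
    assert email(author) == 'joe.user@example.com'
    assert emailuser(author) == 'joe.user'
    assert shortuser(author) == 'joe'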
1513 def ellipsis(text, maxlength=400):
1513 def ellipsis(text, maxlength=400):
1514 """Trim string to at most maxlength (default: 400) columns in display."""
1514 """Trim string to at most maxlength (default: 400) columns in display."""
1515 return encoding.trim(text, maxlength, ellipsis='...')
1515 return encoding.trim(text, maxlength, ellipsis='...')
1516
1516
1517 def unitcountfn(*unittable):
1517 def unitcountfn(*unittable):
1518 '''return a function that renders a readable count of some quantity'''
1518 '''return a function that renders a readable count of some quantity'''
1519
1519
1520 def go(count):
1520 def go(count):
1521 for multiplier, divisor, format in unittable:
1521 for multiplier, divisor, format in unittable:
1522 if count >= divisor * multiplier:
1522 if count >= divisor * multiplier:
1523 return format % (count / float(divisor))
1523 return format % (count / float(divisor))
1524 return unittable[-1][2] % count
1524 return unittable[-1][2] % count
1525
1525
1526 return go
1526 return go
1527
1527
1528 bytecount = unitcountfn(
1528 bytecount = unitcountfn(
1529 (100, 1 << 30, _('%.0f GB')),
1529 (100, 1 << 30, _('%.0f GB')),
1530 (10, 1 << 30, _('%.1f GB')),
1530 (10, 1 << 30, _('%.1f GB')),
1531 (1, 1 << 30, _('%.2f GB')),
1531 (1, 1 << 30, _('%.2f GB')),
1532 (100, 1 << 20, _('%.0f MB')),
1532 (100, 1 << 20, _('%.0f MB')),
1533 (10, 1 << 20, _('%.1f MB')),
1533 (10, 1 << 20, _('%.1f MB')),
1534 (1, 1 << 20, _('%.2f MB')),
1534 (1, 1 << 20, _('%.2f MB')),
1535 (100, 1 << 10, _('%.0f KB')),
1535 (100, 1 << 10, _('%.0f KB')),
1536 (10, 1 << 10, _('%.1f KB')),
1536 (10, 1 << 10, _('%.1f KB')),
1537 (1, 1 << 10, _('%.2f KB')),
1537 (1, 1 << 10, _('%.2f KB')),
1538 (1, 1, _('%.0f bytes')),
1538 (1, 1, _('%.0f bytes')),
1539 )
1539 )
1540
1540
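# Editorial sketch (not part of util.py or this changeset): each (multiplier,
# divisor, format) row above is tried in order, so the first threshold
# (multiplier * divisor) that the count reaches decides both the unit and the
# precision; _bytecountdemo is a hypothetical helper (outputs assume the
# untranslated English format strings).
def _bytecountdemo():
    assert bytecount(512) == '512 bytes'
    assert bytecount(1234567) == '1.18 MB'    # >= 1 MB but < 10 MB: 2 decimals
    assert bytecount(987654321) == '942 MB'   # >= 100 MB: no decimals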
1541 def uirepr(s):
1541 def uirepr(s):
1542 # Avoid double backslash in Windows path repr()
1542 # Avoid double backslash in Windows path repr()
1543 return repr(s).replace('\\\\', '\\')
1543 return repr(s).replace('\\\\', '\\')
1544
1544
1545 # delay import of textwrap
1545 # delay import of textwrap
1546 def MBTextWrapper(**kwargs):
1546 def MBTextWrapper(**kwargs):
1547 class tw(textwrap.TextWrapper):
1547 class tw(textwrap.TextWrapper):
1548 """
1548 """
1549 Extend TextWrapper for width-awareness.
1549 Extend TextWrapper for width-awareness.
1550
1550
1551 Neither the number of bytes in any encoding nor the number of
1551 Neither the number of bytes in any encoding nor the number of
1552 characters is a reliable measure of terminal columns for a string.
1552 characters is a reliable measure of terminal columns for a string.
1553
1553
1554 The original TextWrapper implementation uses the built-in len()
1554 The original TextWrapper implementation uses the built-in len()
1555 directly, so it must be overridden to use per-character width information.
1555 directly, so it must be overridden to use per-character width information.
1556
1556
1557 In addition, characters classified as having 'ambiguous' width are
1557 In addition, characters classified as having 'ambiguous' width are
1558 treated as wide in East Asian locales but as narrow elsewhere.
1558 treated as wide in East Asian locales but as narrow elsewhere.
1559
1559
1560 Deciding the width of such characters is therefore left to the user.
1560 Deciding the width of such characters is therefore left to the user.
1561 """
1561 """
1562 def __init__(self, **kwargs):
1562 def __init__(self, **kwargs):
1563 textwrap.TextWrapper.__init__(self, **kwargs)
1563 textwrap.TextWrapper.__init__(self, **kwargs)
1564
1564
1565 # for compatibility between 2.4 and 2.6
1565 # for compatibility between 2.4 and 2.6
1566 if getattr(self, 'drop_whitespace', None) is None:
1566 if getattr(self, 'drop_whitespace', None) is None:
1567 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1567 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1568
1568
1569 def _cutdown(self, ucstr, space_left):
1569 def _cutdown(self, ucstr, space_left):
1570 l = 0
1570 l = 0
1571 colwidth = encoding.ucolwidth
1571 colwidth = encoding.ucolwidth
1572 for i in xrange(len(ucstr)):
1572 for i in xrange(len(ucstr)):
1573 l += colwidth(ucstr[i])
1573 l += colwidth(ucstr[i])
1574 if space_left < l:
1574 if space_left < l:
1575 return (ucstr[:i], ucstr[i:])
1575 return (ucstr[:i], ucstr[i:])
1576 return ucstr, ''
1576 return ucstr, ''
1577
1577
1578 # overriding of base class
1578 # overriding of base class
1579 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1579 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1580 space_left = max(width - cur_len, 1)
1580 space_left = max(width - cur_len, 1)
1581
1581
1582 if self.break_long_words:
1582 if self.break_long_words:
1583 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1583 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1584 cur_line.append(cut)
1584 cur_line.append(cut)
1585 reversed_chunks[-1] = res
1585 reversed_chunks[-1] = res
1586 elif not cur_line:
1586 elif not cur_line:
1587 cur_line.append(reversed_chunks.pop())
1587 cur_line.append(reversed_chunks.pop())
1588
1588
1589 # this overriding code is imported from TextWrapper of python 2.6
1589 # this overriding code is imported from TextWrapper of python 2.6
1590 # to calculate columns of string by 'encoding.ucolwidth()'
1590 # to calculate columns of string by 'encoding.ucolwidth()'
1591 def _wrap_chunks(self, chunks):
1591 def _wrap_chunks(self, chunks):
1592 colwidth = encoding.ucolwidth
1592 colwidth = encoding.ucolwidth
1593
1593
1594 lines = []
1594 lines = []
1595 if self.width <= 0:
1595 if self.width <= 0:
1596 raise ValueError("invalid width %r (must be > 0)" % self.width)
1596 raise ValueError("invalid width %r (must be > 0)" % self.width)
1597
1597
1598 # Arrange in reverse order so items can be efficiently popped
1598 # Arrange in reverse order so items can be efficiently popped
1599 # from a stack of chunks.
1599 # from a stack of chunks.
1600 chunks.reverse()
1600 chunks.reverse()
1601
1601
1602 while chunks:
1602 while chunks:
1603
1603
1604 # Start the list of chunks that will make up the current line.
1604 # Start the list of chunks that will make up the current line.
1605 # cur_len is just the length of all the chunks in cur_line.
1605 # cur_len is just the length of all the chunks in cur_line.
1606 cur_line = []
1606 cur_line = []
1607 cur_len = 0
1607 cur_len = 0
1608
1608
1609 # Figure out which static string will prefix this line.
1609 # Figure out which static string will prefix this line.
1610 if lines:
1610 if lines:
1611 indent = self.subsequent_indent
1611 indent = self.subsequent_indent
1612 else:
1612 else:
1613 indent = self.initial_indent
1613 indent = self.initial_indent
1614
1614
1615 # Maximum width for this line.
1615 # Maximum width for this line.
1616 width = self.width - len(indent)
1616 width = self.width - len(indent)
1617
1617
1618 # First chunk on line is whitespace -- drop it, unless this
1618 # First chunk on line is whitespace -- drop it, unless this
1619 # is the very beginning of the text (i.e. no lines started yet).
1619 # is the very beginning of the text (i.e. no lines started yet).
1620 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1620 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1621 del chunks[-1]
1621 del chunks[-1]
1622
1622
1623 while chunks:
1623 while chunks:
1624 l = colwidth(chunks[-1])
1624 l = colwidth(chunks[-1])
1625
1625
1626 # Can at least squeeze this chunk onto the current line.
1626 # Can at least squeeze this chunk onto the current line.
1627 if cur_len + l <= width:
1627 if cur_len + l <= width:
1628 cur_line.append(chunks.pop())
1628 cur_line.append(chunks.pop())
1629 cur_len += l
1629 cur_len += l
1630
1630
1631 # Nope, this line is full.
1631 # Nope, this line is full.
1632 else:
1632 else:
1633 break
1633 break
1634
1634
1635 # The current line is full, and the next chunk is too big to
1635 # The current line is full, and the next chunk is too big to
1636 # fit on *any* line (not just this one).
1636 # fit on *any* line (not just this one).
1637 if chunks and colwidth(chunks[-1]) > width:
1637 if chunks and colwidth(chunks[-1]) > width:
1638 self._handle_long_word(chunks, cur_line, cur_len, width)
1638 self._handle_long_word(chunks, cur_line, cur_len, width)
1639
1639
1640 # If the last chunk on this line is all whitespace, drop it.
1640 # If the last chunk on this line is all whitespace, drop it.
1641 if (self.drop_whitespace and
1641 if (self.drop_whitespace and
1642 cur_line and cur_line[-1].strip() == ''):
1642 cur_line and cur_line[-1].strip() == ''):
1643 del cur_line[-1]
1643 del cur_line[-1]
1644
1644
1645 # Convert current line back to a string and store it in list
1645 # Convert current line back to a string and store it in list
1646 # of all lines (return value).
1646 # of all lines (return value).
1647 if cur_line:
1647 if cur_line:
1648 lines.append(indent + ''.join(cur_line))
1648 lines.append(indent + ''.join(cur_line))
1649
1649
1650 return lines
1650 return lines
1651
1651
1652 global MBTextWrapper
1652 global MBTextWrapper
1653 MBTextWrapper = tw
1653 MBTextWrapper = tw
1654 return tw(**kwargs)
1654 return tw(**kwargs)
1655
1655
1656 def wrap(line, width, initindent='', hangindent=''):
1656 def wrap(line, width, initindent='', hangindent=''):
1657 maxindent = max(len(hangindent), len(initindent))
1657 maxindent = max(len(hangindent), len(initindent))
1658 if width <= maxindent:
1658 if width <= maxindent:
1659 # adjust for weird terminal size
1659 # adjust for weird terminal size
1660 width = max(78, maxindent + 1)
1660 width = max(78, maxindent + 1)
1661 line = line.decode(encoding.encoding, encoding.encodingmode)
1661 line = line.decode(encoding.encoding, encoding.encodingmode)
1662 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1662 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1663 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1663 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1664 wrapper = MBTextWrapper(width=width,
1664 wrapper = MBTextWrapper(width=width,
1665 initial_indent=initindent,
1665 initial_indent=initindent,
1666 subsequent_indent=hangindent)
1666 subsequent_indent=hangindent)
1667 return wrapper.fill(line).encode(encoding.encoding)
1667 return wrapper.fill(line).encode(encoding.encoding)
1668
1668
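# Editorial sketch (not part of util.py or this changeset): wrap() counts
# terminal columns (not bytes), and the two indents give the hanging-paragraph
# layout used for help text; _wrapdemo is a hypothetical helper shown only for
# illustration.
def _wrapdemo(text):
    # the first line gets '  * ', continuation lines line up under it
    return wrap(text, 30, initindent='  * ', hangindent='    ')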
1669 def iterlines(iterator):
1669 def iterlines(iterator):
1670 for chunk in iterator:
1670 for chunk in iterator:
1671 for line in chunk.splitlines():
1671 for line in chunk.splitlines():
1672 yield line
1672 yield line
1673
1673
1674 def expandpath(path):
1674 def expandpath(path):
1675 return os.path.expanduser(os.path.expandvars(path))
1675 return os.path.expanduser(os.path.expandvars(path))
1676
1676
1677 def hgcmd():
1677 def hgcmd():
1678 """Return the command used to execute current hg
1678 """Return the command used to execute current hg
1679
1679
1680 This is different from hgexecutable() because on Windows we want
1680 This is different from hgexecutable() because on Windows we want
1681 to avoid things that open new shell windows, such as batch files, so
1681 to avoid things that open new shell windows, such as batch files, so
1682 we return either the python invocation or the current executable.
1682 we return either the python invocation or the current executable.
1683 """
1683 """
1684 if mainfrozen():
1684 if mainfrozen():
1685 return [sys.executable]
1685 return [sys.executable]
1686 return gethgcmd()
1686 return gethgcmd()
1687
1687
1688 def rundetached(args, condfn):
1688 def rundetached(args, condfn):
1689 """Execute the argument list in a detached process.
1689 """Execute the argument list in a detached process.
1690
1690
1691 condfn is a callable which is called repeatedly and should return
1691 condfn is a callable which is called repeatedly and should return
1692 True once the child process is known to have started successfully.
1692 True once the child process is known to have started successfully.
1693 At this point, the child process PID is returned. If the child
1693 At this point, the child process PID is returned. If the child
1694 process fails to start or finishes before condfn() evaluates to
1694 process fails to start or finishes before condfn() evaluates to
1695 True, return -1.
1695 True, return -1.
1696 """
1696 """
1697 # Windows case is easier because the child process is either
1697 # Windows case is easier because the child process is either
1698 # successfully starting and validating the condition or exiting
1698 # successfully starting and validating the condition or exiting
1699 # on failure. We just poll on its PID. On Unix, if the child
1699 # on failure. We just poll on its PID. On Unix, if the child
1700 # process fails to start, it will be left in a zombie state until
1700 # process fails to start, it will be left in a zombie state until
1701 # the parent waits on it, which we cannot do since we expect a long
1701 # the parent waits on it, which we cannot do since we expect a long
1702 # running process on success. Instead we listen for SIGCHLD telling
1702 # running process on success. Instead we listen for SIGCHLD telling
1703 # us our child process terminated.
1703 # us our child process terminated.
1704 terminated = set()
1704 terminated = set()
1705 def handler(signum, frame):
1705 def handler(signum, frame):
1706 terminated.add(os.wait())
1706 terminated.add(os.wait())
1707 prevhandler = None
1707 prevhandler = None
1708 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1708 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1709 if SIGCHLD is not None:
1709 if SIGCHLD is not None:
1710 prevhandler = signal.signal(SIGCHLD, handler)
1710 prevhandler = signal.signal(SIGCHLD, handler)
1711 try:
1711 try:
1712 pid = spawndetached(args)
1712 pid = spawndetached(args)
1713 while not condfn():
1713 while not condfn():
1714 if ((pid in terminated or not testpid(pid))
1714 if ((pid in terminated or not testpid(pid))
1715 and not condfn()):
1715 and not condfn()):
1716 return -1
1716 return -1
1717 time.sleep(0.1)
1717 time.sleep(0.1)
1718 return pid
1718 return pid
1719 finally:
1719 finally:
1720 if prevhandler is not None:
1720 if prevhandler is not None:
1721 signal.signal(signal.SIGCHLD, prevhandler)
1721 signal.signal(signal.SIGCHLD, prevhandler)
1722
1722
1723 try:
1724 any, all = any, all
1725 except NameError:
1726 def any(iterable):
1727 for i in iterable:
1728 if i:
1729 return True
1730 return False
1731
1732 def all(iterable):
1733 for i in iterable:
1734 if not i:
1735 return False
1736 return True
1737
1738 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1723 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1739 """Return the result of interpolating items in the mapping into string s.
1724 """Return the result of interpolating items in the mapping into string s.
1740
1725
1741 prefix is a single character string, or a two character string with
1726 prefix is a single character string, or a two character string with
1742 a backslash as the first character if the prefix needs to be escaped in
1727 a backslash as the first character if the prefix needs to be escaped in
1743 a regular expression.
1728 a regular expression.
1744
1729
1745 fn is an optional function that will be applied to the replacement text
1730 fn is an optional function that will be applied to the replacement text
1746 just before replacement.
1731 just before replacement.
1747
1732
1748 escape_prefix is an optional flag; if set, a doubled prefix in s is
1733 escape_prefix is an optional flag; if set, a doubled prefix in s is
1749 treated as an escaped, literal prefix character.
1734 treated as an escaped, literal prefix character.
1750 """
1735 """
1751 fn = fn or (lambda s: s)
1736 fn = fn or (lambda s: s)
1752 patterns = '|'.join(mapping.keys())
1737 patterns = '|'.join(mapping.keys())
1753 if escape_prefix:
1738 if escape_prefix:
1754 patterns += '|' + prefix
1739 patterns += '|' + prefix
1755 if len(prefix) > 1:
1740 if len(prefix) > 1:
1756 prefix_char = prefix[1:]
1741 prefix_char = prefix[1:]
1757 else:
1742 else:
1758 prefix_char = prefix
1743 prefix_char = prefix
1759 mapping[prefix_char] = prefix_char
1744 mapping[prefix_char] = prefix_char
1760 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1745 r = remod.compile(r'%s(%s)' % (prefix, patterns))
1761 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1746 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1762
1747
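# Editorial sketch (not part of util.py or this changeset): interpolate()
# expands prefix-marked keys in a single pass; with escape_prefix=True a
# doubled prefix collapses to a literal one. _interpolatedemo is a
# hypothetical helper shown only for illustration.
def _interpolatedemo():
    assert interpolate('%', {'user': 'alice'}, 'hi %user') == 'hi alice'
    assert interpolate('%', {'user': 'alice'}, '100%% sure, %user',
                       escape_prefix=True) == '100% sure, alice'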
1763 def getport(port):
1748 def getport(port):
1764 """Return the port for a given network service.
1749 """Return the port for a given network service.
1765
1750
1766 If port is an integer, it's returned as is. If it's a string, it's
1751 If port is an integer, it's returned as is. If it's a string, it's
1767 looked up using socket.getservbyname(). If there's no matching
1752 looked up using socket.getservbyname(). If there's no matching
1768 service, util.Abort is raised.
1753 service, util.Abort is raised.
1769 """
1754 """
1770 try:
1755 try:
1771 return int(port)
1756 return int(port)
1772 except ValueError:
1757 except ValueError:
1773 pass
1758 pass
1774
1759
1775 try:
1760 try:
1776 return socket.getservbyname(port)
1761 return socket.getservbyname(port)
1777 except socket.error:
1762 except socket.error:
1778 raise Abort(_("no port number associated with service '%s'") % port)
1763 raise Abort(_("no port number associated with service '%s'") % port)
1779
1764
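# Editorial sketch (not part of util.py or this changeset): getport() accepts
# either a numeric port or a service name known to the OS; _getportdemo is a
# hypothetical helper (the 'http' lookup assumes a standard services database).
def _getportdemo():
    assert getport(8000) == 8000
    assert getport('8000') == 8000
    assert getport('http') == 80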
1780 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1765 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1781 '0': False, 'no': False, 'false': False, 'off': False,
1766 '0': False, 'no': False, 'false': False, 'off': False,
1782 'never': False}
1767 'never': False}
1783
1768
1784 def parsebool(s):
1769 def parsebool(s):
1785 """Parse s into a boolean.
1770 """Parse s into a boolean.
1786
1771
1787 If s is not a valid boolean, returns None.
1772 If s is not a valid boolean, returns None.
1788 """
1773 """
1789 return _booleans.get(s.lower(), None)
1774 return _booleans.get(s.lower(), None)
1790
1775
1791 _hexdig = '0123456789ABCDEFabcdef'
1776 _hexdig = '0123456789ABCDEFabcdef'
1792 _hextochr = dict((a + b, chr(int(a + b, 16)))
1777 _hextochr = dict((a + b, chr(int(a + b, 16)))
1793 for a in _hexdig for b in _hexdig)
1778 for a in _hexdig for b in _hexdig)
1794
1779
1795 def _urlunquote(s):
1780 def _urlunquote(s):
1796 """Decode HTTP/HTML % encoding.
1781 """Decode HTTP/HTML % encoding.
1797
1782
1798 >>> _urlunquote('abc%20def')
1783 >>> _urlunquote('abc%20def')
1799 'abc def'
1784 'abc def'
1800 """
1785 """
1801 res = s.split('%')
1786 res = s.split('%')
1802 # fastpath
1787 # fastpath
1803 if len(res) == 1:
1788 if len(res) == 1:
1804 return s
1789 return s
1805 s = res[0]
1790 s = res[0]
1806 for item in res[1:]:
1791 for item in res[1:]:
1807 try:
1792 try:
1808 s += _hextochr[item[:2]] + item[2:]
1793 s += _hextochr[item[:2]] + item[2:]
1809 except KeyError:
1794 except KeyError:
1810 s += '%' + item
1795 s += '%' + item
1811 except UnicodeDecodeError:
1796 except UnicodeDecodeError:
1812 s += unichr(int(item[:2], 16)) + item[2:]
1797 s += unichr(int(item[:2], 16)) + item[2:]
1813 return s
1798 return s
1814
1799
1815 class url(object):
1800 class url(object):
1816 r"""Reliable URL parser.
1801 r"""Reliable URL parser.
1817
1802
1818 This parses URLs and provides attributes for the following
1803 This parses URLs and provides attributes for the following
1819 components:
1804 components:
1820
1805
1821 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1806 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1822
1807
1823 Missing components are set to None. The only exception is
1808 Missing components are set to None. The only exception is
1824 fragment, which is set to '' if present but empty.
1809 fragment, which is set to '' if present but empty.
1825
1810
1826 If parsefragment is False, fragment is included in query. If
1811 If parsefragment is False, fragment is included in query. If
1827 parsequery is False, query is included in path. If both are
1812 parsequery is False, query is included in path. If both are
1828 False, both fragment and query are included in path.
1813 False, both fragment and query are included in path.
1829
1814
1830 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1815 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1831
1816
1832 Note that for backward compatibility reasons, bundle URLs do not
1817 Note that for backward compatibility reasons, bundle URLs do not
1833 take host names. That means 'bundle://../' has a path of '../'.
1818 take host names. That means 'bundle://../' has a path of '../'.
1834
1819
1835 Examples:
1820 Examples:
1836
1821
1837 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1822 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1838 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1823 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1839 >>> url('ssh://[::1]:2200//home/joe/repo')
1824 >>> url('ssh://[::1]:2200//home/joe/repo')
1840 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1825 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1841 >>> url('file:///home/joe/repo')
1826 >>> url('file:///home/joe/repo')
1842 <url scheme: 'file', path: '/home/joe/repo'>
1827 <url scheme: 'file', path: '/home/joe/repo'>
1843 >>> url('file:///c:/temp/foo/')
1828 >>> url('file:///c:/temp/foo/')
1844 <url scheme: 'file', path: 'c:/temp/foo/'>
1829 <url scheme: 'file', path: 'c:/temp/foo/'>
1845 >>> url('bundle:foo')
1830 >>> url('bundle:foo')
1846 <url scheme: 'bundle', path: 'foo'>
1831 <url scheme: 'bundle', path: 'foo'>
1847 >>> url('bundle://../foo')
1832 >>> url('bundle://../foo')
1848 <url scheme: 'bundle', path: '../foo'>
1833 <url scheme: 'bundle', path: '../foo'>
1849 >>> url(r'c:\foo\bar')
1834 >>> url(r'c:\foo\bar')
1850 <url path: 'c:\\foo\\bar'>
1835 <url path: 'c:\\foo\\bar'>
1851 >>> url(r'\\blah\blah\blah')
1836 >>> url(r'\\blah\blah\blah')
1852 <url path: '\\\\blah\\blah\\blah'>
1837 <url path: '\\\\blah\\blah\\blah'>
1853 >>> url(r'\\blah\blah\blah#baz')
1838 >>> url(r'\\blah\blah\blah#baz')
1854 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1839 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1855 >>> url(r'file:///C:\users\me')
1840 >>> url(r'file:///C:\users\me')
1856 <url scheme: 'file', path: 'C:\\users\\me'>
1841 <url scheme: 'file', path: 'C:\\users\\me'>
1857
1842
1858 Authentication credentials:
1843 Authentication credentials:
1859
1844
1860 >>> url('ssh://joe:xyz@x/repo')
1845 >>> url('ssh://joe:xyz@x/repo')
1861 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1846 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1862 >>> url('ssh://joe@x/repo')
1847 >>> url('ssh://joe@x/repo')
1863 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1848 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1864
1849
1865 Query strings and fragments:
1850 Query strings and fragments:
1866
1851
1867 >>> url('http://host/a?b#c')
1852 >>> url('http://host/a?b#c')
1868 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1853 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1869 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1854 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1870 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1855 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1871 """
1856 """
1872
1857
1873 _safechars = "!~*'()+"
1858 _safechars = "!~*'()+"
1874 _safepchars = "/!~*'()+:\\"
1859 _safepchars = "/!~*'()+:\\"
1875 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1860 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1876
1861
1877 def __init__(self, path, parsequery=True, parsefragment=True):
1862 def __init__(self, path, parsequery=True, parsefragment=True):
1878 # We slowly chomp away at path until we have only the path left
1863 # We slowly chomp away at path until we have only the path left
1879 self.scheme = self.user = self.passwd = self.host = None
1864 self.scheme = self.user = self.passwd = self.host = None
1880 self.port = self.path = self.query = self.fragment = None
1865 self.port = self.path = self.query = self.fragment = None
1881 self._localpath = True
1866 self._localpath = True
1882 self._hostport = ''
1867 self._hostport = ''
1883 self._origpath = path
1868 self._origpath = path
1884
1869
1885 if parsefragment and '#' in path:
1870 if parsefragment and '#' in path:
1886 path, self.fragment = path.split('#', 1)
1871 path, self.fragment = path.split('#', 1)
1887 if not path:
1872 if not path:
1888 path = None
1873 path = None
1889
1874
1890 # special case for Windows drive letters and UNC paths
1875 # special case for Windows drive letters and UNC paths
1891 if hasdriveletter(path) or path.startswith(r'\\'):
1876 if hasdriveletter(path) or path.startswith(r'\\'):
1892 self.path = path
1877 self.path = path
1893 return
1878 return
1894
1879
1895 # For compatibility reasons, we can't handle bundle paths as
1880 # For compatibility reasons, we can't handle bundle paths as
1896 # normal URLs
1881 # normal URLs
1897 if path.startswith('bundle:'):
1882 if path.startswith('bundle:'):
1898 self.scheme = 'bundle'
1883 self.scheme = 'bundle'
1899 path = path[7:]
1884 path = path[7:]
1900 if path.startswith('//'):
1885 if path.startswith('//'):
1901 path = path[2:]
1886 path = path[2:]
1902 self.path = path
1887 self.path = path
1903 return
1888 return
1904
1889
1905 if self._matchscheme(path):
1890 if self._matchscheme(path):
1906 parts = path.split(':', 1)
1891 parts = path.split(':', 1)
1907 if parts[0]:
1892 if parts[0]:
1908 self.scheme, path = parts
1893 self.scheme, path = parts
1909 self._localpath = False
1894 self._localpath = False
1910
1895
1911 if not path:
1896 if not path:
1912 path = None
1897 path = None
1913 if self._localpath:
1898 if self._localpath:
1914 self.path = ''
1899 self.path = ''
1915 return
1900 return
1916 else:
1901 else:
1917 if self._localpath:
1902 if self._localpath:
1918 self.path = path
1903 self.path = path
1919 return
1904 return
1920
1905
1921 if parsequery and '?' in path:
1906 if parsequery and '?' in path:
1922 path, self.query = path.split('?', 1)
1907 path, self.query = path.split('?', 1)
1923 if not path:
1908 if not path:
1924 path = None
1909 path = None
1925 if not self.query:
1910 if not self.query:
1926 self.query = None
1911 self.query = None
1927
1912
1928 # // is required to specify a host/authority
1913 # // is required to specify a host/authority
1929 if path and path.startswith('//'):
1914 if path and path.startswith('//'):
1930 parts = path[2:].split('/', 1)
1915 parts = path[2:].split('/', 1)
1931 if len(parts) > 1:
1916 if len(parts) > 1:
1932 self.host, path = parts
1917 self.host, path = parts
1933 else:
1918 else:
1934 self.host = parts[0]
1919 self.host = parts[0]
1935 path = None
1920 path = None
1936 if not self.host:
1921 if not self.host:
1937 self.host = None
1922 self.host = None
1938 # path of file:///d is /d
1923 # path of file:///d is /d
1939 # path of file:///d:/ is d:/, not /d:/
1924 # path of file:///d:/ is d:/, not /d:/
1940 if path and not hasdriveletter(path):
1925 if path and not hasdriveletter(path):
1941 path = '/' + path
1926 path = '/' + path
1942
1927
1943 if self.host and '@' in self.host:
1928 if self.host and '@' in self.host:
1944 self.user, self.host = self.host.rsplit('@', 1)
1929 self.user, self.host = self.host.rsplit('@', 1)
1945 if ':' in self.user:
1930 if ':' in self.user:
1946 self.user, self.passwd = self.user.split(':', 1)
1931 self.user, self.passwd = self.user.split(':', 1)
1947 if not self.host:
1932 if not self.host:
1948 self.host = None
1933 self.host = None
1949
1934
1950 # Don't split on colons in IPv6 addresses without ports
1935 # Don't split on colons in IPv6 addresses without ports
1951 if (self.host and ':' in self.host and
1936 if (self.host and ':' in self.host and
1952 not (self.host.startswith('[') and self.host.endswith(']'))):
1937 not (self.host.startswith('[') and self.host.endswith(']'))):
1953 self._hostport = self.host
1938 self._hostport = self.host
1954 self.host, self.port = self.host.rsplit(':', 1)
1939 self.host, self.port = self.host.rsplit(':', 1)
1955 if not self.host:
1940 if not self.host:
1956 self.host = None
1941 self.host = None
1957
1942
1958 if (self.host and self.scheme == 'file' and
1943 if (self.host and self.scheme == 'file' and
1959 self.host not in ('localhost', '127.0.0.1', '[::1]')):
1944 self.host not in ('localhost', '127.0.0.1', '[::1]')):
1960 raise Abort(_('file:// URLs can only refer to localhost'))
1945 raise Abort(_('file:// URLs can only refer to localhost'))
1961
1946
1962 self.path = path
1947 self.path = path
1963
1948
1964 # leave the query string escaped
1949 # leave the query string escaped
1965 for a in ('user', 'passwd', 'host', 'port',
1950 for a in ('user', 'passwd', 'host', 'port',
1966 'path', 'fragment'):
1951 'path', 'fragment'):
1967 v = getattr(self, a)
1952 v = getattr(self, a)
1968 if v is not None:
1953 if v is not None:
1969 setattr(self, a, _urlunquote(v))
1954 setattr(self, a, _urlunquote(v))
1970
1955
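# Illustrative sketch (hypothetical host and credentials, doctest style as
# used in this module) of how __init__ decomposes a typical remote URL into
# its components:
#
#   >>> u = url('ssh://joe:xyzzy@example.com:2222/repo')
#   >>> u.scheme, u.user, u.passwd, u.host, u.port, u.path
#   ('ssh', 'joe', 'xyzzy', 'example.com', '2222', 'repo')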
1971 def __repr__(self):
1956 def __repr__(self):
1972 attrs = []
1957 attrs = []
1973 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
1958 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
1974 'query', 'fragment'):
1959 'query', 'fragment'):
1975 v = getattr(self, a)
1960 v = getattr(self, a)
1976 if v is not None:
1961 if v is not None:
1977 attrs.append('%s: %r' % (a, v))
1962 attrs.append('%s: %r' % (a, v))
1978 return '<url %s>' % ', '.join(attrs)
1963 return '<url %s>' % ', '.join(attrs)
1979
1964
1980 def __str__(self):
1965 def __str__(self):
1981 r"""Join the URL's components back into a URL string.
1966 r"""Join the URL's components back into a URL string.
1982
1967
1983 Examples:
1968 Examples:
1984
1969
1985 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
1970 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
1986 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
1971 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
1987 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
1972 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
1988 'http://user:pw@host:80/?foo=bar&baz=42'
1973 'http://user:pw@host:80/?foo=bar&baz=42'
1989 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
1974 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
1990 'http://user:pw@host:80/?foo=bar%3dbaz'
1975 'http://user:pw@host:80/?foo=bar%3dbaz'
1991 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
1976 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
1992 'ssh://user:pw@[::1]:2200//home/joe#'
1977 'ssh://user:pw@[::1]:2200//home/joe#'
1993 >>> str(url('http://localhost:80//'))
1978 >>> str(url('http://localhost:80//'))
1994 'http://localhost:80//'
1979 'http://localhost:80//'
1995 >>> str(url('http://localhost:80/'))
1980 >>> str(url('http://localhost:80/'))
1996 'http://localhost:80/'
1981 'http://localhost:80/'
1997 >>> str(url('http://localhost:80'))
1982 >>> str(url('http://localhost:80'))
1998 'http://localhost:80/'
1983 'http://localhost:80/'
1999 >>> str(url('bundle:foo'))
1984 >>> str(url('bundle:foo'))
2000 'bundle:foo'
1985 'bundle:foo'
2001 >>> str(url('bundle://../foo'))
1986 >>> str(url('bundle://../foo'))
2002 'bundle:../foo'
1987 'bundle:../foo'
2003 >>> str(url('path'))
1988 >>> str(url('path'))
2004 'path'
1989 'path'
2005 >>> str(url('file:///tmp/foo/bar'))
1990 >>> str(url('file:///tmp/foo/bar'))
2006 'file:///tmp/foo/bar'
1991 'file:///tmp/foo/bar'
2007 >>> str(url('file:///c:/tmp/foo/bar'))
1992 >>> str(url('file:///c:/tmp/foo/bar'))
2008 'file:///c:/tmp/foo/bar'
1993 'file:///c:/tmp/foo/bar'
2009 >>> print url(r'bundle:foo\bar')
1994 >>> print url(r'bundle:foo\bar')
2010 bundle:foo\bar
1995 bundle:foo\bar
2011 >>> print url(r'file:///D:\data\hg')
1996 >>> print url(r'file:///D:\data\hg')
2012 file:///D:\data\hg
1997 file:///D:\data\hg
2013 """
1998 """
2014 if self._localpath:
1999 if self._localpath:
2015 s = self.path
2000 s = self.path
2016 if self.scheme == 'bundle':
2001 if self.scheme == 'bundle':
2017 s = 'bundle:' + s
2002 s = 'bundle:' + s
2018 if self.fragment:
2003 if self.fragment:
2019 s += '#' + self.fragment
2004 s += '#' + self.fragment
2020 return s
2005 return s
2021
2006
2022 s = self.scheme + ':'
2007 s = self.scheme + ':'
2023 if self.user or self.passwd or self.host:
2008 if self.user or self.passwd or self.host:
2024 s += '//'
2009 s += '//'
2025 elif self.scheme and (not self.path or self.path.startswith('/')
2010 elif self.scheme and (not self.path or self.path.startswith('/')
2026 or hasdriveletter(self.path)):
2011 or hasdriveletter(self.path)):
2027 s += '//'
2012 s += '//'
2028 if hasdriveletter(self.path):
2013 if hasdriveletter(self.path):
2029 s += '/'
2014 s += '/'
2030 if self.user:
2015 if self.user:
2031 s += urllib.quote(self.user, safe=self._safechars)
2016 s += urllib.quote(self.user, safe=self._safechars)
2032 if self.passwd:
2017 if self.passwd:
2033 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
2018 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
2034 if self.user or self.passwd:
2019 if self.user or self.passwd:
2035 s += '@'
2020 s += '@'
2036 if self.host:
2021 if self.host:
2037 if not (self.host.startswith('[') and self.host.endswith(']')):
2022 if not (self.host.startswith('[') and self.host.endswith(']')):
2038 s += urllib.quote(self.host)
2023 s += urllib.quote(self.host)
2039 else:
2024 else:
2040 s += self.host
2025 s += self.host
2041 if self.port:
2026 if self.port:
2042 s += ':' + urllib.quote(self.port)
2027 s += ':' + urllib.quote(self.port)
2043 if self.host:
2028 if self.host:
2044 s += '/'
2029 s += '/'
2045 if self.path:
2030 if self.path:
2046 # TODO: similar to the query string, we should not unescape the
2031 # TODO: similar to the query string, we should not unescape the
2047 # path when we store it, the path might contain '%2f' = '/',
2032 # path when we store it, the path might contain '%2f' = '/',
2048 # which we should *not* escape.
2033 # which we should *not* escape.
2049 s += urllib.quote(self.path, safe=self._safepchars)
2034 s += urllib.quote(self.path, safe=self._safepchars)
2050 if self.query:
2035 if self.query:
2051 # we store the query in escaped form.
2036 # we store the query in escaped form.
2052 s += '?' + self.query
2037 s += '?' + self.query
2053 if self.fragment is not None:
2038 if self.fragment is not None:
2054 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
2039 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
2055 return s
2040 return s
2056
2041
2057 def authinfo(self):
2042 def authinfo(self):
2058 user, passwd = self.user, self.passwd
2043 user, passwd = self.user, self.passwd
2059 try:
2044 try:
2060 self.user, self.passwd = None, None
2045 self.user, self.passwd = None, None
2061 s = str(self)
2046 s = str(self)
2062 finally:
2047 finally:
2063 self.user, self.passwd = user, passwd
2048 self.user, self.passwd = user, passwd
2064 if not self.user:
2049 if not self.user:
2065 return (s, None)
2050 return (s, None)
2066 # authinfo[1] is passed to urllib2 password manager, and its
2051 # authinfo[1] is passed to urllib2 password manager, and its
2067 # URIs must not contain credentials. The host is passed in the
2052 # URIs must not contain credentials. The host is passed in the
2068 # URIs list because Python < 2.4.3 uses only that to search for
2053 # URIs list because Python < 2.4.3 uses only that to search for
2069 # a password.
2054 # a password.
2070 return (s, (None, (s, self.host),
2055 return (s, (None, (s, self.host),
2071 self.user, self.passwd or ''))
2056 self.user, self.passwd or ''))
2072
2057
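# Illustrative sketch of authinfo() with hypothetical credentials: the
# returned URI has the credentials stripped, while they are still handed
# to the password manager separately.
#
#   >>> url('http://joe:xyzzy@example.com/repo').authinfo()
#   ('http://example.com/repo', (None, ('http://example.com/repo', 'example.com'), 'joe', 'xyzzy'))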
2073 def isabs(self):
2058 def isabs(self):
2074 if self.scheme and self.scheme != 'file':
2059 if self.scheme and self.scheme != 'file':
2075 return True # remote URL
2060 return True # remote URL
2076 if hasdriveletter(self.path):
2061 if hasdriveletter(self.path):
2077 return True # absolute for our purposes - can't be joined()
2062 return True # absolute for our purposes - can't be joined()
2078 if self.path.startswith(r'\\'):
2063 if self.path.startswith(r'\\'):
2079 return True # Windows UNC path
2064 return True # Windows UNC path
2080 if self.path.startswith('/'):
2065 if self.path.startswith('/'):
2081 return True # POSIX-style
2066 return True # POSIX-style
2082 return False
2067 return False
2083
2068
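# Illustrative sketch of isabs() with hypothetical paths, covering a remote
# scheme, a drive letter, and a relative local path:
#
#   >>> url('https://example.com/repo').isabs()
#   True
#   >>> url('c:/temp/repo').isabs()
#   True
#   >>> url('foo/bar').isabs()
#   False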
2084 def localpath(self):
2069 def localpath(self):
2085 if self.scheme == 'file' or self.scheme == 'bundle':
2070 if self.scheme == 'file' or self.scheme == 'bundle':
2086 path = self.path or '/'
2071 path = self.path or '/'
2087 # For Windows, we need to promote hosts containing drive
2072 # For Windows, we need to promote hosts containing drive
2088 # letters to paths with drive letters.
2073 # letters to paths with drive letters.
2089 if hasdriveletter(self._hostport):
2074 if hasdriveletter(self._hostport):
2090 path = self._hostport + '/' + self.path
2075 path = self._hostport + '/' + self.path
2091 elif (self.host is not None and self.path
2076 elif (self.host is not None and self.path
2092 and not hasdriveletter(path)):
2077 and not hasdriveletter(path)):
2093 path = '/' + path
2078 path = '/' + path
2094 return path
2079 return path
2095 return self._origpath
2080 return self._origpath
2096
2081
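# Illustrative sketch of localpath() with hypothetical paths, mirroring the
# file:// doctests in __str__ above:
#
#   >>> url('file:///tmp/foo/bar').localpath()
#   '/tmp/foo/bar'
#   >>> url('file:///c:/tmp/foo/bar').localpath()
#   'c:/tmp/foo/bar'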
2097 def islocal(self):
2082 def islocal(self):
2098 '''whether localpath will return something that posixfile can open'''
2083 '''whether localpath will return something that posixfile can open'''
2099 return (not self.scheme or self.scheme == 'file'
2084 return (not self.scheme or self.scheme == 'file'
2100 or self.scheme == 'bundle')
2085 or self.scheme == 'bundle')
2101
2086
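# Illustrative sketch of islocal() with hypothetical paths: plain paths and
# file/bundle URLs are local, anything with another scheme is not.
#
#   >>> url('http://example.com/repo').islocal()
#   False
#   >>> url('/tmp/repo').islocal()
#   True
#   >>> url('bundle:../foo').islocal()
#   True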
2102 def hasscheme(path):
2087 def hasscheme(path):
2103 return bool(url(path).scheme)
2088 return bool(url(path).scheme)
2104
2089
2105 def hasdriveletter(path):
2090 def hasdriveletter(path):
2106 return path and path[1:2] == ':' and path[0:1].isalpha()
2091 return path and path[1:2] == ':' and path[0:1].isalpha()
2107
2092
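# Illustrative sketch of hasscheme() and hasdriveletter() with hypothetical
# paths:
#
#   >>> hasscheme('https://example.com/repo'), hasscheme('/tmp/repo')
#   (True, False)
#   >>> hasdriveletter('c:\\temp'), hasdriveletter('/tmp')
#   (True, False)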
2108 def urllocalpath(path):
2093 def urllocalpath(path):
2109 return url(path, parsequery=False, parsefragment=False).localpath()
2094 return url(path, parsequery=False, parsefragment=False).localpath()
2110
2095
2111 def hidepassword(u):
2096 def hidepassword(u):
2112 '''hide user credential in a url string'''
2097 '''hide user credential in a url string'''
2113 u = url(u)
2098 u = url(u)
2114 if u.passwd:
2099 if u.passwd:
2115 u.passwd = '***'
2100 u.passwd = '***'
2116 return str(u)
2101 return str(u)
2117
2102
2118 def removeauth(u):
2103 def removeauth(u):
2119 '''remove all authentication information from a url string'''
2104 '''remove all authentication information from a url string'''
2120 u = url(u)
2105 u = url(u)
2121 u.user = u.passwd = None
2106 u.user = u.passwd = None
2122 return str(u)
2107 return str(u)
2123
2108
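# Illustrative sketch of hidepassword() and removeauth() with hypothetical
# credentials:
#
#   >>> hidepassword('http://joe:xyzzy@example.com/repo')
#   'http://joe:***@example.com/repo'
#   >>> removeauth('http://joe:xyzzy@example.com/repo')
#   'http://example.com/repo'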
2124 def isatty(fd):
2109 def isatty(fd):
2125 try:
2110 try:
2126 return fd.isatty()
2111 return fd.isatty()
2127 except AttributeError:
2112 except AttributeError:
2128 return False
2113 return False
2129
2114
2130 timecount = unitcountfn(
2115 timecount = unitcountfn(
2131 (1, 1e3, _('%.0f s')),
2116 (1, 1e3, _('%.0f s')),
2132 (100, 1, _('%.1f s')),
2117 (100, 1, _('%.1f s')),
2133 (10, 1, _('%.2f s')),
2118 (10, 1, _('%.2f s')),
2134 (1, 1, _('%.3f s')),
2119 (1, 1, _('%.3f s')),
2135 (100, 0.001, _('%.1f ms')),
2120 (100, 0.001, _('%.1f ms')),
2136 (10, 0.001, _('%.2f ms')),
2121 (10, 0.001, _('%.2f ms')),
2137 (1, 0.001, _('%.3f ms')),
2122 (1, 0.001, _('%.3f ms')),
2138 (100, 0.000001, _('%.1f us')),
2123 (100, 0.000001, _('%.1f us')),
2139 (10, 0.000001, _('%.2f us')),
2124 (10, 0.000001, _('%.2f us')),
2140 (1, 0.000001, _('%.3f us')),
2125 (1, 0.000001, _('%.3f us')),
2141 (100, 0.000000001, _('%.1f ns')),
2126 (100, 0.000000001, _('%.1f ns')),
2142 (10, 0.000000001, _('%.2f ns')),
2127 (10, 0.000000001, _('%.2f ns')),
2143 (1, 0.000000001, _('%.3f ns')),
2128 (1, 0.000000001, _('%.3f ns')),
2144 )
2129 )
2145
2130
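# Illustrative sketch of timecount(), assuming unitcountfn (defined earlier
# in this module) renders a value with the first unit whose threshold it
# reaches:
#
#   >>> timecount(2.5)
#   '2.500 s'
#   >>> timecount(0.0012)
#   '1.200 ms'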
2146 _timenesting = [0]
2131 _timenesting = [0]
2147
2132
2148 def timed(func):
2133 def timed(func):
2149 '''Report the execution time of a function call to stderr.
2134 '''Report the execution time of a function call to stderr.
2150
2135
2151 During development, use as a decorator when you need to measure
2136 During development, use as a decorator when you need to measure
2152 the cost of a function, e.g. as follows:
2137 the cost of a function, e.g. as follows:
2153
2138
2154 @util.timed
2139 @util.timed
2155 def foo(a, b, c):
2140 def foo(a, b, c):
2156 pass
2141 pass
2157 '''
2142 '''
2158
2143
2159 def wrapper(*args, **kwargs):
2144 def wrapper(*args, **kwargs):
2160 start = time.time()
2145 start = time.time()
2161 indent = 2
2146 indent = 2
2162 _timenesting[0] += indent
2147 _timenesting[0] += indent
2163 try:
2148 try:
2164 return func(*args, **kwargs)
2149 return func(*args, **kwargs)
2165 finally:
2150 finally:
2166 elapsed = time.time() - start
2151 elapsed = time.time() - start
2167 _timenesting[0] -= indent
2152 _timenesting[0] -= indent
2168 sys.stderr.write('%s%s: %s\n' %
2153 sys.stderr.write('%s%s: %s\n' %
2169 (' ' * _timenesting[0], func.__name__,
2154 (' ' * _timenesting[0], func.__name__,
2170 timecount(elapsed)))
2155 timecount(elapsed)))
2171 return wrapper
2156 return wrapper
2172
2157
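# Illustrative sketch of the stderr output produced by @util.timed when
# timed calls nest (hypothetical names and durations): inner calls are
# indented by _timenesting and reported before their callers.
#
#     inner: 1.200 ms
#   outer: 5.600 ms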
2173 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2158 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2174 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2159 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2175
2160
2176 def sizetoint(s):
2161 def sizetoint(s):
2177 '''Convert a space specifier to a byte count.
2162 '''Convert a space specifier to a byte count.
2178
2163
2179 >>> sizetoint('30')
2164 >>> sizetoint('30')
2180 30
2165 30
2181 >>> sizetoint('2.2kb')
2166 >>> sizetoint('2.2kb')
2182 2252
2167 2252
2183 >>> sizetoint('6M')
2168 >>> sizetoint('6M')
2184 6291456
2169 6291456
2185 '''
2170 '''
2186 t = s.strip().lower()
2171 t = s.strip().lower()
2187 try:
2172 try:
2188 for k, u in _sizeunits:
2173 for k, u in _sizeunits:
2189 if t.endswith(k):
2174 if t.endswith(k):
2190 return int(float(t[:-len(k)]) * u)
2175 return int(float(t[:-len(k)]) * u)
2191 return int(t)
2176 return int(t)
2192 except ValueError:
2177 except ValueError:
2193 raise error.ParseError(_("couldn't parse size: %s") % s)
2178 raise error.ParseError(_("couldn't parse size: %s") % s)
2194
2179
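# Illustrative sketch of sizetoint(): suffix matching is case-insensitive,
# and the bare 'b' unit is listed last in _sizeunits so it cannot shadow
# 'kb'/'mb'/'gb'.
#
#   >>> sizetoint('1.5 GB')
#   1610612736
#   >>> sizetoint('512b')
#   512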
2195 class hooks(object):
2180 class hooks(object):
2196 '''A collection of hook functions that can be used to extend a
2181 '''A collection of hook functions that can be used to extend a
2197 function's behaviour. Hooks are called in lexicographic order,
2182 function's behaviour. Hooks are called in lexicographic order,
2198 based on the names of their sources.'''
2183 based on the names of their sources.'''
2199
2184
2200 def __init__(self):
2185 def __init__(self):
2201 self._hooks = []
2186 self._hooks = []
2202
2187
2203 def add(self, source, hook):
2188 def add(self, source, hook):
2204 self._hooks.append((source, hook))
2189 self._hooks.append((source, hook))
2205
2190
2206 def __call__(self, *args):
2191 def __call__(self, *args):
2207 self._hooks.sort(key=lambda x: x[0])
2192 self._hooks.sort(key=lambda x: x[0])
2208 results = []
2193 results = []
2209 for source, hook in self._hooks:
2194 for source, hook in self._hooks:
2210 results.append(hook(*args))
2195 results.append(hook(*args))
2211 return results
2196 return results
2212
2197
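# Illustrative usage sketch of the hooks class with hypothetical sources:
# hooks run in lexicographic order of their source name, not in the order
# they were added.
#
#   >>> h = hooks()
#   >>> h.add('zzz', lambda x: x + 1)
#   >>> h.add('aaa', lambda x: x * 2)
#   >>> h(10)
#   [20, 11]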
2213 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2198 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2214 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2199 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2215 Skips the 'skip' last entries. By default it will flush stdout first.
2200 Skips the 'skip' last entries. By default it will flush stdout first.
2216 It can be used everywhere and intentionally does not require a ui object.
2201 It can be used everywhere and intentionally does not require a ui object.
2217 Not meant for production code but very convenient while developing.
2202 Not meant for production code but very convenient while developing.
2218 '''
2203 '''
2219 if otherf:
2204 if otherf:
2220 otherf.flush()
2205 otherf.flush()
2221 f.write('%s at:\n' % msg)
2206 f.write('%s at:\n' % msg)
2222 entries = [('%s:%s' % (fn, ln), func)
2207 entries = [('%s:%s' % (fn, ln), func)
2223 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2208 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2224 if entries:
2209 if entries:
2225 fnmax = max(len(entry[0]) for entry in entries)
2210 fnmax = max(len(entry[0]) for entry in entries)
2226 for fnln, func in entries:
2211 for fnln, func in entries:
2227 f.write(' %-*s in %s\n' % (fnmax, fnln, func))
2212 f.write(' %-*s in %s\n' % (fnmax, fnln, func))
2228 f.flush()
2213 f.flush()
2229
2214
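# Illustrative sketch of debugstacktrace() output (hypothetical file names
# and line numbers), roughly matching the '%s at:' header and the aligned
# frame lines written above, outermost frame first:
#
#   stacktrace at:
#    dispatch.py:903 in run
#    commands.py:555 in status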
2230 class dirs(object):
2215 class dirs(object):
2231 '''a multiset of directory names from a dirstate or manifest'''
2216 '''a multiset of directory names from a dirstate or manifest'''
2232
2217
2233 def __init__(self, map, skip=None):
2218 def __init__(self, map, skip=None):
2234 self._dirs = {}
2219 self._dirs = {}
2235 addpath = self.addpath
2220 addpath = self.addpath
2236 if safehasattr(map, 'iteritems') and skip is not None:
2221 if safehasattr(map, 'iteritems') and skip is not None:
2237 for f, s in map.iteritems():
2222 for f, s in map.iteritems():
2238 if s[0] != skip:
2223 if s[0] != skip:
2239 addpath(f)
2224 addpath(f)
2240 else:
2225 else:
2241 for f in map:
2226 for f in map:
2242 addpath(f)
2227 addpath(f)
2243
2228
2244 def addpath(self, path):
2229 def addpath(self, path):
2245 dirs = self._dirs
2230 dirs = self._dirs
2246 for base in finddirs(path):
2231 for base in finddirs(path):
2247 if base in dirs:
2232 if base in dirs:
2248 dirs[base] += 1
2233 dirs[base] += 1
2249 return
2234 return
2250 dirs[base] = 1
2235 dirs[base] = 1
2251
2236
2252 def delpath(self, path):
2237 def delpath(self, path):
2253 dirs = self._dirs
2238 dirs = self._dirs
2254 for base in finddirs(path):
2239 for base in finddirs(path):
2255 if dirs[base] > 1:
2240 if dirs[base] > 1:
2256 dirs[base] -= 1
2241 dirs[base] -= 1
2257 return
2242 return
2258 del dirs[base]
2243 del dirs[base]
2259
2244
2260 def __iter__(self):
2245 def __iter__(self):
2261 return self._dirs.iterkeys()
2246 return self._dirs.iterkeys()
2262
2247
2263 def __contains__(self, d):
2248 def __contains__(self, d):
2264 return d in self._dirs
2249 return d in self._dirs
2265
2250
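# Illustrative sketch of the pure-Python dirs class above with hypothetical
# paths: it keeps a reference count per directory, so delpath() only forgets
# a directory once its last user is gone.
#
#   >>> d = dirs(['a/b/c', 'a/b/d', 'x/y'])
#   >>> sorted(d)
#   ['a', 'a/b', 'x']
#   >>> d.delpath('a/b/c')
#   >>> 'a/b' in d
#   True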
2266 if safehasattr(parsers, 'dirs'):
2251 if safehasattr(parsers, 'dirs'):
2267 dirs = parsers.dirs
2252 dirs = parsers.dirs
2268
2253
2269 def finddirs(path):
2254 def finddirs(path):
2270 pos = path.rfind('/')
2255 pos = path.rfind('/')
2271 while pos != -1:
2256 while pos != -1:
2272 yield path[:pos]
2257 yield path[:pos]
2273 pos = path.rfind('/', 0, pos)
2258 pos = path.rfind('/', 0, pos)
2274
2259
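# Illustrative sketch of finddirs() with a hypothetical path: ancestor
# directories are yielded deepest first, and a path with no '/' yields
# nothing.
#
#   >>> list(finddirs('a/b/c'))
#   ['a/b', 'a']
#   >>> list(finddirs('rootfile'))
#   []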
2275 # convenient shortcut
2260 # convenient shortcut
2276 dst = debugstacktrace
2261 dst = debugstacktrace