util: kill Python 2.4 deque.remove hack
Adrian Buehlmann
r25112:3d14c121 default
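For context: collections.deque.remove() has existed since Python 2.5, so the
compatibility shim below is dead code once Python 2.4 support is gone. A
minimal sketch of the shim being removed (same names as in util.py; the
lrucachedict and lrucachefunc helpers further down rely on deque.remove for
their LRU bookkeeping):

    import collections

    try:
        collections.deque.remove          # present on Python >= 2.5
        deque = collections.deque
    except AttributeError:
        # Python 2.4: emulate remove() by deleting the first matching element
        class deque(collections.deque):
            def remove(self, val):
                for i, v in enumerate(self):
                    if v == val:
                        del self[i]
                        break

After this change the module simply binds deque = collections.deque.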
@@ -1,2288 +1,2278 @@
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding, parsers
18 import error, osutil, encoding, parsers
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib, struct
22 import imp, socket, urllib, struct
23 import gc
23 import gc
24
24
25 if os.name == 'nt':
25 if os.name == 'nt':
26 import windows as platform
26 import windows as platform
27 else:
27 else:
28 import posix as platform
28 import posix as platform
29
29
30 cachestat = platform.cachestat
30 cachestat = platform.cachestat
31 checkexec = platform.checkexec
31 checkexec = platform.checkexec
32 checklink = platform.checklink
32 checklink = platform.checklink
33 copymode = platform.copymode
33 copymode = platform.copymode
34 executablepath = platform.executablepath
34 executablepath = platform.executablepath
35 expandglobs = platform.expandglobs
35 expandglobs = platform.expandglobs
36 explainexit = platform.explainexit
36 explainexit = platform.explainexit
37 findexe = platform.findexe
37 findexe = platform.findexe
38 gethgcmd = platform.gethgcmd
38 gethgcmd = platform.gethgcmd
39 getuser = platform.getuser
39 getuser = platform.getuser
40 groupmembers = platform.groupmembers
40 groupmembers = platform.groupmembers
41 groupname = platform.groupname
41 groupname = platform.groupname
42 hidewindow = platform.hidewindow
42 hidewindow = platform.hidewindow
43 isexec = platform.isexec
43 isexec = platform.isexec
44 isowner = platform.isowner
44 isowner = platform.isowner
45 localpath = platform.localpath
45 localpath = platform.localpath
46 lookupreg = platform.lookupreg
46 lookupreg = platform.lookupreg
47 makedir = platform.makedir
47 makedir = platform.makedir
48 nlinks = platform.nlinks
48 nlinks = platform.nlinks
49 normpath = platform.normpath
49 normpath = platform.normpath
50 normcase = platform.normcase
50 normcase = platform.normcase
51 normcasespec = platform.normcasespec
51 normcasespec = platform.normcasespec
52 normcasefallback = platform.normcasefallback
52 normcasefallback = platform.normcasefallback
53 openhardlinks = platform.openhardlinks
53 openhardlinks = platform.openhardlinks
54 oslink = platform.oslink
54 oslink = platform.oslink
55 parsepatchoutput = platform.parsepatchoutput
55 parsepatchoutput = platform.parsepatchoutput
56 pconvert = platform.pconvert
56 pconvert = platform.pconvert
57 popen = platform.popen
57 popen = platform.popen
58 posixfile = platform.posixfile
58 posixfile = platform.posixfile
59 quotecommand = platform.quotecommand
59 quotecommand = platform.quotecommand
60 readpipe = platform.readpipe
60 readpipe = platform.readpipe
61 rename = platform.rename
61 rename = platform.rename
62 removedirs = platform.removedirs
62 removedirs = platform.removedirs
63 samedevice = platform.samedevice
63 samedevice = platform.samedevice
64 samefile = platform.samefile
64 samefile = platform.samefile
65 samestat = platform.samestat
65 samestat = platform.samestat
66 setbinary = platform.setbinary
66 setbinary = platform.setbinary
67 setflags = platform.setflags
67 setflags = platform.setflags
68 setsignalhandler = platform.setsignalhandler
68 setsignalhandler = platform.setsignalhandler
69 shellquote = platform.shellquote
69 shellquote = platform.shellquote
70 spawndetached = platform.spawndetached
70 spawndetached = platform.spawndetached
71 split = platform.split
71 split = platform.split
72 sshargs = platform.sshargs
72 sshargs = platform.sshargs
73 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
73 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
74 statisexec = platform.statisexec
74 statisexec = platform.statisexec
75 statislink = platform.statislink
75 statislink = platform.statislink
76 termwidth = platform.termwidth
76 termwidth = platform.termwidth
77 testpid = platform.testpid
77 testpid = platform.testpid
78 umask = platform.umask
78 umask = platform.umask
79 unlink = platform.unlink
79 unlink = platform.unlink
80 unlinkpath = platform.unlinkpath
80 unlinkpath = platform.unlinkpath
81 username = platform.username
81 username = platform.username
82
82
83 # Python compatibility
83 # Python compatibility
84
84
85 _notset = object()
85 _notset = object()
86
86
87 def safehasattr(thing, attr):
87 def safehasattr(thing, attr):
88 return getattr(thing, attr, _notset) is not _notset
88 return getattr(thing, attr, _notset) is not _notset
89
89
90 def sha1(s=''):
90 def sha1(s=''):
91 '''
91 '''
92 Low-overhead wrapper around Python's SHA support
92 Low-overhead wrapper around Python's SHA support
93
93
94 >>> f = _fastsha1
94 >>> f = _fastsha1
95 >>> a = sha1()
95 >>> a = sha1()
96 >>> a = f()
96 >>> a = f()
97 >>> a.hexdigest()
97 >>> a.hexdigest()
98 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
98 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
99 '''
99 '''
100
100
101 return _fastsha1(s)
101 return _fastsha1(s)
102
102
103 def _fastsha1(s=''):
103 def _fastsha1(s=''):
104 # This function will import sha1 from hashlib or sha (whichever is
104 # This function will import sha1 from hashlib or sha (whichever is
105 # available) and overwrite itself with it on the first call.
105 # available) and overwrite itself with it on the first call.
106 # Subsequent calls will go directly to the imported function.
106 # Subsequent calls will go directly to the imported function.
107 if sys.version_info >= (2, 5):
107 if sys.version_info >= (2, 5):
108 from hashlib import sha1 as _sha1
108 from hashlib import sha1 as _sha1
109 else:
109 else:
110 from sha import sha as _sha1
110 from sha import sha as _sha1
111 global _fastsha1, sha1
111 global _fastsha1, sha1
112 _fastsha1 = sha1 = _sha1
112 _fastsha1 = sha1 = _sha1
113 return _sha1(s)
113 return _sha1(s)
114
114
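The pattern above is worth noting: _fastsha1 (and md5 below) import the real
implementation on first use and rebind the module-level name to it, so later
calls skip the import and version check entirely. A sketch of the same trick
with a hypothetical name, not part of util.py:

    def _fastloads(s):
        # first call: pick the fastest available pickle loader ...
        try:
            from cPickle import loads as _loads
        except ImportError:
            from pickle import loads as _loads
        # ... then replace ourselves so later calls dispatch straight to it
        global _fastloads
        _fastloads = _loads
        return _loads(s)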
115 def md5(s=''):
115 def md5(s=''):
116 try:
116 try:
117 from hashlib import md5 as _md5
117 from hashlib import md5 as _md5
118 except ImportError:
118 except ImportError:
119 from md5 import md5 as _md5
119 from md5 import md5 as _md5
120 global md5
120 global md5
121 md5 = _md5
121 md5 = _md5
122 return _md5(s)
122 return _md5(s)
123
123
124 DIGESTS = {
124 DIGESTS = {
125 'md5': md5,
125 'md5': md5,
126 'sha1': sha1,
126 'sha1': sha1,
127 }
127 }
128 # List of digest types from strongest to weakest
128 # List of digest types from strongest to weakest
129 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
129 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
130
130
131 try:
131 try:
132 import hashlib
132 import hashlib
133 DIGESTS.update({
133 DIGESTS.update({
134 'sha512': hashlib.sha512,
134 'sha512': hashlib.sha512,
135 })
135 })
136 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
136 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
137 except ImportError:
137 except ImportError:
138 pass
138 pass
139
139
140 for k in DIGESTS_BY_STRENGTH:
140 for k in DIGESTS_BY_STRENGTH:
141 assert k in DIGESTS
141 assert k in DIGESTS
142
142
143 class digester(object):
143 class digester(object):
144 """helper to compute digests.
144 """helper to compute digests.
145
145
146 This helper can be used to compute one or more digests given their name.
146 This helper can be used to compute one or more digests given their name.
147
147
148 >>> d = digester(['md5', 'sha1'])
148 >>> d = digester(['md5', 'sha1'])
149 >>> d.update('foo')
149 >>> d.update('foo')
150 >>> [k for k in sorted(d)]
150 >>> [k for k in sorted(d)]
151 ['md5', 'sha1']
151 ['md5', 'sha1']
152 >>> d['md5']
152 >>> d['md5']
153 'acbd18db4cc2f85cedef654fccc4a4d8'
153 'acbd18db4cc2f85cedef654fccc4a4d8'
154 >>> d['sha1']
154 >>> d['sha1']
155 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
155 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
156 >>> digester.preferred(['md5', 'sha1'])
156 >>> digester.preferred(['md5', 'sha1'])
157 'sha1'
157 'sha1'
158 """
158 """
159
159
160 def __init__(self, digests, s=''):
160 def __init__(self, digests, s=''):
161 self._hashes = {}
161 self._hashes = {}
162 for k in digests:
162 for k in digests:
163 if k not in DIGESTS:
163 if k not in DIGESTS:
164 raise Abort(_('unknown digest type: %s') % k)
164 raise Abort(_('unknown digest type: %s') % k)
165 self._hashes[k] = DIGESTS[k]()
165 self._hashes[k] = DIGESTS[k]()
166 if s:
166 if s:
167 self.update(s)
167 self.update(s)
168
168
169 def update(self, data):
169 def update(self, data):
170 for h in self._hashes.values():
170 for h in self._hashes.values():
171 h.update(data)
171 h.update(data)
172
172
173 def __getitem__(self, key):
173 def __getitem__(self, key):
174 if key not in DIGESTS:
174 if key not in DIGESTS:
175 raise Abort(_('unknown digest type: %s') % key)
175 raise Abort(_('unknown digest type: %s') % key)
176 return self._hashes[key].hexdigest()
176 return self._hashes[key].hexdigest()
177
177
178 def __iter__(self):
178 def __iter__(self):
179 return iter(self._hashes)
179 return iter(self._hashes)
180
180
181 @staticmethod
181 @staticmethod
182 def preferred(supported):
182 def preferred(supported):
183 """returns the strongest digest type in both supported and DIGESTS."""
183 """returns the strongest digest type in both supported and DIGESTS."""
184
184
185 for k in DIGESTS_BY_STRENGTH:
185 for k in DIGESTS_BY_STRENGTH:
186 if k in supported:
186 if k in supported:
187 return k
187 return k
188 return None
188 return None
189
189
190 class digestchecker(object):
190 class digestchecker(object):
191 """file handle wrapper that additionally checks content against a given
191 """file handle wrapper that additionally checks content against a given
192 size and digests.
192 size and digests.
193
193
194 d = digestchecker(fh, size, {'md5': '...'})
194 d = digestchecker(fh, size, {'md5': '...'})
195
195
196 When multiple digests are given, all of them are validated.
196 When multiple digests are given, all of them are validated.
197 """
197 """
198
198
199 def __init__(self, fh, size, digests):
199 def __init__(self, fh, size, digests):
200 self._fh = fh
200 self._fh = fh
201 self._size = size
201 self._size = size
202 self._got = 0
202 self._got = 0
203 self._digests = dict(digests)
203 self._digests = dict(digests)
204 self._digester = digester(self._digests.keys())
204 self._digester = digester(self._digests.keys())
205
205
206 def read(self, length=-1):
206 def read(self, length=-1):
207 content = self._fh.read(length)
207 content = self._fh.read(length)
208 self._digester.update(content)
208 self._digester.update(content)
209 self._got += len(content)
209 self._got += len(content)
210 return content
210 return content
211
211
212 def validate(self):
212 def validate(self):
213 if self._size != self._got:
213 if self._size != self._got:
214 raise Abort(_('size mismatch: expected %d, got %d') %
214 raise Abort(_('size mismatch: expected %d, got %d') %
215 (self._size, self._got))
215 (self._size, self._got))
216 for k, v in self._digests.items():
216 for k, v in self._digests.items():
217 if v != self._digester[k]:
217 if v != self._digester[k]:
218 # i18n: first parameter is a digest name
218 # i18n: first parameter is a digest name
219 raise Abort(_('%s mismatch: expected %s, got %s') %
219 raise Abort(_('%s mismatch: expected %s, got %s') %
220 (k, v, self._digester[k]))
220 (k, v, self._digester[k]))
221
221
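A minimal usage sketch for digestchecker (illustration only, not part of
util.py; StringIO stands in for any file-like object, and the sha1 value is
the digest of 'foo' quoted in the digester doctest above):

    from StringIO import StringIO
    fh = digestchecker(StringIO('foo'), 3,
                       {'sha1': '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'})
    while fh.read(4096):
        pass
    fh.validate()   # raises Abort on a size or digest mismatch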
222 try:
222 try:
223 buffer = buffer
223 buffer = buffer
224 except NameError:
224 except NameError:
225 if sys.version_info[0] < 3:
225 if sys.version_info[0] < 3:
226 def buffer(sliceable, offset=0):
226 def buffer(sliceable, offset=0):
227 return sliceable[offset:]
227 return sliceable[offset:]
228 else:
228 else:
229 def buffer(sliceable, offset=0):
229 def buffer(sliceable, offset=0):
230 return memoryview(sliceable)[offset:]
230 return memoryview(sliceable)[offset:]
231
231
232 import subprocess
232 import subprocess
233 closefds = os.name == 'posix'
233 closefds = os.name == 'posix'
234
234
235 def unpacker(fmt):
235 def unpacker(fmt):
236 """create a struct unpacker for the specified format"""
236 """create a struct unpacker for the specified format"""
237 try:
237 try:
238 # 2.5+
238 # 2.5+
239 return struct.Struct(fmt).unpack
239 return struct.Struct(fmt).unpack
240 except AttributeError:
240 except AttributeError:
241 # 2.4
241 # 2.4
242 return lambda buf: struct.unpack(fmt, buf)
242 return lambda buf: struct.unpack(fmt, buf)
243
243
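For illustration (not part of util.py), unpacker returns a callable that
behaves like struct.unpack with the format string baked in:

    u = unpacker('>H')              # big-endian unsigned 16-bit integer
    assert u('\x00\x2a') == (42,)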
244 def popen2(cmd, env=None, newlines=False):
244 def popen2(cmd, env=None, newlines=False):
245 # Setting bufsize to -1 lets the system decide the buffer size.
245 # Setting bufsize to -1 lets the system decide the buffer size.
246 # The default for bufsize is 0, meaning unbuffered. This leads to
246 # The default for bufsize is 0, meaning unbuffered. This leads to
247 # poor performance on Mac OS X: http://bugs.python.org/issue4194
247 # poor performance on Mac OS X: http://bugs.python.org/issue4194
248 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
248 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
249 close_fds=closefds,
249 close_fds=closefds,
250 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
250 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
251 universal_newlines=newlines,
251 universal_newlines=newlines,
252 env=env)
252 env=env)
253 return p.stdin, p.stdout
253 return p.stdin, p.stdout
254
254
255 def popen3(cmd, env=None, newlines=False):
255 def popen3(cmd, env=None, newlines=False):
256 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
256 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
257 return stdin, stdout, stderr
257 return stdin, stdout, stderr
258
258
259 def popen4(cmd, env=None, newlines=False):
259 def popen4(cmd, env=None, newlines=False):
260 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
260 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
261 close_fds=closefds,
261 close_fds=closefds,
262 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
262 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
263 stderr=subprocess.PIPE,
263 stderr=subprocess.PIPE,
264 universal_newlines=newlines,
264 universal_newlines=newlines,
265 env=env)
265 env=env)
266 return p.stdin, p.stdout, p.stderr, p
266 return p.stdin, p.stdout, p.stderr, p
267
267
268 def version():
268 def version():
269 """Return version information if available."""
269 """Return version information if available."""
270 try:
270 try:
271 import __version__
271 import __version__
272 return __version__.version
272 return __version__.version
273 except ImportError:
273 except ImportError:
274 return 'unknown'
274 return 'unknown'
275
275
276 # used by parsedate
276 # used by parsedate
277 defaultdateformats = (
277 defaultdateformats = (
278 '%Y-%m-%d %H:%M:%S',
278 '%Y-%m-%d %H:%M:%S',
279 '%Y-%m-%d %I:%M:%S%p',
279 '%Y-%m-%d %I:%M:%S%p',
280 '%Y-%m-%d %H:%M',
280 '%Y-%m-%d %H:%M',
281 '%Y-%m-%d %I:%M%p',
281 '%Y-%m-%d %I:%M%p',
282 '%Y-%m-%d',
282 '%Y-%m-%d',
283 '%m-%d',
283 '%m-%d',
284 '%m/%d',
284 '%m/%d',
285 '%m/%d/%y',
285 '%m/%d/%y',
286 '%m/%d/%Y',
286 '%m/%d/%Y',
287 '%a %b %d %H:%M:%S %Y',
287 '%a %b %d %H:%M:%S %Y',
288 '%a %b %d %I:%M:%S%p %Y',
288 '%a %b %d %I:%M:%S%p %Y',
289 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
289 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
290 '%b %d %H:%M:%S %Y',
290 '%b %d %H:%M:%S %Y',
291 '%b %d %I:%M:%S%p %Y',
291 '%b %d %I:%M:%S%p %Y',
292 '%b %d %H:%M:%S',
292 '%b %d %H:%M:%S',
293 '%b %d %I:%M:%S%p',
293 '%b %d %I:%M:%S%p',
294 '%b %d %H:%M',
294 '%b %d %H:%M',
295 '%b %d %I:%M%p',
295 '%b %d %I:%M%p',
296 '%b %d %Y',
296 '%b %d %Y',
297 '%b %d',
297 '%b %d',
298 '%H:%M:%S',
298 '%H:%M:%S',
299 '%I:%M:%S%p',
299 '%I:%M:%S%p',
300 '%H:%M',
300 '%H:%M',
301 '%I:%M%p',
301 '%I:%M%p',
302 )
302 )
303
303
304 extendeddateformats = defaultdateformats + (
304 extendeddateformats = defaultdateformats + (
305 "%Y",
305 "%Y",
306 "%Y-%m",
306 "%Y-%m",
307 "%b",
307 "%b",
308 "%b %Y",
308 "%b %Y",
309 )
309 )
310
310
311 def cachefunc(func):
311 def cachefunc(func):
312 '''cache the result of function calls'''
312 '''cache the result of function calls'''
313 # XXX doesn't handle keyword args
313 # XXX doesn't handle keyword args
314 if func.func_code.co_argcount == 0:
314 if func.func_code.co_argcount == 0:
315 cache = []
315 cache = []
316 def f():
316 def f():
317 if len(cache) == 0:
317 if len(cache) == 0:
318 cache.append(func())
318 cache.append(func())
319 return cache[0]
319 return cache[0]
320 return f
320 return f
321 cache = {}
321 cache = {}
322 if func.func_code.co_argcount == 1:
322 if func.func_code.co_argcount == 1:
323 # we gain a small amount of time because
323 # we gain a small amount of time because
324 # we don't need to pack/unpack the list
324 # we don't need to pack/unpack the list
325 def f(arg):
325 def f(arg):
326 if arg not in cache:
326 if arg not in cache:
327 cache[arg] = func(arg)
327 cache[arg] = func(arg)
328 return cache[arg]
328 return cache[arg]
329 else:
329 else:
330 def f(*args):
330 def f(*args):
331 if args not in cache:
331 if args not in cache:
332 cache[args] = func(*args)
332 cache[args] = func(*args)
333 return cache[args]
333 return cache[args]
334
334
335 return f
335 return f
336
336
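A small usage sketch for cachefunc (illustration only, not part of util.py;
square and calls are made-up names): the wrapped function runs once per
distinct argument and repeated calls are served from the cache.

    calls = []
    def square(x):
        calls.append(x)
        return x * x
    square = cachefunc(square)
    assert square(3) == 9
    assert square(3) == 9           # second call does not re-run the function
    assert calls == [3]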
337 try:
338 collections.deque.remove
339 deque = collections.deque
340 except AttributeError:
341 # python 2.4 lacks deque.remove
342 class deque(collections.deque):
343 def remove(self, val):
344 for i, v in enumerate(self):
345 if v == val:
346 del self[i]
347 break
337 deque = collections.deque
348
338
349 class sortdict(dict):
339 class sortdict(dict):
350 '''a simple sorted dictionary'''
340 '''a simple sorted dictionary'''
351 def __init__(self, data=None):
341 def __init__(self, data=None):
352 self._list = []
342 self._list = []
353 if data:
343 if data:
354 self.update(data)
344 self.update(data)
355 def copy(self):
345 def copy(self):
356 return sortdict(self)
346 return sortdict(self)
357 def __setitem__(self, key, val):
347 def __setitem__(self, key, val):
358 if key in self:
348 if key in self:
359 self._list.remove(key)
349 self._list.remove(key)
360 self._list.append(key)
350 self._list.append(key)
361 dict.__setitem__(self, key, val)
351 dict.__setitem__(self, key, val)
362 def __iter__(self):
352 def __iter__(self):
363 return self._list.__iter__()
353 return self._list.__iter__()
364 def update(self, src):
354 def update(self, src):
365 if isinstance(src, dict):
355 if isinstance(src, dict):
366 src = src.iteritems()
356 src = src.iteritems()
367 for k, v in src:
357 for k, v in src:
368 self[k] = v
358 self[k] = v
369 def clear(self):
359 def clear(self):
370 dict.clear(self)
360 dict.clear(self)
371 self._list = []
361 self._list = []
372 def items(self):
362 def items(self):
373 return [(k, self[k]) for k in self._list]
363 return [(k, self[k]) for k in self._list]
374 def __delitem__(self, key):
364 def __delitem__(self, key):
375 dict.__delitem__(self, key)
365 dict.__delitem__(self, key)
376 self._list.remove(key)
366 self._list.remove(key)
377 def pop(self, key, *args, **kwargs):
367 def pop(self, key, *args, **kwargs):
378 dict.pop(self, key, *args, **kwargs)
368 dict.pop(self, key, *args, **kwargs)
379 try:
369 try:
380 self._list.remove(key)
370 self._list.remove(key)
381 except ValueError:
371 except ValueError:
382 pass
372 pass
383 def keys(self):
373 def keys(self):
384 return self._list
374 return self._list
385 def iterkeys(self):
375 def iterkeys(self):
386 return self._list.__iter__()
376 return self._list.__iter__()
387 def iteritems(self):
377 def iteritems(self):
388 for k in self._list:
378 for k in self._list:
389 yield k, self[k]
379 yield k, self[k]
390 def insert(self, index, key, val):
380 def insert(self, index, key, val):
391 self._list.insert(index, key)
381 self._list.insert(index, key)
392 dict.__setitem__(self, key, val)
382 dict.__setitem__(self, key, val)
393
383
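A usage sketch for sortdict (illustration only, not part of util.py): keys
iterate in insertion order, and re-assigning an existing key moves it to the
end of that order.

    d = sortdict()
    d['b'] = 1
    d['a'] = 2
    d['b'] = 3                      # 'b' moves behind 'a'
    assert d.keys() == ['a', 'b']
    assert d.items() == [('a', 2), ('b', 3)]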
394 class lrucachedict(object):
384 class lrucachedict(object):
395 '''cache most recent gets from or sets to this dictionary'''
385 '''cache most recent gets from or sets to this dictionary'''
396 def __init__(self, maxsize):
386 def __init__(self, maxsize):
397 self._cache = {}
387 self._cache = {}
398 self._maxsize = maxsize
388 self._maxsize = maxsize
399 self._order = deque()
389 self._order = deque()
400
390
401 def __getitem__(self, key):
391 def __getitem__(self, key):
402 value = self._cache[key]
392 value = self._cache[key]
403 self._order.remove(key)
393 self._order.remove(key)
404 self._order.append(key)
394 self._order.append(key)
405 return value
395 return value
406
396
407 def __setitem__(self, key, value):
397 def __setitem__(self, key, value):
408 if key not in self._cache:
398 if key not in self._cache:
409 if len(self._cache) >= self._maxsize:
399 if len(self._cache) >= self._maxsize:
410 del self._cache[self._order.popleft()]
400 del self._cache[self._order.popleft()]
411 else:
401 else:
412 self._order.remove(key)
402 self._order.remove(key)
413 self._cache[key] = value
403 self._cache[key] = value
414 self._order.append(key)
404 self._order.append(key)
415
405
416 def __contains__(self, key):
406 def __contains__(self, key):
417 return key in self._cache
407 return key in self._cache
418
408
419 def clear(self):
409 def clear(self):
420 self._cache.clear()
410 self._cache.clear()
421 self._order = deque()
411 self._order = deque()
422
412
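A usage sketch for lrucachedict (illustration only, not part of util.py):
reads and writes both refresh a key, and inserting beyond maxsize evicts the
least recently used entry.

    c = lrucachedict(2)
    c['a'] = 1
    c['b'] = 2
    c['a']                          # touch 'a'; 'b' is now the oldest entry
    c['c'] = 3                      # over capacity, so 'b' is evicted
    assert 'a' in c and 'c' in c and 'b' not in c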
423 def lrucachefunc(func):
413 def lrucachefunc(func):
424 '''cache most recent results of function calls'''
414 '''cache most recent results of function calls'''
425 cache = {}
415 cache = {}
426 order = deque()
416 order = deque()
427 if func.func_code.co_argcount == 1:
417 if func.func_code.co_argcount == 1:
428 def f(arg):
418 def f(arg):
429 if arg not in cache:
419 if arg not in cache:
430 if len(cache) > 20:
420 if len(cache) > 20:
431 del cache[order.popleft()]
421 del cache[order.popleft()]
432 cache[arg] = func(arg)
422 cache[arg] = func(arg)
433 else:
423 else:
434 order.remove(arg)
424 order.remove(arg)
435 order.append(arg)
425 order.append(arg)
436 return cache[arg]
426 return cache[arg]
437 else:
427 else:
438 def f(*args):
428 def f(*args):
439 if args not in cache:
429 if args not in cache:
440 if len(cache) > 20:
430 if len(cache) > 20:
441 del cache[order.popleft()]
431 del cache[order.popleft()]
442 cache[args] = func(*args)
432 cache[args] = func(*args)
443 else:
433 else:
444 order.remove(args)
434 order.remove(args)
445 order.append(args)
435 order.append(args)
446 return cache[args]
436 return cache[args]
447
437
448 return f
438 return f
449
439
450 class propertycache(object):
440 class propertycache(object):
451 def __init__(self, func):
441 def __init__(self, func):
452 self.func = func
442 self.func = func
453 self.name = func.__name__
443 self.name = func.__name__
454 def __get__(self, obj, type=None):
444 def __get__(self, obj, type=None):
455 result = self.func(obj)
445 result = self.func(obj)
456 self.cachevalue(obj, result)
446 self.cachevalue(obj, result)
457 return result
447 return result
458
448
459 def cachevalue(self, obj, value):
449 def cachevalue(self, obj, value):
460 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
450 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
461 obj.__dict__[self.name] = value
451 obj.__dict__[self.name] = value
462
452
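propertycache is a non-data descriptor: the first attribute access runs the
decorated function and stores the result in the instance __dict__, which then
shadows the descriptor on every later access. A sketch with hypothetical
names (not part of util.py):

    class example(object):
        runs = 0
        @propertycache
        def answer(self):
            example.runs += 1
            return 42

    e = example()
    assert e.answer == 42
    assert e.answer == 42
    assert example.runs == 1        # the function body ran only once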
463 def pipefilter(s, cmd):
453 def pipefilter(s, cmd):
464 '''filter string S through command CMD, returning its output'''
454 '''filter string S through command CMD, returning its output'''
465 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
455 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
466 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
456 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
467 pout, perr = p.communicate(s)
457 pout, perr = p.communicate(s)
468 return pout
458 return pout
469
459
470 def tempfilter(s, cmd):
460 def tempfilter(s, cmd):
471 '''filter string S through a pair of temporary files with CMD.
461 '''filter string S through a pair of temporary files with CMD.
472 CMD is used as a template to create the real command to be run,
462 CMD is used as a template to create the real command to be run,
473 with the strings INFILE and OUTFILE replaced by the real names of
463 with the strings INFILE and OUTFILE replaced by the real names of
474 the temporary files generated.'''
464 the temporary files generated.'''
475 inname, outname = None, None
465 inname, outname = None, None
476 try:
466 try:
477 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
467 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
478 fp = os.fdopen(infd, 'wb')
468 fp = os.fdopen(infd, 'wb')
479 fp.write(s)
469 fp.write(s)
480 fp.close()
470 fp.close()
481 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
471 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
482 os.close(outfd)
472 os.close(outfd)
483 cmd = cmd.replace('INFILE', inname)
473 cmd = cmd.replace('INFILE', inname)
484 cmd = cmd.replace('OUTFILE', outname)
474 cmd = cmd.replace('OUTFILE', outname)
485 code = os.system(cmd)
475 code = os.system(cmd)
486 if sys.platform == 'OpenVMS' and code & 1:
476 if sys.platform == 'OpenVMS' and code & 1:
487 code = 0
477 code = 0
488 if code:
478 if code:
489 raise Abort(_("command '%s' failed: %s") %
479 raise Abort(_("command '%s' failed: %s") %
490 (cmd, explainexit(code)))
480 (cmd, explainexit(code)))
491 fp = open(outname, 'rb')
481 fp = open(outname, 'rb')
492 r = fp.read()
482 r = fp.read()
493 fp.close()
483 fp.close()
494 return r
484 return r
495 finally:
485 finally:
496 try:
486 try:
497 if inname:
487 if inname:
498 os.unlink(inname)
488 os.unlink(inname)
499 except OSError:
489 except OSError:
500 pass
490 pass
501 try:
491 try:
502 if outname:
492 if outname:
503 os.unlink(outname)
493 os.unlink(outname)
504 except OSError:
494 except OSError:
505 pass
495 pass
506
496
507 filtertable = {
497 filtertable = {
508 'tempfile:': tempfilter,
498 'tempfile:': tempfilter,
509 'pipe:': pipefilter,
499 'pipe:': pipefilter,
510 }
500 }
511
501
512 def filter(s, cmd):
502 def filter(s, cmd):
513 "filter a string through a command that transforms its input to its output"
503 "filter a string through a command that transforms its input to its output"
514 for name, fn in filtertable.iteritems():
504 for name, fn in filtertable.iteritems():
515 if cmd.startswith(name):
505 if cmd.startswith(name):
516 return fn(s, cmd[len(name):].lstrip())
506 return fn(s, cmd[len(name):].lstrip())
517 return pipefilter(s, cmd)
507 return pipefilter(s, cmd)
518
508
519 def binary(s):
509 def binary(s):
520 """return true if a string is binary data"""
510 """return true if a string is binary data"""
521 return bool(s and '\0' in s)
511 return bool(s and '\0' in s)
522
512
523 def increasingchunks(source, min=1024, max=65536):
513 def increasingchunks(source, min=1024, max=65536):
524 '''return no less than min bytes per chunk while data remains,
514 '''return no less than min bytes per chunk while data remains,
525 doubling min after each chunk until it reaches max'''
515 doubling min after each chunk until it reaches max'''
526 def log2(x):
516 def log2(x):
527 if not x:
517 if not x:
528 return 0
518 return 0
529 i = 0
519 i = 0
530 while x:
520 while x:
531 x >>= 1
521 x >>= 1
532 i += 1
522 i += 1
533 return i - 1
523 return i - 1
534
524
535 buf = []
525 buf = []
536 blen = 0
526 blen = 0
537 for chunk in source:
527 for chunk in source:
538 buf.append(chunk)
528 buf.append(chunk)
539 blen += len(chunk)
529 blen += len(chunk)
540 if blen >= min:
530 if blen >= min:
541 if min < max:
531 if min < max:
542 min = min << 1
532 min = min << 1
543 nmin = 1 << log2(blen)
533 nmin = 1 << log2(blen)
544 if nmin > min:
534 if nmin > min:
545 min = nmin
535 min = nmin
546 if min > max:
536 if min > max:
547 min = max
537 min = max
548 yield ''.join(buf)
538 yield ''.join(buf)
549 blen = 0
539 blen = 0
550 buf = []
540 buf = []
551 if buf:
541 if buf:
552 yield ''.join(buf)
542 yield ''.join(buf)
553
543
554 Abort = error.Abort
544 Abort = error.Abort
555
545
556 def always(fn):
546 def always(fn):
557 return True
547 return True
558
548
559 def never(fn):
549 def never(fn):
560 return False
550 return False
561
551
562 def nogc(func):
552 def nogc(func):
563 """disable garbage collector
553 """disable garbage collector
564
554
565 Python's garbage collector triggers a GC each time a certain number of
555 Python's garbage collector triggers a GC each time a certain number of
566 container objects (the number being defined by gc.get_threshold()) are
556 container objects (the number being defined by gc.get_threshold()) are
567 allocated even when marked not to be tracked by the collector. Tracking has
557 allocated even when marked not to be tracked by the collector. Tracking has
568 no effect on when GCs are triggered, only on what objects the GC looks
558 no effect on when GCs are triggered, only on what objects the GC looks
569 into. As a workaround, disable GC while building complex (huge)
559 into. As a workaround, disable GC while building complex (huge)
570 containers.
560 containers.
571
561
572 This garbage collector issue has been fixed in 2.7.
562 This garbage collector issue has been fixed in 2.7.
573 """
563 """
574 def wrapper(*args, **kwargs):
564 def wrapper(*args, **kwargs):
575 gcenabled = gc.isenabled()
565 gcenabled = gc.isenabled()
576 gc.disable()
566 gc.disable()
577 try:
567 try:
578 return func(*args, **kwargs)
568 return func(*args, **kwargs)
579 finally:
569 finally:
580 if gcenabled:
570 if gcenabled:
581 gc.enable()
571 gc.enable()
582 return wrapper
572 return wrapper
583
573
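A usage sketch for the nogc decorator (illustration only, not part of
util.py; buildindex is a made-up name): garbage collection is switched off
for the duration of the call and re-enabled afterwards if it was on before.

    @nogc
    def buildindex(items):
        return dict((v, i) for i, v in enumerate(items))

    assert buildindex(['a', 'b']) == {'a': 0, 'b': 1}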
584 def pathto(root, n1, n2):
574 def pathto(root, n1, n2):
585 '''return the relative path from one place to another.
575 '''return the relative path from one place to another.
586 root should use os.sep to separate directories
576 root should use os.sep to separate directories
587 n1 should use os.sep to separate directories
577 n1 should use os.sep to separate directories
588 n2 should use "/" to separate directories
578 n2 should use "/" to separate directories
589 returns an os.sep-separated path.
579 returns an os.sep-separated path.
590
580
591 If n1 is a relative path, it's assumed it's
581 If n1 is a relative path, it's assumed it's
592 relative to root.
582 relative to root.
593 n2 should always be relative to root.
583 n2 should always be relative to root.
594 '''
584 '''
595 if not n1:
585 if not n1:
596 return localpath(n2)
586 return localpath(n2)
597 if os.path.isabs(n1):
587 if os.path.isabs(n1):
598 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
588 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
599 return os.path.join(root, localpath(n2))
589 return os.path.join(root, localpath(n2))
600 n2 = '/'.join((pconvert(root), n2))
590 n2 = '/'.join((pconvert(root), n2))
601 a, b = splitpath(n1), n2.split('/')
591 a, b = splitpath(n1), n2.split('/')
602 a.reverse()
592 a.reverse()
603 b.reverse()
593 b.reverse()
604 while a and b and a[-1] == b[-1]:
594 while a and b and a[-1] == b[-1]:
605 a.pop()
595 a.pop()
606 b.pop()
596 b.pop()
607 b.reverse()
597 b.reverse()
608 return os.sep.join((['..'] * len(a)) + b) or '.'
598 return os.sep.join((['..'] * len(a)) + b) or '.'
609
599
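A worked example for pathto (illustration only, not part of util.py; the
paths are made up), on a POSIX system where os.sep is '/': walking from
'a/b' to 'a/c/d' inside the same root strips the common prefix and climbs
back up with '..'.

    assert pathto('/repo', 'a/b', 'a/c/d') == '../c/d'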
610 def mainfrozen():
600 def mainfrozen():
611 """return True if we are a frozen executable.
601 """return True if we are a frozen executable.
612
602
613 The code supports py2exe (most common, Windows only) and tools/freeze
603 The code supports py2exe (most common, Windows only) and tools/freeze
614 (portable, not much used).
604 (portable, not much used).
615 """
605 """
616 return (safehasattr(sys, "frozen") or # new py2exe
606 return (safehasattr(sys, "frozen") or # new py2exe
617 safehasattr(sys, "importers") or # old py2exe
607 safehasattr(sys, "importers") or # old py2exe
618 imp.is_frozen("__main__")) # tools/freeze
608 imp.is_frozen("__main__")) # tools/freeze
619
609
620 # the location of data files matching the source code
610 # the location of data files matching the source code
621 if mainfrozen():
611 if mainfrozen():
622 # executable version (py2exe) doesn't support __file__
612 # executable version (py2exe) doesn't support __file__
623 datapath = os.path.dirname(sys.executable)
613 datapath = os.path.dirname(sys.executable)
624 else:
614 else:
625 datapath = os.path.dirname(__file__)
615 datapath = os.path.dirname(__file__)
626
616
627 i18n.setdatapath(datapath)
617 i18n.setdatapath(datapath)
628
618
629 _hgexecutable = None
619 _hgexecutable = None
630
620
631 def hgexecutable():
621 def hgexecutable():
632 """return location of the 'hg' executable.
622 """return location of the 'hg' executable.
633
623
634 Defaults to $HG or 'hg' in the search path.
624 Defaults to $HG or 'hg' in the search path.
635 """
625 """
636 if _hgexecutable is None:
626 if _hgexecutable is None:
637 hg = os.environ.get('HG')
627 hg = os.environ.get('HG')
638 mainmod = sys.modules['__main__']
628 mainmod = sys.modules['__main__']
639 if hg:
629 if hg:
640 _sethgexecutable(hg)
630 _sethgexecutable(hg)
641 elif mainfrozen():
631 elif mainfrozen():
642 _sethgexecutable(sys.executable)
632 _sethgexecutable(sys.executable)
643 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
633 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
644 _sethgexecutable(mainmod.__file__)
634 _sethgexecutable(mainmod.__file__)
645 else:
635 else:
646 exe = findexe('hg') or os.path.basename(sys.argv[0])
636 exe = findexe('hg') or os.path.basename(sys.argv[0])
647 _sethgexecutable(exe)
637 _sethgexecutable(exe)
648 return _hgexecutable
638 return _hgexecutable
649
639
650 def _sethgexecutable(path):
640 def _sethgexecutable(path):
651 """set location of the 'hg' executable"""
641 """set location of the 'hg' executable"""
652 global _hgexecutable
642 global _hgexecutable
653 _hgexecutable = path
643 _hgexecutable = path
654
644
655 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
645 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
656 '''enhanced shell command execution.
646 '''enhanced shell command execution.
657 run with environment maybe modified, maybe in different dir.
647 run with environment maybe modified, maybe in different dir.
658
648
659 if command fails and onerr is None, return status, else raise onerr
649 if command fails and onerr is None, return status, else raise onerr
660 object as exception.
650 object as exception.
661
651
662 if out is specified, it is assumed to be a file-like object that has a
652 if out is specified, it is assumed to be a file-like object that has a
663 write() method. stdout and stderr will be redirected to out.'''
653 write() method. stdout and stderr will be redirected to out.'''
664 try:
654 try:
665 sys.stdout.flush()
655 sys.stdout.flush()
666 except Exception:
656 except Exception:
667 pass
657 pass
668 def py2shell(val):
658 def py2shell(val):
669 'convert python object into string that is useful to shell'
659 'convert python object into string that is useful to shell'
670 if val is None or val is False:
660 if val is None or val is False:
671 return '0'
661 return '0'
672 if val is True:
662 if val is True:
673 return '1'
663 return '1'
674 return str(val)
664 return str(val)
675 origcmd = cmd
665 origcmd = cmd
676 cmd = quotecommand(cmd)
666 cmd = quotecommand(cmd)
677 if sys.platform == 'plan9' and (sys.version_info[0] == 2
667 if sys.platform == 'plan9' and (sys.version_info[0] == 2
678 and sys.version_info[1] < 7):
668 and sys.version_info[1] < 7):
679 # subprocess kludge to work around issues in half-baked Python
669 # subprocess kludge to work around issues in half-baked Python
680 # ports, notably bichued/python:
670 # ports, notably bichued/python:
681 if not cwd is None:
671 if not cwd is None:
682 os.chdir(cwd)
672 os.chdir(cwd)
683 rc = os.system(cmd)
673 rc = os.system(cmd)
684 else:
674 else:
685 env = dict(os.environ)
675 env = dict(os.environ)
686 env.update((k, py2shell(v)) for k, v in environ.iteritems())
676 env.update((k, py2shell(v)) for k, v in environ.iteritems())
687 env['HG'] = hgexecutable()
677 env['HG'] = hgexecutable()
688 if out is None or out == sys.__stdout__:
678 if out is None or out == sys.__stdout__:
689 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
679 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
690 env=env, cwd=cwd)
680 env=env, cwd=cwd)
691 else:
681 else:
692 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
682 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
693 env=env, cwd=cwd, stdout=subprocess.PIPE,
683 env=env, cwd=cwd, stdout=subprocess.PIPE,
694 stderr=subprocess.STDOUT)
684 stderr=subprocess.STDOUT)
695 while True:
685 while True:
696 line = proc.stdout.readline()
686 line = proc.stdout.readline()
697 if not line:
687 if not line:
698 break
688 break
699 out.write(line)
689 out.write(line)
700 proc.wait()
690 proc.wait()
701 rc = proc.returncode
691 rc = proc.returncode
702 if sys.platform == 'OpenVMS' and rc & 1:
692 if sys.platform == 'OpenVMS' and rc & 1:
703 rc = 0
693 rc = 0
704 if rc and onerr:
694 if rc and onerr:
705 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
695 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
706 explainexit(rc)[0])
696 explainexit(rc)[0])
707 if errprefix:
697 if errprefix:
708 errmsg = '%s: %s' % (errprefix, errmsg)
698 errmsg = '%s: %s' % (errprefix, errmsg)
709 raise onerr(errmsg)
699 raise onerr(errmsg)
710 return rc
700 return rc
711
701
712 def checksignature(func):
702 def checksignature(func):
713 '''wrap a function with code to check for calling errors'''
703 '''wrap a function with code to check for calling errors'''
714 def check(*args, **kwargs):
704 def check(*args, **kwargs):
715 try:
705 try:
716 return func(*args, **kwargs)
706 return func(*args, **kwargs)
717 except TypeError:
707 except TypeError:
718 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
708 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
719 raise error.SignatureError
709 raise error.SignatureError
720 raise
710 raise
721
711
722 return check
712 return check
723
713
724 def copyfile(src, dest, hardlink=False):
714 def copyfile(src, dest, hardlink=False):
725 "copy a file, preserving mode and atime/mtime"
715 "copy a file, preserving mode and atime/mtime"
726 if os.path.lexists(dest):
716 if os.path.lexists(dest):
727 unlink(dest)
717 unlink(dest)
728 # hardlinks are problematic on CIFS, quietly ignore this flag
718 # hardlinks are problematic on CIFS, quietly ignore this flag
729 # until we find a way to work around it cleanly (issue4546)
719 # until we find a way to work around it cleanly (issue4546)
730 if False and hardlink:
720 if False and hardlink:
731 try:
721 try:
732 oslink(src, dest)
722 oslink(src, dest)
733 return
723 return
734 except (IOError, OSError):
724 except (IOError, OSError):
735 pass # fall back to normal copy
725 pass # fall back to normal copy
736 if os.path.islink(src):
726 if os.path.islink(src):
737 os.symlink(os.readlink(src), dest)
727 os.symlink(os.readlink(src), dest)
738 else:
728 else:
739 try:
729 try:
740 shutil.copyfile(src, dest)
730 shutil.copyfile(src, dest)
741 shutil.copymode(src, dest)
731 shutil.copymode(src, dest)
742 except shutil.Error, inst:
732 except shutil.Error, inst:
743 raise Abort(str(inst))
733 raise Abort(str(inst))
744
734
745 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
735 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
746 """Copy a directory tree using hardlinks if possible."""
736 """Copy a directory tree using hardlinks if possible."""
747 num = 0
737 num = 0
748
738
749 if hardlink is None:
739 if hardlink is None:
750 hardlink = (os.stat(src).st_dev ==
740 hardlink = (os.stat(src).st_dev ==
751 os.stat(os.path.dirname(dst)).st_dev)
741 os.stat(os.path.dirname(dst)).st_dev)
752 if hardlink:
742 if hardlink:
753 topic = _('linking')
743 topic = _('linking')
754 else:
744 else:
755 topic = _('copying')
745 topic = _('copying')
756
746
757 if os.path.isdir(src):
747 if os.path.isdir(src):
758 os.mkdir(dst)
748 os.mkdir(dst)
759 for name, kind in osutil.listdir(src):
749 for name, kind in osutil.listdir(src):
760 srcname = os.path.join(src, name)
750 srcname = os.path.join(src, name)
761 dstname = os.path.join(dst, name)
751 dstname = os.path.join(dst, name)
762 def nprog(t, pos):
752 def nprog(t, pos):
763 if pos is not None:
753 if pos is not None:
764 return progress(t, pos + num)
754 return progress(t, pos + num)
765 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
755 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
766 num += n
756 num += n
767 else:
757 else:
768 if hardlink:
758 if hardlink:
769 try:
759 try:
770 oslink(src, dst)
760 oslink(src, dst)
771 except (IOError, OSError):
761 except (IOError, OSError):
772 hardlink = False
762 hardlink = False
773 shutil.copy(src, dst)
763 shutil.copy(src, dst)
774 else:
764 else:
775 shutil.copy(src, dst)
765 shutil.copy(src, dst)
776 num += 1
766 num += 1
777 progress(topic, num)
767 progress(topic, num)
778 progress(topic, None)
768 progress(topic, None)
779
769
780 return hardlink, num
770 return hardlink, num
781
771
782 _winreservednames = '''con prn aux nul
772 _winreservednames = '''con prn aux nul
783 com1 com2 com3 com4 com5 com6 com7 com8 com9
773 com1 com2 com3 com4 com5 com6 com7 com8 com9
784 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
774 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
785 _winreservedchars = ':*?"<>|'
775 _winreservedchars = ':*?"<>|'
786 def checkwinfilename(path):
776 def checkwinfilename(path):
787 r'''Check that the base-relative path is a valid filename on Windows.
777 r'''Check that the base-relative path is a valid filename on Windows.
788 Returns None if the path is ok, or a UI string describing the problem.
778 Returns None if the path is ok, or a UI string describing the problem.
789
779
790 >>> checkwinfilename("just/a/normal/path")
780 >>> checkwinfilename("just/a/normal/path")
791 >>> checkwinfilename("foo/bar/con.xml")
781 >>> checkwinfilename("foo/bar/con.xml")
792 "filename contains 'con', which is reserved on Windows"
782 "filename contains 'con', which is reserved on Windows"
793 >>> checkwinfilename("foo/con.xml/bar")
783 >>> checkwinfilename("foo/con.xml/bar")
794 "filename contains 'con', which is reserved on Windows"
784 "filename contains 'con', which is reserved on Windows"
795 >>> checkwinfilename("foo/bar/xml.con")
785 >>> checkwinfilename("foo/bar/xml.con")
796 >>> checkwinfilename("foo/bar/AUX/bla.txt")
786 >>> checkwinfilename("foo/bar/AUX/bla.txt")
797 "filename contains 'AUX', which is reserved on Windows"
787 "filename contains 'AUX', which is reserved on Windows"
798 >>> checkwinfilename("foo/bar/bla:.txt")
788 >>> checkwinfilename("foo/bar/bla:.txt")
799 "filename contains ':', which is reserved on Windows"
789 "filename contains ':', which is reserved on Windows"
800 >>> checkwinfilename("foo/bar/b\07la.txt")
790 >>> checkwinfilename("foo/bar/b\07la.txt")
801 "filename contains '\\x07', which is invalid on Windows"
791 "filename contains '\\x07', which is invalid on Windows"
802 >>> checkwinfilename("foo/bar/bla ")
792 >>> checkwinfilename("foo/bar/bla ")
803 "filename ends with ' ', which is not allowed on Windows"
793 "filename ends with ' ', which is not allowed on Windows"
804 >>> checkwinfilename("../bar")
794 >>> checkwinfilename("../bar")
805 >>> checkwinfilename("foo\\")
795 >>> checkwinfilename("foo\\")
806 "filename ends with '\\', which is invalid on Windows"
796 "filename ends with '\\', which is invalid on Windows"
807 >>> checkwinfilename("foo\\/bar")
797 >>> checkwinfilename("foo\\/bar")
808 "directory name ends with '\\', which is invalid on Windows"
798 "directory name ends with '\\', which is invalid on Windows"
809 '''
799 '''
810 if path.endswith('\\'):
800 if path.endswith('\\'):
811 return _("filename ends with '\\', which is invalid on Windows")
801 return _("filename ends with '\\', which is invalid on Windows")
812 if '\\/' in path:
802 if '\\/' in path:
813 return _("directory name ends with '\\', which is invalid on Windows")
803 return _("directory name ends with '\\', which is invalid on Windows")
814 for n in path.replace('\\', '/').split('/'):
804 for n in path.replace('\\', '/').split('/'):
815 if not n:
805 if not n:
816 continue
806 continue
817 for c in n:
807 for c in n:
818 if c in _winreservedchars:
808 if c in _winreservedchars:
819 return _("filename contains '%s', which is reserved "
809 return _("filename contains '%s', which is reserved "
820 "on Windows") % c
810 "on Windows") % c
821 if ord(c) <= 31:
811 if ord(c) <= 31:
822 return _("filename contains %r, which is invalid "
812 return _("filename contains %r, which is invalid "
823 "on Windows") % c
813 "on Windows") % c
824 base = n.split('.')[0]
814 base = n.split('.')[0]
825 if base and base.lower() in _winreservednames:
815 if base and base.lower() in _winreservednames:
826 return _("filename contains '%s', which is reserved "
816 return _("filename contains '%s', which is reserved "
827 "on Windows") % base
817 "on Windows") % base
828 t = n[-1]
818 t = n[-1]
829 if t in '. ' and n not in '..':
819 if t in '. ' and n not in '..':
830 return _("filename ends with '%s', which is not allowed "
820 return _("filename ends with '%s', which is not allowed "
831 "on Windows") % t
821 "on Windows") % t
832
822
833 if os.name == 'nt':
823 if os.name == 'nt':
834 checkosfilename = checkwinfilename
824 checkosfilename = checkwinfilename
835 else:
825 else:
836 checkosfilename = platform.checkosfilename
826 checkosfilename = platform.checkosfilename
837
827
838 def makelock(info, pathname):
828 def makelock(info, pathname):
839 try:
829 try:
840 return os.symlink(info, pathname)
830 return os.symlink(info, pathname)
841 except OSError, why:
831 except OSError, why:
842 if why.errno == errno.EEXIST:
832 if why.errno == errno.EEXIST:
843 raise
833 raise
844 except AttributeError: # no symlink in os
834 except AttributeError: # no symlink in os
845 pass
835 pass
846
836
847 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
837 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
848 os.write(ld, info)
838 os.write(ld, info)
849 os.close(ld)
839 os.close(ld)
850
840
851 def readlock(pathname):
841 def readlock(pathname):
852 try:
842 try:
853 return os.readlink(pathname)
843 return os.readlink(pathname)
854 except OSError, why:
844 except OSError, why:
855 if why.errno not in (errno.EINVAL, errno.ENOSYS):
845 if why.errno not in (errno.EINVAL, errno.ENOSYS):
856 raise
846 raise
857 except AttributeError: # no symlink in os
847 except AttributeError: # no symlink in os
858 pass
848 pass
859 fp = posixfile(pathname)
849 fp = posixfile(pathname)
860 r = fp.read()
850 r = fp.read()
861 fp.close()
851 fp.close()
862 return r
852 return r
863
853
864 def fstat(fp):
854 def fstat(fp):
865 '''stat file object that may not have fileno method.'''
855 '''stat file object that may not have fileno method.'''
866 try:
856 try:
867 return os.fstat(fp.fileno())
857 return os.fstat(fp.fileno())
868 except AttributeError:
858 except AttributeError:
869 return os.stat(fp.name)
859 return os.stat(fp.name)
870
860
871 # File system features
861 # File system features
872
862
873 def checkcase(path):
863 def checkcase(path):
874 """
864 """
875 Return true if the given path is on a case-sensitive filesystem
865 Return true if the given path is on a case-sensitive filesystem
876
866
877 Requires a path (like /foo/.hg) ending with a foldable final
867 Requires a path (like /foo/.hg) ending with a foldable final
878 directory component.
868 directory component.
879 """
869 """
880 s1 = os.lstat(path)
870 s1 = os.lstat(path)
881 d, b = os.path.split(path)
871 d, b = os.path.split(path)
882 b2 = b.upper()
872 b2 = b.upper()
883 if b == b2:
873 if b == b2:
884 b2 = b.lower()
874 b2 = b.lower()
885 if b == b2:
875 if b == b2:
886 return True # no evidence against case sensitivity
876 return True # no evidence against case sensitivity
887 p2 = os.path.join(d, b2)
877 p2 = os.path.join(d, b2)
888 try:
878 try:
889 s2 = os.lstat(p2)
879 s2 = os.lstat(p2)
890 if s2 == s1:
880 if s2 == s1:
891 return False
881 return False
892 return True
882 return True
893 except OSError:
883 except OSError:
894 return True
884 return True
895
885
896 try:
886 try:
897 import re2
887 import re2
898 _re2 = None
888 _re2 = None
899 except ImportError:
889 except ImportError:
900 _re2 = False
890 _re2 = False
901
891
902 class _re(object):
892 class _re(object):
903 def _checkre2(self):
893 def _checkre2(self):
904 global _re2
894 global _re2
905 try:
895 try:
906 # check if match works, see issue3964
896 # check if match works, see issue3964
907 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
897 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
908 except ImportError:
898 except ImportError:
909 _re2 = False
899 _re2 = False
910
900
911 def compile(self, pat, flags=0):
901 def compile(self, pat, flags=0):
912 '''Compile a regular expression, using re2 if possible
902 '''Compile a regular expression, using re2 if possible
913
903
914 For best performance, use only re2-compatible regexp features. The
904 For best performance, use only re2-compatible regexp features. The
915 only flags from the re module that are re2-compatible are
905 only flags from the re module that are re2-compatible are
916 IGNORECASE and MULTILINE.'''
906 IGNORECASE and MULTILINE.'''
917 if _re2 is None:
907 if _re2 is None:
918 self._checkre2()
908 self._checkre2()
919 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
909 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
920 if flags & remod.IGNORECASE:
910 if flags & remod.IGNORECASE:
921 pat = '(?i)' + pat
911 pat = '(?i)' + pat
922 if flags & remod.MULTILINE:
912 if flags & remod.MULTILINE:
923 pat = '(?m)' + pat
913 pat = '(?m)' + pat
924 try:
914 try:
925 return re2.compile(pat)
915 return re2.compile(pat)
926 except re2.error:
916 except re2.error:
927 pass
917 pass
928 return remod.compile(pat, flags)
918 return remod.compile(pat, flags)
929
919
930 @propertycache
920 @propertycache
931 def escape(self):
921 def escape(self):
932 '''Return the version of escape corresponding to self.compile.
922 '''Return the version of escape corresponding to self.compile.
933
923
934 This is imperfect because whether re2 or re is used for a particular
924 This is imperfect because whether re2 or re is used for a particular
935 function depends on the flags, etc, but it's the best we can do.
925 function depends on the flags, etc, but it's the best we can do.
936 '''
926 '''
937 global _re2
927 global _re2
938 if _re2 is None:
928 if _re2 is None:
939 self._checkre2()
929 self._checkre2()
940 if _re2:
930 if _re2:
941 return re2.escape
931 return re2.escape
942 else:
932 else:
943 return remod.escape
933 return remod.escape
944
934
945 re = _re()
935 re = _re()
946
936
947 _fspathcache = {}
937 _fspathcache = {}
948 def fspath(name, root):
938 def fspath(name, root):
949 '''Get name in the case stored in the filesystem
939 '''Get name in the case stored in the filesystem
950
940
951 The name should be relative to root, and be normcase-ed for efficiency.
941 The name should be relative to root, and be normcase-ed for efficiency.
952
942
953 Note that this function is unnecessary, and should not be
943 Note that this function is unnecessary, and should not be
954 called, for case-sensitive filesystems (simply because it's expensive).
944 called, for case-sensitive filesystems (simply because it's expensive).
955
945
956 The root should be normcase-ed, too.
946 The root should be normcase-ed, too.
957 '''
947 '''
958 def _makefspathcacheentry(dir):
948 def _makefspathcacheentry(dir):
959 return dict((normcase(n), n) for n in os.listdir(dir))
949 return dict((normcase(n), n) for n in os.listdir(dir))
960
950
961 seps = os.sep
951 seps = os.sep
962 if os.altsep:
952 if os.altsep:
963 seps = seps + os.altsep
953 seps = seps + os.altsep
964 # Protect backslashes. This gets silly very quickly.
954 # Protect backslashes. This gets silly very quickly.
965 seps = seps.replace('\\','\\\\')
955 seps = seps.replace('\\','\\\\')
966 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
956 pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
967 dir = os.path.normpath(root)
957 dir = os.path.normpath(root)
968 result = []
958 result = []
969 for part, sep in pattern.findall(name):
959 for part, sep in pattern.findall(name):
970 if sep:
960 if sep:
971 result.append(sep)
961 result.append(sep)
972 continue
962 continue
973
963
974 if dir not in _fspathcache:
964 if dir not in _fspathcache:
975 _fspathcache[dir] = _makefspathcacheentry(dir)
965 _fspathcache[dir] = _makefspathcacheentry(dir)
976 contents = _fspathcache[dir]
966 contents = _fspathcache[dir]
977
967
978 found = contents.get(part)
968 found = contents.get(part)
979 if not found:
969 if not found:
980 # retry "once per directory" per "dirstate.walk" which
970 # retry "once per directory" per "dirstate.walk" which
981 # may take place for each patch of "hg qpush", for example
971 # may take place for each patch of "hg qpush", for example
982 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
972 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
983 found = contents.get(part)
973 found = contents.get(part)
984
974
985 result.append(found or part)
975 result.append(found or part)
986 dir = os.path.join(dir, part)
976 dir = os.path.join(dir, part)
987
977
988 return ''.join(result)
978 return ''.join(result)
989
979
990 def checknlink(testfile):
980 def checknlink(testfile):
991 '''check whether hardlink count reporting works properly'''
981 '''check whether hardlink count reporting works properly'''
992
982
993 # testfile may be open, so we need a separate file for checking to
983 # testfile may be open, so we need a separate file for checking to
994 # work around issue2543 (or testfile may get lost on Samba shares)
984 # work around issue2543 (or testfile may get lost on Samba shares)
995 f1 = testfile + ".hgtmp1"
985 f1 = testfile + ".hgtmp1"
996 if os.path.lexists(f1):
986 if os.path.lexists(f1):
997 return False
987 return False
998 try:
988 try:
999 posixfile(f1, 'w').close()
989 posixfile(f1, 'w').close()
1000 except IOError:
990 except IOError:
1001 return False
991 return False
1002
992
1003 f2 = testfile + ".hgtmp2"
993 f2 = testfile + ".hgtmp2"
1004 fd = None
994 fd = None
1005 try:
995 try:
1006 oslink(f1, f2)
996 oslink(f1, f2)
1007 # nlinks() may behave differently for files on Windows shares if
997 # nlinks() may behave differently for files on Windows shares if
1008 # the file is open.
998 # the file is open.
1009 fd = posixfile(f2)
999 fd = posixfile(f2)
1010 return nlinks(f2) > 1
1000 return nlinks(f2) > 1
1011 except OSError:
1001 except OSError:
1012 return False
1002 return False
1013 finally:
1003 finally:
1014 if fd is not None:
1004 if fd is not None:
1015 fd.close()
1005 fd.close()
1016 for f in (f1, f2):
1006 for f in (f1, f2):
1017 try:
1007 try:
1018 os.unlink(f)
1008 os.unlink(f)
1019 except OSError:
1009 except OSError:
1020 pass
1010 pass
1021
1011
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)

def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because it is
    an alternative to a simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
    return path.split(os.sep)

def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        return os.name == "nt" or os.environ.get("DISPLAY")

def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents as name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp

class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

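# Usage sketch (illustrative, not taken from a specific caller): writes go to
# the temporary copy and only replace the original on close(); discard() on
# error leaves the original file untouched.
#   f = atomictempfile('somefile', 'wb')
#   try:
#       f.write('new contents\n')
#       f.close()            # atomically replaces 'somefile'
#   except Exception:
#       f.discard()          # 'somefile' is left unchanged
#       raise
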
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError, err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)

def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError, err:
        if err.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)

def readfile(path):
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()

def writefile(path, text):
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()

def appendfile(path, text):
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()

class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = deque()

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        left = l
        buf = []
        queue = self._queue
        while left is None or left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            chunk = queue.popleft()
            if left is not None:
                left -= len(chunk)
            if left is not None and left < 0:
                queue.appendleft(chunk[left:])
                buf.append(chunk[:left])
            else:
                buf.append(chunk)

        return ''.join(buf)

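# Usage sketch (illustrative): chunkbuffer turns an iterator of arbitrarily
# sized strings into something that can be read in fixed-size records.
#   buf = chunkbuffer(iter(['abc', 'defgh', 'ij']))
#   buf.read(4)   # -> 'abcd'
#   buf.read(4)   # -> 'efgh'
#   buf.read()    # -> 'ij' (the rest of the data)
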
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s

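# Usage sketch (illustrative; 'bigfile' and 'copy' are hypothetical paths):
# stream a file in bounded chunks instead of reading it whole, the same way
# mktempcopy() above consumes filechunkiter().
#   src = posixfile('bigfile', 'rb')
#   dst = posixfile('copy', 'wb')
#   for chunk in filechunkiter(src, size=131072):
#       dst.write(chunk)
#   src.close()
#   dst.close()
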
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    delta = (datetime.datetime.utcfromtimestamp(timestamp) -
             datetime.datetime.fromtimestamp(timestamp))
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz

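# Worked example (illustrative): the offset is "seconds behind UTC", so in a
# UTC-8 zone (no DST) utcfromtimestamp() is 8 hours ahead of fromtimestamp()
# and the tuple carries 28800; datestr() below renders that as "-0800".
#   makedate(0)[1] == 28800    # only in a UTC-8 local zone
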
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. %1 and %2 in the format (or %z,
    which expands to %1%2) are replaced by the time zone offset."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    s = time.strftime(format, t)
    return s

def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date."""
    return datestr(date, format='%Y-%m-%d')

def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def timezone(string):
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset

def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset

def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop

def shortuser(user):
    """Return a short representation of a user name or email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    f = user.find(' ')
    if f >= 0:
        user = user[:f]
    f = user.find('.')
    if f >= 0:
        user = user[:f]
    return user

def emailuser(user):
    """Return the user portion of an email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    return user

def email(author):
    '''get email of author.'''
    r = author.find('>')
    if r == -1:
        r = None
    return author[author.find('<') + 1:r]

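# Illustrative examples (not doctests) for the three author helpers above,
# assuming a conventional "Name <user@host>" author string:
#   email('John Doe <jdoe@example.com>')     -> 'jdoe@example.com'
#   emailuser('John Doe <jdoe@example.com>') -> 'jdoe'
#   shortuser('John Doe <jdoe@example.com>') -> 'jdoe'
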
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    return encoding.trim(text, maxlength, ellipsis='...')

def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        for multiplier, divisor, format in unittable:
            if count >= divisor * multiplier:
                return format % (count / float(divisor))
        return unittable[-1][2] % count

    return go

bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )

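# Illustrative examples of the generated function: the first matching
# (multiplier, divisor) row determines the unit and precision.
#   bytecount(100)           -> '100 bytes'
#   bytecount(100000)        -> '97.7 KB'
#   bytecount(5 * (1 << 20)) -> '5.00 MB'
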
def uirepr(s):
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')

# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither the number of 'bytes' in any encoding nor the number of
        'characters' is appropriate to calculate terminal columns for a
        specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use the width information of each
        character.

        In addition, characters classified as 'ambiguous' width are
        treated as wide in East Asian locales, but as narrow elsewhere.

        This requires a user decision to determine the width of such
        characters.
        """
        def __init__(self, **kwargs):
            textwrap.TextWrapper.__init__(self, **kwargs)

            # for compatibility between 2.4 and 2.6
            if getattr(self, 'drop_whitespace', None) is None:
                self.drop_whitespace = kwargs.get('drop_whitespace', True)

        def _cutdown(self, ucstr, space_left):
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chunks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)

def wrap(line, width, initindent='', hangindent=''):
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)

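# Usage sketch (illustrative, plain ASCII input): wrap a long line at 14
# display columns with a hanging indent; the column arithmetic above is
# multi-byte aware, so wide characters count as two columns.
#   wrap('aaa bbb ccc ddd eee fff', 14, initindent='', hangindent='  ')
#   # -> 'aaa bbb ccc\n  ddd eee fff'
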
def iterlines(iterator):
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line

def expandpath(path):
    return os.path.expanduser(os.path.expandvars(path))

def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        return [sys.executable]
    return gethgcmd()

def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent waits on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)

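# Usage sketch (illustrative; the command and readiness check are
# hypothetical): start a long-running child and only return once it is
# reachable.
#   def listening():
#       return os.path.exists('/tmp/daemon.sock')
#   pid = rundetached(['mydaemon', '--socket', '/tmp/daemon.sock'], listening)
#   if pid < 0:
#       raise Abort(_('daemon failed to start'))
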
try:
    any, all = any, all
except NameError:
    def any(iterable):
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        for i in iterable:
            if not i:
                return False
        return True

def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)

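# Illustrative examples: the mapping keys become a regular expression
# alternation after the (possibly escaped) prefix.
#   interpolate('%', {'user': 'alice'}, 'hello %user')
#   # -> 'hello alice'
#   interpolate(r'\$', {'user': 'alice'}, 'hello $user, pay $$5',
#               escape_prefix=True)
#   # -> 'hello alice, pay $5'
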
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)

_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower(), None)

_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)

def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            s += '%' + item
        except UnicodeDecodeError:
            s += unichr(int(item[:2], 16)) + item[2:]
    return s

1827 class url(object):
1817 class url(object):
1828 r"""Reliable URL parser.
1818 r"""Reliable URL parser.
1829
1819
1830 This parses URLs and provides attributes for the following
1820 This parses URLs and provides attributes for the following
1831 components:
1821 components:
1832
1822
1833 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1823 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1834
1824
1835 Missing components are set to None. The only exception is
1825 Missing components are set to None. The only exception is
1836 fragment, which is set to '' if present but empty.
1826 fragment, which is set to '' if present but empty.
1837
1827
1838 If parsefragment is False, fragment is included in query. If
1828 If parsefragment is False, fragment is included in query. If
1839 parsequery is False, query is included in path. If both are
1829 parsequery is False, query is included in path. If both are
1840 False, both fragment and query are included in path.
1830 False, both fragment and query are included in path.
1841
1831
1842 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1832 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1843
1833
1844 Note that for backward compatibility reasons, bundle URLs do not
1834 Note that for backward compatibility reasons, bundle URLs do not
1845 take host names. That means 'bundle://../' has a path of '../'.
1835 take host names. That means 'bundle://../' has a path of '../'.
1846
1836
1847 Examples:
1837 Examples:
1848
1838
1849 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1839 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1850 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1840 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1851 >>> url('ssh://[::1]:2200//home/joe/repo')
1841 >>> url('ssh://[::1]:2200//home/joe/repo')
1852 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1842 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1853 >>> url('file:///home/joe/repo')
1843 >>> url('file:///home/joe/repo')
1854 <url scheme: 'file', path: '/home/joe/repo'>
1844 <url scheme: 'file', path: '/home/joe/repo'>
1855 >>> url('file:///c:/temp/foo/')
1845 >>> url('file:///c:/temp/foo/')
1856 <url scheme: 'file', path: 'c:/temp/foo/'>
1846 <url scheme: 'file', path: 'c:/temp/foo/'>
1857 >>> url('bundle:foo')
1847 >>> url('bundle:foo')
1858 <url scheme: 'bundle', path: 'foo'>
1848 <url scheme: 'bundle', path: 'foo'>
1859 >>> url('bundle://../foo')
1849 >>> url('bundle://../foo')
1860 <url scheme: 'bundle', path: '../foo'>
1850 <url scheme: 'bundle', path: '../foo'>
1861 >>> url(r'c:\foo\bar')
1851 >>> url(r'c:\foo\bar')
1862 <url path: 'c:\\foo\\bar'>
1852 <url path: 'c:\\foo\\bar'>
1863 >>> url(r'\\blah\blah\blah')
1853 >>> url(r'\\blah\blah\blah')
1864 <url path: '\\\\blah\\blah\\blah'>
1854 <url path: '\\\\blah\\blah\\blah'>
1865 >>> url(r'\\blah\blah\blah#baz')
1855 >>> url(r'\\blah\blah\blah#baz')
1866 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1856 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1867 >>> url(r'file:///C:\users\me')
1857 >>> url(r'file:///C:\users\me')
1868 <url scheme: 'file', path: 'C:\\users\\me'>
1858 <url scheme: 'file', path: 'C:\\users\\me'>
1869
1859
1870 Authentication credentials:
1860 Authentication credentials:
1871
1861
1872 >>> url('ssh://joe:xyz@x/repo')
1862 >>> url('ssh://joe:xyz@x/repo')
1873 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1863 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1874 >>> url('ssh://joe@x/repo')
1864 >>> url('ssh://joe@x/repo')
1875 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1865 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1876
1866
1877 Query strings and fragments:
1867 Query strings and fragments:
1878
1868
1879 >>> url('http://host/a?b#c')
1869 >>> url('http://host/a?b#c')
1880 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1870 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1881 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1871 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1882 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1872 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1883 """
1873 """
1884
1874
1885 _safechars = "!~*'()+"
1875 _safechars = "!~*'()+"
1886 _safepchars = "/!~*'()+:\\"
1876 _safepchars = "/!~*'()+:\\"
1887 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1877 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
1888
1878
    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLs
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')

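# Illustrative usage of the url class above (a sketch; the expected values
# are inferred from reading the code, not taken from the module's doctests):
#
#   >>> u = url('http://joe:xyz@example.com/repo')
#   >>> u.authinfo()      # credentials are stripped from the URI itself
#   ('http://example.com/repo', (None, ('http://example.com/repo', 'example.com'), 'joe', 'xyz'))
#   >>> url('http://example.com/repo').authinfo()
#   ('http://example.com/repo', None)
#   >>> url('/tmp/repo').isabs(), url('foo/bar').isabs()
#   (True, False)
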
def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()

def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

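# Illustrative examples for the helpers above (a sketch; expected values are
# inferred from the code):
#
#   >>> hasscheme('http://example.com/repo'), hasscheme('relative/path')
#   (True, False)
#   >>> hasdriveletter('c:\\temp'), hasdriveletter('/tmp')
#   (True, False)
#   >>> urllocalpath('file:///tmp/repo')
#   '/tmp/repo'
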
def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return str(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)

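# Illustrative examples (a sketch; outputs inferred from the code above):
#
#   >>> hidepassword('http://joe:secret@example.com/repo')
#   'http://joe:***@example.com/repo'
#   >>> removeauth('http://joe:secret@example.com/repo')
#   'http://example.com/repo'
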
def isatty(fd):
    try:
        return fd.isatty()
    except AttributeError:
        return False

timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper

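# Example use of the timed decorator (illustrative; the exact unit formatting
# comes from timecount/unitcountfn defined earlier in this module):
#
#   @timed
#   def compute():
#       return sum(range(1000))
#
# Calling compute() returns its result as usual and additionally writes a
# line such as "compute: 1.23 ms" to stderr, indented by two spaces per
# nesting level when timed functions call each other.
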
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

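# Additional illustrative cases (a sketch; behaviour inferred from the code
# above): units are case-insensitive, whitespace around the number is
# tolerated, and unparsable input raises error.ParseError:
#
#   >>> sizetoint('10 MB')
#   10485760
#   >>> sizetoint('banana')
#   Traceback (most recent call last):
#     ...
#   ParseError: couldn't parse size: banana
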
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behaviour. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results

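# Illustrative usage of the hooks class (a sketch): hooks run sorted by the
# name of their source, and the results come back in that order:
#
#   >>> h = hooks()
#   >>> h.add('zebra-ext', lambda x: x + 1)
#   >>> h.add('apple-ext', lambda x: x * 2)
#   >>> h(3)
#   [6, 4]
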
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require a ui object.
    Not meant to be used in production code, but very convenient while
    developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    entries = [('%s:%s' % (fn, ln), func)
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (fnmax, fnln, func))
    f.flush()

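# Illustrative output shape (a sketch; the file names, line numbers, and
# column alignment depend entirely on the caller's stack):
#
#   >>> debugstacktrace('checkpoint')
#   checkpoint at:
#    somescript.py:12 in <module>
#    somescript.py:7  in main
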
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs

if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs

def finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

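# Illustrative usage (a sketch exercising the pure Python implementations
# above; when the C version of dirs from parsers is picked up instead, it is
# assumed to behave the same way):
#
#   >>> list(finddirs('a/b/c'))
#   ['a/b', 'a']
#   >>> d = dirs(['a/b/c', 'a/d'])
#   >>> 'a' in d, 'a/b' in d, 'a/b/c' in d
#   (True, True, False)
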
# convenient shortcut
dst = debugstacktrace