path_auditor: delegate checking of nested repos to a callback
Martin Geisler
r12079:41e56e07 default
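This changeset gives path_auditor an optional callback for nested repositories: when auditing finds a directory that contains its own .hg, the callback is asked whether that nested repository is acceptable (for example because it is a known subrepository) instead of aborting unconditionally. A minimal sketch of how a caller might use the new parameter; the approval policy and paths below are hypothetical, only the path_auditor signature and the callback argument come from this change:

    import os
    from mercurial import util, error

    # Hypothetical policy: approve nested repos whose top-level directory
    # is registered as a subrepository of the working copy.
    approved = set(['sub'])

    def check_nested(nested_path):
        # nested_path is the absolute path of the nested repository found
        # while auditing; return True to approve it, False to keep the
        # usual "path ... is inside repo ..." abort.
        return os.path.basename(nested_path) in approved

    auditor = util.path_auditor('/path/to/repo', callback=check_nested)
    try:
        auditor('sub/inner/file.txt')   # accepted if 'sub' is approved
    except error.Abort:
        pass                            # unapproved nested repos still abort

Callers that pass no callback keep the old behaviour: when self.callback is None, the auditor falls back to the unconditional abort.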
@@ -1,1433 +1,1437 @@
# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""

from i18n import _
import error, osutil, encoding
import errno, re, shutil, sys, tempfile, traceback
import os, stat, time, calendar, textwrap, unicodedata, signal
import imp, socket

# Python compatibility

def sha1(s):
    return _fastsha1(s)

def _fastsha1(s):
    # This function will import sha1 from hashlib or sha (whichever is
    # available) and overwrite itself with it on the first call.
    # Subsequent calls will go directly to the imported function.
    if sys.version_info >= (2, 5):
        from hashlib import sha1 as _sha1
    else:
        from sha import sha as _sha1
    global _fastsha1, sha1
    _fastsha1 = sha1 = _sha1
    return _sha1(s)

import __builtin__

if sys.version_info[0] < 3:
    def fakebuffer(sliceable, offset=0):
        return sliceable[offset:]
else:
    def fakebuffer(sliceable, offset=0):
        return memoryview(sliceable)[offset:]
try:
    buffer
except NameError:
    __builtin__.buffer = fakebuffer

import subprocess
closefds = os.name == 'posix'

def popen2(cmd, env=None, newlines=False):
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout

def popen3(cmd, env=None, newlines=False):
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr

def version():
    """Return version information if available."""
    try:
        import __version__
        return __version__.version
    except ImportError:
        return 'unknown'

# used by parsedate
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
    )

extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )

def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keyword args
    cache = {}
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f

def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = []
    if func.func_code.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f

class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        setattr(obj, self.name, result)
        return result

def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout

def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explain_exit(code)))
        return open(outname, 'rb').read()
    finally:
        try:
            if inname:
                os.unlink(inname)
        except:
            pass
        try:
            if outname:
                os.unlink(outname)
        except:
            pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)

def binary(s):
    """return true if a string is binary data"""
    return bool(s and '\0' in s)

def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)

Abort = error.Abort

def always(fn):
    return True

def never(fn):
    return False

def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'

def canonpath(root, cwd, myname, auditor=None):
    """return the canonical path of myname, given cwd and root"""
    if endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = path_auditor(root)
    if name != rootsep and name.startswith(rootsep):
        name = name[len(rootsep):]
        auditor(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                break
            name = dirname

        raise Abort('%s not under root' % myname)

_hgexecutable = None

def main_is_frozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (hasattr(sys, "frozen") or # new py2exe
            hasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        if hg:
            set_hgexecutable(hg)
        elif main_is_frozen():
            set_hgexecutable(sys.executable)
        else:
            exe = find_exe('hg') or os.path.basename(sys.argv[0])
            set_hgexecutable(exe)
    return _hgexecutable

def set_hgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path

def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    if os.name == 'nt':
        cmd = '"%s"' % cmd
    env = dict(os.environ)
    env.update((k, py2shell(v)) for k, v in environ.iteritems())
    env['HG'] = hgexecutable()
    if out is None:
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in proc.stdout:
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explain_exit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        try:
            onerr.warn(errmsg + '\n')
        except AttributeError:
            raise onerr(errmsg)
    return rc

def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check

def unlink(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    # try removing directories that might now be empty
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        pass

def copyfile(src, dest):
    "copy a file, preserving mode and atime/mtime"
    if os.path.islink(src):
        try:
            os.unlink(dest)
        except:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copystat(src, dest)
        except shutil.Error, inst:
            raise Abort(str(inst))

def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            hardlink, n = copyfiles(srcname, dstname, hardlink)
            num += n
    else:
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num

class path_auditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
-    - inside a nested repository'''
+    - inside a nested repository (a callback can be used to approve
+      some nested repositories, e.g., subrepositories)
+    '''

-    def __init__(self, root):
+    def __init__(self, root, callback=None):
        self.audited = set()
        self.auditeddir = set()
        self.root = root
+        self.callback = callback

    def __call__(self, path):
        if path in self.audited:
            return
        normpath = os.path.normcase(path)
        parts = splitpath(normpath)
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise Abort(_('path %r is inside repo %r') % (path, base))
        def check(prefix):
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise Abort(_('path %r traverses symbolic link %r') %
                                (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
-                    raise Abort(_('path %r is inside repo %r') %
-                                (path, prefix))
+                    if not self.callback or not self.callback(curpath):
+                        raise Abort(_('path %r is inside repo %r') %
+                                    (path, prefix))
        parts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            if prefix in self.auditeddir:
                break
            check(prefix)
            prefixes.append(prefix)
            parts.pop()

        self.audited.add(path)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)

def nlinks(pathname):
    """Return number of hardlinks for the given file."""
    return os.lstat(pathname).st_nlink

if hasattr(os, 'link'):
    os_link = os.link
else:
    def os_link(src, dst):
        raise OSError(0, _("Hardlinks not supported"))

def lookup_reg(key, name=None, scope=None):
    return None

def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
    pass

if os.name == 'nt':
    from windows import *
else:
    from posix import *

def makelock(info, pathname):
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    try:
        return os.readlink(pathname)
    except OSError, why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    return posixfile(pathname).read()

def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)

# File system features

def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    p2 = os.path.join(d, b.upper())
    if path == p2:
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
        if s2 == s1:
            return False
        return True
    except:
        return True

_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).
    '''
    # If name is absolute, make it relative
    if name.lower().startswith(root.lower()):
        l = len(root)
        if name[l] == os.sep or name[l] == os.altsep:
            l = l + 1
        name = name[l:]

    if not os.path.exists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    seps.replace('\\','\\\\')
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normcase(os.path.normpath(root))
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        lpart = part.lower()
        lenp = len(part)
        for n in contents:
            if lenp == len(n) and n.lower() == lpart:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, lpart)

    return ''.join(result)

def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
        try:
            os.close(fh)
            m = os.stat(fn).st_mode & 0777
            new_file_has_exec = m & EXECFLAGS
            os.chmod(fn, m ^ EXECFLAGS)
            exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
        finally:
            os.unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
    return not (new_file_has_exec or exec_flags_cannot_flip)

def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy because symlink creation will fail if the
    # file already exists
    name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
    try:
        os.symlink(".", name)
        os.unlink(name)
        return True
    except (OSError, AttributeError):
        return False

def needbinarypatch():
    """return True if patches should be applied in binary mode by default."""
    return os.name == 'nt'

def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)

def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(os.sep)

def gui():
    '''Are we running in a GUI?'''
    return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")

def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    try:
        st_mode = os.lstat(name).st_mode & 0777
    except OSError, inst:
        if inst.errno != errno.ENOENT:
            raise
        st_mode = createmode
        if st_mode is None:
            st_mode = ~umask
        st_mode &= 0666
    os.chmod(temp, st_mode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        try: os.unlink(temp)
        except: pass
        raise
    return temp

class atomictempfile(object):
    """file-like object that atomically updates a file

    All writes will be redirected to a temporary copy of the original
    file. When rename is called, the copy is renamed to the original
    name, making the changes visible.
    """
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name
        self._fp = None
        self.temp = mktempcopy(name, emptyok=('w' in mode),
                               createmode=createmode)
        self._fp = posixfile(self.temp, mode)

    def __getattr__(self, name):
        return getattr(self._fp, name)

    def rename(self):
        if not self._fp.closed:
            self._fp.close()
            rename(self.temp, localpath(self.__name))

    def __del__(self):
        if not self._fp:
            return
        if not self._fp.closed:
            try:
                os.unlink(self.temp)
            except: pass
            self._fp.close()

def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    try:
        os.mkdir(name)
        if mode is not None:
            os.chmod(name, mode)
        return
    except OSError, err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT:
            raise
    parent = os.path.abspath(os.path.dirname(name))
    makedirs(parent, mode)
    makedirs(name, mode)

class opener(object):
    """Open files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    """
    def __init__(self, base, audit=True):
        self.base = base
        if audit:
            self.auditor = path_auditor(base)
        else:
            self.auditor = always
        self.createmode = None

    @propertycache
    def _can_symlink(self):
        return checklink(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        self.auditor(path)
        f = os.path.join(self.base, path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ("r", "rb"):
            try:
                nlink = nlinks(f)
            except OSError:
                nlink = 0
                d = os.path.dirname(f)
                if not os.path.isdir(d):
                    makedirs(d, self.createmode)
            if atomictemp:
                return atomictempfile(f, mode, self.createmode)
            if nlink > 1:
                rename(mktempcopy(f), f)
        fp = posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.auditor(dst)
        linkname = os.path.join(self.base, dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            makedirs(dirname, self.createmode)

        if self._can_symlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)

class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = []

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        left = l
        buf = ''
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            chunk = queue.pop(0)
            left -= len(chunk)
            if left < 0:
                queue.insert(0, chunk[left:])
                buf += chunk[:left]
            else:
                buf += chunk

        return buf

def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s

def makedate():
    lt = time.localtime()
    if lt[8] == 1 and time.daylight:
        tz = time.altzone
    else:
        tz = time.timezone
    return time.mktime(lt), tz

def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if "%1" in format or "%2" in format:
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    s = time.strftime(format, time.gmtime(float(t) - tz))
    return s

def shortdate(date=None):
994 """turn (timestamp, tzoff) tuple into iso 8631 date."""
998 """turn (timestamp, tzoff) tuple into iso 8631 date."""
995 return datestr(date, format='%Y-%m-%d')
999 return datestr(date, format='%Y-%m-%d')
996
1000
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def timezone(string):
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    for part in defaults:
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset

def parsedate(date, formats=None, defaults=None):
    """parse a localized date/time string and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        if not defaults:
            defaults = {}
        now = makedate()
        for part in "d mb yY HI M S".split():
            if part not in defaults:
                if part[0] in "HMS":
                    defaults[part] = "00"
                else:
                    defaults[part] = datestr(now, "%" + part[0])

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset

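# Illustrative usage (a sketch): a "unixtime offset" string and an already
# parsed tuple both come back as a (unixtime, offset) pair; calendar-style
# strings are matched against defaultdateformats and depend on the local
# time zone, so no output is shown for them here.
#     >>> parsedate('1262304000 0')
#     (1262304000, 0)
#     >>> parsedate((1262304000, 0))
#     (1262304000, 0)
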
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    '-{days}' within a given number of days of today

    '{date} to {date}' a date range, inclusive

    """

    def lower(date):
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in "31 30 29".split():
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()
    if date[0] == "<":
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop

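# Illustrative usage (a sketch): the returned value is a predicate over
# unixtime values; the bounds are computed in local time, so no exact
# output is shown.
#     dm = matchdate('>2010-01-01')        # on or after Jan 1 2010
#     dm(when)                             # -> True or False for a unixtime
#     rng = matchdate('2010-01-01 to 2010-12-31')
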
def shortuser(user):
    """Return a short representation of a user name or email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    f = user.find(' ')
    if f >= 0:
        user = user[:f]
    f = user.find('.')
    if f >= 0:
        user = user[:f]
    return user

def email(author):
    '''get email of author.'''
    r = author.find('>')
    if r == -1:
        r = None
    return author[author.find('<') + 1:r]

def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    if len(text) <= maxlength:
        return text
    else:
        return "%s..." % (text[:maxlength - 3])

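# Illustrative usage (a sketch, using a made-up author string):
#     >>> shortuser('John Doe <john.doe@example.com>')
#     'john'
#     >>> email('John Doe <john.doe@example.com>')
#     'john.doe@example.com'
#     >>> ellipsis('abcdefgh', 5)
#     'ab...'
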
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        if err.filename == path:
            raise err
    if followsym and hasattr(os.path, 'samestat'):
        def _add_dir_if_not_there(dirlst, dirname):
            match = False
            samestat = os.path.samestat
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        _add_dir_if_not_there(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if _add_dir_if_not_there(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

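# Illustrative usage (a sketch; '/srv/repos' is a hypothetical path):
#     for repo in walkrepos('/srv/repos', followsym=True, recurse=True):
#         print repo    # root of each repository found, including patch queues
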
_rcpath = None

def os_rcpath():
    '''return default os-specific hgrc search path'''
    path = system_rcpath()
    path.extend(user_rcpath())
    path = [os.path.normpath(f) for f in path]
    return path

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = os_rcpath()
    return _rcpath

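# Illustrative behaviour (a sketch; the paths are made up): with, say,
# HGRCPATH=/etc/mercurial:/home/user/extra.rc in the environment, rcpath()
# returns every *.rc file found under /etc/mercurial followed by the single
# file /home/user/extra.rc; with HGRCPATH unset it falls back to os_rcpath().
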
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for multiplier, divisor, format in units:
        if nbytes >= divisor * multiplier:
            return format % (nbytes / float(divisor))
    return units[-1][2] % nbytes

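# Illustrative usage (a sketch; assumes no translation is active for the
# unit strings):
#     >>> bytecount(2048)
#     '2.00 KB'
#     >>> bytecount(100 * (1 << 20))
#     '100 MB'
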
def drop_scheme(scheme, path):
    sc = scheme + ':'
    if path.startswith(sc):
        path = path[len(sc):]
        if path.startswith('//'):
            if scheme == 'file':
                i = path.find('/', 2)
                if i == -1:
                    return ''
                # On Windows, absolute paths are rooted at the current drive
                # root. On POSIX they are rooted at the file system root.
                if os.name == 'nt':
                    droot = os.path.splitdrive(os.getcwd())[0] + '/'
                    path = os.path.join(droot, path[i + 1:])
                else:
                    path = path[i:]
            else:
                path = path[2:]
    return path

def uirepr(s):
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')

#### naming convention of below implementation follows 'textwrap' module

class MBTextWrapper(textwrap.TextWrapper):
    def __init__(self, **kwargs):
        textwrap.TextWrapper.__init__(self, **kwargs)

    def _cutdown(self, str, space_left):
        l = 0
        ucstr = unicode(str, encoding.encoding)
        w = unicodedata.east_asian_width
        for i in xrange(len(ucstr)):
            l += w(ucstr[i]) in 'WFA' and 2 or 1
            if space_left < l:
                return (ucstr[:i].encode(encoding.encoding),
                        ucstr[i:].encode(encoding.encoding))
        return str, ''

    # ----------------------------------------
    # overriding of base class

    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        space_left = max(width - cur_len, 1)

        if self.break_long_words:
            cut, res = self._cutdown(reversed_chunks[-1], space_left)
            cur_line.append(cut)
            reversed_chunks[-1] = res
        elif not cur_line:
            cur_line.append(reversed_chunks.pop())

#### naming convention of above implementation follows 'textwrap' module

def wrap(line, width=None, initindent='', hangindent=''):
    if width is None:
        width = termwidth() - 2
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line)

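# Illustrative usage (a sketch; plain ASCII input, so only the standard
# textwrap behaviour is exercised):
#     >>> wrap('aaa bbb ccc', width=7)
#     'aaa bbb\nccc'
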
def iterlines(iterator):
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line

def expandpath(path):
    return os.path.expanduser(os.path.expandvars(path))

def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if main_is_frozen():
        return [sys.executable]
    return gethgcmd()

def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # The Windows case is easier because the child process either
    # successfully starts and validates the condition, or exits on
    # failure. We just poll on its PID. On Unix, if the child process
    # fails to start, it will be left in a zombie state until the
    # parent waits on it, which we cannot do since we expect a
    # long-running process on success. Instead we listen for SIGCHLD
    # telling us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    if hasattr(signal, 'SIGCHLD'):
        prevhandler = signal.signal(signal.SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)

try:
    any, all = any, all
except NameError:
    def any(iterable):
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        for i in iterable:
            if not i:
                return False
        return True

def termwidth():
    if 'COLUMNS' in os.environ:
        try:
            return int(os.environ['COLUMNS'])
        except ValueError:
            pass
    return termwidth_()

def interpolate(prefix, mapping, s, fn=None):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.
    """
    fn = fn or (lambda s: s)
    r = re.compile(r'%s(%s)' % (prefix, '|'.join(mapping.keys())))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)

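# Illustrative usage (a sketch, with a made-up mapping):
#     >>> interpolate('%', {'foo': 'bar'}, 'say %foo')
#     'say bar'
#     >>> interpolate('%', {'foo': 'b&r'}, 'say %foo', fn=lambda s: s.upper())
#     'say B&R'
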
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
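
# Illustrative usage (a sketch; the 'http' lookup relies on the system's
# services database, so the value shown is typical rather than guaranteed):
#     >>> getport(8080)
#     8080
#     >>> getport('8080')
#     8080
#     >>> getport('http')
#     80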