Merge with crew-stable
Patrick Mezard
r8256:e68e149f merge default
@@ -1,1478 +1,1493 @@
1 # util.py - Mercurial utility functions and platform specfic implementations
1 # util.py - Mercurial utility functions and platform specfic implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2, incorporated herein by reference.
8 # GNU General Public License version 2, incorporated herein by reference.
9
9
10 """Mercurial utility functions and platform specfic implementations.
10 """Mercurial utility functions and platform specfic implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from i18n import _
16 from i18n import _
17 import cStringIO, errno, re, shutil, sys, tempfile, traceback, error
17 import cStringIO, errno, re, shutil, sys, tempfile, traceback, error
18 import os, stat, threading, time, calendar, glob, osutil
18 import os, stat, threading, time, calendar, glob, osutil
19 import imp
19 import imp
20
20
21 # Python compatibility
21 # Python compatibility
22
22
23 _md5 = None
23 _md5 = None
24 def md5(s):
24 def md5(s):
25 global _md5
25 global _md5
26 if _md5 is None:
26 if _md5 is None:
27 try:
27 try:
28 import hashlib
28 import hashlib
29 _md5 = hashlib.md5
29 _md5 = hashlib.md5
30 except ImportError:
30 except ImportError:
31 import md5
31 import md5
32 _md5 = md5.md5
32 _md5 = md5.md5
33 return _md5(s)
33 return _md5(s)
34
34
35 _sha1 = None
35 _sha1 = None
36 def sha1(s):
36 def sha1(s):
37 global _sha1
37 global _sha1
38 if _sha1 is None:
38 if _sha1 is None:
39 try:
39 try:
40 import hashlib
40 import hashlib
41 _sha1 = hashlib.sha1
41 _sha1 = hashlib.sha1
42 except ImportError:
42 except ImportError:
43 import sha
43 import sha
44 _sha1 = sha.sha
44 _sha1 = sha.sha
45 return _sha1(s)
45 return _sha1(s)
46
46
47 try:
47 try:
48 import subprocess
48 import subprocess
49 subprocess.Popen # trigger ImportError early
49 subprocess.Popen # trigger ImportError early
50 closefds = os.name == 'posix'
50 closefds = os.name == 'posix'
51 def popen2(cmd, mode='t', bufsize=-1):
51 def popen2(cmd, mode='t', bufsize=-1):
52 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
52 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
53 close_fds=closefds,
53 close_fds=closefds,
54 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
54 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
55 return p.stdin, p.stdout
55 return p.stdin, p.stdout
56 def popen3(cmd, mode='t', bufsize=-1):
56 def popen3(cmd, mode='t', bufsize=-1):
57 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
57 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
58 close_fds=closefds,
58 close_fds=closefds,
59 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
59 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
60 stderr=subprocess.PIPE)
60 stderr=subprocess.PIPE)
61 return p.stdin, p.stdout, p.stderr
61 return p.stdin, p.stdout, p.stderr
62 def Popen3(cmd, capturestderr=False, bufsize=-1):
62 def Popen3(cmd, capturestderr=False, bufsize=-1):
63 stderr = capturestderr and subprocess.PIPE or None
63 stderr = capturestderr and subprocess.PIPE or None
64 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
64 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
65 close_fds=closefds,
65 close_fds=closefds,
66 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
66 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
67 stderr=stderr)
67 stderr=stderr)
68 p.fromchild = p.stdout
68 p.fromchild = p.stdout
69 p.tochild = p.stdin
69 p.tochild = p.stdin
70 p.childerr = p.stderr
70 p.childerr = p.stderr
71 return p
71 return p
72 except ImportError:
72 except ImportError:
73 subprocess = None
73 subprocess = None
74 from popen2 import Popen3
74 from popen2 import Popen3
75 popen2 = os.popen2
75 popen2 = os.popen2
76 popen3 = os.popen3
76 popen3 = os.popen3
77
77
78
78
79 def version():
79 def version():
80 """Return version information if available."""
80 """Return version information if available."""
81 try:
81 try:
82 import __version__
82 import __version__
83 return __version__.version
83 return __version__.version
84 except ImportError:
84 except ImportError:
85 return 'unknown'
85 return 'unknown'
86
86
87 # used by parsedate
87 # used by parsedate
88 defaultdateformats = (
88 defaultdateformats = (
89 '%Y-%m-%d %H:%M:%S',
89 '%Y-%m-%d %H:%M:%S',
90 '%Y-%m-%d %I:%M:%S%p',
90 '%Y-%m-%d %I:%M:%S%p',
91 '%Y-%m-%d %H:%M',
91 '%Y-%m-%d %H:%M',
92 '%Y-%m-%d %I:%M%p',
92 '%Y-%m-%d %I:%M%p',
93 '%Y-%m-%d',
93 '%Y-%m-%d',
94 '%m-%d',
94 '%m-%d',
95 '%m/%d',
95 '%m/%d',
96 '%m/%d/%y',
96 '%m/%d/%y',
97 '%m/%d/%Y',
97 '%m/%d/%Y',
98 '%a %b %d %H:%M:%S %Y',
98 '%a %b %d %H:%M:%S %Y',
99 '%a %b %d %I:%M:%S%p %Y',
99 '%a %b %d %I:%M:%S%p %Y',
100 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
100 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
101 '%b %d %H:%M:%S %Y',
101 '%b %d %H:%M:%S %Y',
102 '%b %d %I:%M:%S%p %Y',
102 '%b %d %I:%M:%S%p %Y',
103 '%b %d %H:%M:%S',
103 '%b %d %H:%M:%S',
104 '%b %d %I:%M:%S%p',
104 '%b %d %I:%M:%S%p',
105 '%b %d %H:%M',
105 '%b %d %H:%M',
106 '%b %d %I:%M%p',
106 '%b %d %I:%M%p',
107 '%b %d %Y',
107 '%b %d %Y',
108 '%b %d',
108 '%b %d',
109 '%H:%M:%S',
109 '%H:%M:%S',
110 '%I:%M:%SP',
110 '%I:%M:%SP',
111 '%H:%M',
111 '%H:%M',
112 '%I:%M%p',
112 '%I:%M%p',
113 )
113 )
114
114
115 extendeddateformats = defaultdateformats + (
115 extendeddateformats = defaultdateformats + (
116 "%Y",
116 "%Y",
117 "%Y-%m",
117 "%Y-%m",
118 "%b",
118 "%b",
119 "%b %Y",
119 "%b %Y",
120 )
120 )
121
121
122 def cachefunc(func):
122 def cachefunc(func):
123 '''cache the result of function calls'''
123 '''cache the result of function calls'''
124 # XXX doesn't handle keywords args
124 # XXX doesn't handle keywords args
125 cache = {}
125 cache = {}
126 if func.func_code.co_argcount == 1:
126 if func.func_code.co_argcount == 1:
127 # we gain a small amount of time because
127 # we gain a small amount of time because
128 # we don't need to pack/unpack the list
128 # we don't need to pack/unpack the list
129 def f(arg):
129 def f(arg):
130 if arg not in cache:
130 if arg not in cache:
131 cache[arg] = func(arg)
131 cache[arg] = func(arg)
132 return cache[arg]
132 return cache[arg]
133 else:
133 else:
134 def f(*args):
134 def f(*args):
135 if args not in cache:
135 if args not in cache:
136 cache[args] = func(*args)
136 cache[args] = func(*args)
137 return cache[args]
137 return cache[args]
138
138
139 return f
139 return f
140
140
141 class propertycache(object):
141 class propertycache(object):
142 def __init__(self, func):
142 def __init__(self, func):
143 self.func = func
143 self.func = func
144 self.name = func.__name__
144 self.name = func.__name__
145 def __get__(self, obj, type=None):
145 def __get__(self, obj, type=None):
146 result = self.func(obj)
146 result = self.func(obj)
147 setattr(obj, self.name, result)
147 setattr(obj, self.name, result)
148 return result
148 return result
149
149
150 def pipefilter(s, cmd):
150 def pipefilter(s, cmd):
151 '''filter string S through command CMD, returning its output'''
151 '''filter string S through command CMD, returning its output'''
152 (pin, pout) = popen2(cmd, 'b')
152 (pin, pout) = popen2(cmd, 'b')
153 def writer():
153 def writer():
154 try:
154 try:
155 pin.write(s)
155 pin.write(s)
156 pin.close()
156 pin.close()
157 except IOError, inst:
157 except IOError, inst:
158 if inst.errno != errno.EPIPE:
158 if inst.errno != errno.EPIPE:
159 raise
159 raise
160
160
161 # we should use select instead on UNIX, but this will work on most
161 # we should use select instead on UNIX, but this will work on most
162 # systems, including Windows
162 # systems, including Windows
163 w = threading.Thread(target=writer)
163 w = threading.Thread(target=writer)
164 w.start()
164 w.start()
165 f = pout.read()
165 f = pout.read()
166 pout.close()
166 pout.close()
167 w.join()
167 w.join()
168 return f
168 return f
169
169
170 def tempfilter(s, cmd):
170 def tempfilter(s, cmd):
171 '''filter string S through a pair of temporary files with CMD.
171 '''filter string S through a pair of temporary files with CMD.
172 CMD is used as a template to create the real command to be run,
172 CMD is used as a template to create the real command to be run,
173 with the strings INFILE and OUTFILE replaced by the real names of
173 with the strings INFILE and OUTFILE replaced by the real names of
174 the temporary files generated.'''
174 the temporary files generated.'''
175 inname, outname = None, None
175 inname, outname = None, None
176 try:
176 try:
177 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
177 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
178 fp = os.fdopen(infd, 'wb')
178 fp = os.fdopen(infd, 'wb')
179 fp.write(s)
179 fp.write(s)
180 fp.close()
180 fp.close()
181 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
181 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
182 os.close(outfd)
182 os.close(outfd)
183 cmd = cmd.replace('INFILE', inname)
183 cmd = cmd.replace('INFILE', inname)
184 cmd = cmd.replace('OUTFILE', outname)
184 cmd = cmd.replace('OUTFILE', outname)
185 code = os.system(cmd)
185 code = os.system(cmd)
186 if sys.platform == 'OpenVMS' and code & 1:
186 if sys.platform == 'OpenVMS' and code & 1:
187 code = 0
187 code = 0
188 if code: raise Abort(_("command '%s' failed: %s") %
188 if code: raise Abort(_("command '%s' failed: %s") %
189 (cmd, explain_exit(code)))
189 (cmd, explain_exit(code)))
190 return open(outname, 'rb').read()
190 return open(outname, 'rb').read()
191 finally:
191 finally:
192 try:
192 try:
193 if inname: os.unlink(inname)
193 if inname: os.unlink(inname)
194 except: pass
194 except: pass
195 try:
195 try:
196 if outname: os.unlink(outname)
196 if outname: os.unlink(outname)
197 except: pass
197 except: pass
198
198
199 filtertable = {
199 filtertable = {
200 'tempfile:': tempfilter,
200 'tempfile:': tempfilter,
201 'pipe:': pipefilter,
201 'pipe:': pipefilter,
202 }
202 }
203
203
204 def filter(s, cmd):
204 def filter(s, cmd):
205 "filter a string through a command that transforms its input to its output"
205 "filter a string through a command that transforms its input to its output"
206 for name, fn in filtertable.iteritems():
206 for name, fn in filtertable.iteritems():
207 if cmd.startswith(name):
207 if cmd.startswith(name):
208 return fn(s, cmd[len(name):].lstrip())
208 return fn(s, cmd[len(name):].lstrip())
209 return pipefilter(s, cmd)
209 return pipefilter(s, cmd)
210
210
211 def binary(s):
211 def binary(s):
212 """return true if a string is binary data"""
212 """return true if a string is binary data"""
213 return bool(s and '\0' in s)
213 return bool(s and '\0' in s)
214
214
215 def increasingchunks(source, min=1024, max=65536):
215 def increasingchunks(source, min=1024, max=65536):
216 '''return no less than min bytes per chunk while data remains,
216 '''return no less than min bytes per chunk while data remains,
217 doubling min after each chunk until it reaches max'''
217 doubling min after each chunk until it reaches max'''
218 def log2(x):
218 def log2(x):
219 if not x:
219 if not x:
220 return 0
220 return 0
221 i = 0
221 i = 0
222 while x:
222 while x:
223 x >>= 1
223 x >>= 1
224 i += 1
224 i += 1
225 return i - 1
225 return i - 1
226
226
227 buf = []
227 buf = []
228 blen = 0
228 blen = 0
229 for chunk in source:
229 for chunk in source:
230 buf.append(chunk)
230 buf.append(chunk)
231 blen += len(chunk)
231 blen += len(chunk)
232 if blen >= min:
232 if blen >= min:
233 if min < max:
233 if min < max:
234 min = min << 1
234 min = min << 1
235 nmin = 1 << log2(blen)
235 nmin = 1 << log2(blen)
236 if nmin > min:
236 if nmin > min:
237 min = nmin
237 min = nmin
238 if min > max:
238 if min > max:
239 min = max
239 min = max
240 yield ''.join(buf)
240 yield ''.join(buf)
241 blen = 0
241 blen = 0
242 buf = []
242 buf = []
243 if buf:
243 if buf:
244 yield ''.join(buf)
244 yield ''.join(buf)
245
245
246 Abort = error.Abort
246 Abort = error.Abort
247
247
248 def always(fn): return True
248 def always(fn): return True
249 def never(fn): return False
249 def never(fn): return False
250
250
251 def patkind(name, default):
251 def patkind(name, default):
252 """Split a string into an optional pattern kind prefix and the
252 """Split a string into an optional pattern kind prefix and the
253 actual pattern."""
253 actual pattern."""
254 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
254 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
255 if name.startswith(prefix + ':'): return name.split(':', 1)
255 if name.startswith(prefix + ':'): return name.split(':', 1)
256 return default, name
256 return default, name
257
257
258 def globre(pat, head='^', tail='$'):
258 def globre(pat, head='^', tail='$'):
259 "convert a glob pattern into a regexp"
259 "convert a glob pattern into a regexp"
260 i, n = 0, len(pat)
260 i, n = 0, len(pat)
261 res = ''
261 res = ''
262 group = 0
262 group = 0
263 def peek(): return i < n and pat[i]
263 def peek(): return i < n and pat[i]
264 while i < n:
264 while i < n:
265 c = pat[i]
265 c = pat[i]
266 i = i+1
266 i = i+1
267 if c == '*':
267 if c == '*':
268 if peek() == '*':
268 if peek() == '*':
269 i += 1
269 i += 1
270 res += '.*'
270 res += '.*'
271 else:
271 else:
272 res += '[^/]*'
272 res += '[^/]*'
273 elif c == '?':
273 elif c == '?':
274 res += '.'
274 res += '.'
275 elif c == '[':
275 elif c == '[':
276 j = i
276 j = i
277 if j < n and pat[j] in '!]':
277 if j < n and pat[j] in '!]':
278 j += 1
278 j += 1
279 while j < n and pat[j] != ']':
279 while j < n and pat[j] != ']':
280 j += 1
280 j += 1
281 if j >= n:
281 if j >= n:
282 res += '\\['
282 res += '\\['
283 else:
283 else:
284 stuff = pat[i:j].replace('\\','\\\\')
284 stuff = pat[i:j].replace('\\','\\\\')
285 i = j + 1
285 i = j + 1
286 if stuff[0] == '!':
286 if stuff[0] == '!':
287 stuff = '^' + stuff[1:]
287 stuff = '^' + stuff[1:]
288 elif stuff[0] == '^':
288 elif stuff[0] == '^':
289 stuff = '\\' + stuff
289 stuff = '\\' + stuff
290 res = '%s[%s]' % (res, stuff)
290 res = '%s[%s]' % (res, stuff)
291 elif c == '{':
291 elif c == '{':
292 group += 1
292 group += 1
293 res += '(?:'
293 res += '(?:'
294 elif c == '}' and group:
294 elif c == '}' and group:
295 res += ')'
295 res += ')'
296 group -= 1
296 group -= 1
297 elif c == ',' and group:
297 elif c == ',' and group:
298 res += '|'
298 res += '|'
299 elif c == '\\':
299 elif c == '\\':
300 p = peek()
300 p = peek()
301 if p:
301 if p:
302 i += 1
302 i += 1
303 res += re.escape(p)
303 res += re.escape(p)
304 else:
304 else:
305 res += re.escape(c)
305 res += re.escape(c)
306 else:
306 else:
307 res += re.escape(c)
307 res += re.escape(c)
308 return head + res + tail
308 return head + res + tail
309
309
310 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
310 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
311
311
312 def pathto(root, n1, n2):
312 def pathto(root, n1, n2):
313 '''return the relative path from one place to another.
313 '''return the relative path from one place to another.
314 root should use os.sep to separate directories
314 root should use os.sep to separate directories
315 n1 should use os.sep to separate directories
315 n1 should use os.sep to separate directories
316 n2 should use "/" to separate directories
316 n2 should use "/" to separate directories
317 returns an os.sep-separated path.
317 returns an os.sep-separated path.
318
318
319 If n1 is a relative path, it's assumed it's
319 If n1 is a relative path, it's assumed it's
320 relative to root.
320 relative to root.
321 n2 should always be relative to root.
321 n2 should always be relative to root.
322 '''
322 '''
323 if not n1: return localpath(n2)
323 if not n1: return localpath(n2)
324 if os.path.isabs(n1):
324 if os.path.isabs(n1):
325 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
325 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
326 return os.path.join(root, localpath(n2))
326 return os.path.join(root, localpath(n2))
327 n2 = '/'.join((pconvert(root), n2))
327 n2 = '/'.join((pconvert(root), n2))
328 a, b = splitpath(n1), n2.split('/')
328 a, b = splitpath(n1), n2.split('/')
329 a.reverse()
329 a.reverse()
330 b.reverse()
330 b.reverse()
331 while a and b and a[-1] == b[-1]:
331 while a and b and a[-1] == b[-1]:
332 a.pop()
332 a.pop()
333 b.pop()
333 b.pop()
334 b.reverse()
334 b.reverse()
335 return os.sep.join((['..'] * len(a)) + b) or '.'
335 return os.sep.join((['..'] * len(a)) + b) or '.'
336
336
337 def canonpath(root, cwd, myname):
337 def canonpath(root, cwd, myname):
338 """return the canonical path of myname, given cwd and root"""
338 """return the canonical path of myname, given cwd and root"""
339 if root == os.sep:
339 if root == os.sep:
340 rootsep = os.sep
340 rootsep = os.sep
341 elif endswithsep(root):
341 elif endswithsep(root):
342 rootsep = root
342 rootsep = root
343 else:
343 else:
344 rootsep = root + os.sep
344 rootsep = root + os.sep
345 name = myname
345 name = myname
346 if not os.path.isabs(name):
346 if not os.path.isabs(name):
347 name = os.path.join(root, cwd, name)
347 name = os.path.join(root, cwd, name)
348 name = os.path.normpath(name)
348 name = os.path.normpath(name)
349 audit_path = path_auditor(root)
349 audit_path = path_auditor(root)
350 if name != rootsep and name.startswith(rootsep):
350 if name != rootsep and name.startswith(rootsep):
351 name = name[len(rootsep):]
351 name = name[len(rootsep):]
352 audit_path(name)
352 audit_path(name)
353 return pconvert(name)
353 return pconvert(name)
354 elif name == root:
354 elif name == root:
355 return ''
355 return ''
356 else:
356 else:
357 # Determine whether `name' is in the hierarchy at or beneath `root',
357 # Determine whether `name' is in the hierarchy at or beneath `root',
358 # by iterating name=dirname(name) until that causes no change (can't
358 # by iterating name=dirname(name) until that causes no change (can't
359 # check name == '/', because that doesn't work on windows). For each
359 # check name == '/', because that doesn't work on windows). For each
360 # `name', compare dev/inode numbers. If they match, the list `rel'
360 # `name', compare dev/inode numbers. If they match, the list `rel'
361 # holds the reversed list of components making up the relative file
361 # holds the reversed list of components making up the relative file
362 # name we want.
362 # name we want.
363 root_st = os.stat(root)
363 root_st = os.stat(root)
364 rel = []
364 rel = []
365 while True:
365 while True:
366 try:
366 try:
367 name_st = os.stat(name)
367 name_st = os.stat(name)
368 except OSError:
368 except OSError:
369 break
369 break
370 if samestat(name_st, root_st):
370 if samestat(name_st, root_st):
371 if not rel:
371 if not rel:
372 # name was actually the same as root (maybe a symlink)
372 # name was actually the same as root (maybe a symlink)
373 return ''
373 return ''
374 rel.reverse()
374 rel.reverse()
375 name = os.path.join(*rel)
375 name = os.path.join(*rel)
376 audit_path(name)
376 audit_path(name)
377 return pconvert(name)
377 return pconvert(name)
378 dirname, basename = os.path.split(name)
378 dirname, basename = os.path.split(name)
379 rel.append(basename)
379 rel.append(basename)
380 if dirname == name:
380 if dirname == name:
381 break
381 break
382 name = dirname
382 name = dirname
383
383
384 raise Abort('%s not under root' % myname)
384 raise Abort('%s not under root' % myname)
385
385
386 def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None, dflt_pat='glob'):
386 def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None, dflt_pat='glob'):
387 """build a function to match a set of file patterns
387 """build a function to match a set of file patterns
388
388
389 arguments:
389 arguments:
390 canonroot - the canonical root of the tree you're matching against
390 canonroot - the canonical root of the tree you're matching against
391 cwd - the current working directory, if relevant
391 cwd - the current working directory, if relevant
392 names - patterns to find
392 names - patterns to find
393 inc - patterns to include
393 inc - patterns to include
394 exc - patterns to exclude
394 exc - patterns to exclude
395 dflt_pat - if a pattern in names has no explicit type, assume this one
395 dflt_pat - if a pattern in names has no explicit type, assume this one
396 src - where these patterns came from (e.g. .hgignore)
396 src - where these patterns came from (e.g. .hgignore)
397
397
398 a pattern is one of:
398 a pattern is one of:
399 'glob:<glob>' - a glob relative to cwd
399 'glob:<glob>' - a glob relative to cwd
400 're:<regexp>' - a regular expression
400 're:<regexp>' - a regular expression
401 'path:<path>' - a path relative to canonroot
401 'path:<path>' - a path relative to canonroot
402 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
402 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
403 'relpath:<path>' - a path relative to cwd
403 'relpath:<path>' - a path relative to cwd
404 'relre:<regexp>' - a regexp that doesn't have to match the start of a name
404 'relre:<regexp>' - a regexp that doesn't have to match the start of a name
405 '<something>' - one of the cases above, selected by the dflt_pat argument
405 '<something>' - one of the cases above, selected by the dflt_pat argument
406
406
407 returns:
407 returns:
408 a 3-tuple containing
408 a 3-tuple containing
409 - list of roots (places where one should start a recursive walk of the fs);
409 - list of roots (places where one should start a recursive walk of the fs);
410 this often matches the explicit non-pattern names passed in, but also
410 this often matches the explicit non-pattern names passed in, but also
411 includes the initial part of glob: patterns that has no glob characters
411 includes the initial part of glob: patterns that has no glob characters
412 - a bool match(filename) function
412 - a bool match(filename) function
413 - a bool indicating if any patterns were passed in
413 - a bool indicating if any patterns were passed in
414 """
414 """
415
415
416 # a common case: no patterns at all
416 # a common case: no patterns at all
417 if not names and not inc and not exc:
417 if not names and not inc and not exc:
418 return [], always, False
418 return [], always, False
419
419
420 def contains_glob(name):
420 def contains_glob(name):
421 for c in name:
421 for c in name:
422 if c in _globchars: return True
422 if c in _globchars: return True
423 return False
423 return False
424
424
425 def regex(kind, name, tail):
425 def regex(kind, name, tail):
426 '''convert a pattern into a regular expression'''
426 '''convert a pattern into a regular expression'''
427 if not name:
427 if not name:
428 return ''
428 return ''
429 if kind == 're':
429 if kind == 're':
430 return name
430 return name
431 elif kind == 'path':
431 elif kind == 'path':
432 return '^' + re.escape(name) + '(?:/|$)'
432 return '^' + re.escape(name) + '(?:/|$)'
433 elif kind == 'relglob':
433 elif kind == 'relglob':
434 return globre(name, '(?:|.*/)', tail)
434 return globre(name, '(?:|.*/)', tail)
435 elif kind == 'relpath':
435 elif kind == 'relpath':
436 return re.escape(name) + '(?:/|$)'
436 return re.escape(name) + '(?:/|$)'
437 elif kind == 'relre':
437 elif kind == 'relre':
438 if name.startswith('^'):
438 if name.startswith('^'):
439 return name
439 return name
440 return '.*' + name
440 return '.*' + name
441 return globre(name, '', tail)
441 return globre(name, '', tail)
442
442
443 def matchfn(pats, tail):
443 def matchfn(pats, tail):
444 """build a matching function from a set of patterns"""
444 """build a matching function from a set of patterns"""
445 if not pats:
445 if not pats:
446 return
446 return
447 try:
447 try:
448 pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
448 pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
449 if len(pat) > 20000:
449 if len(pat) > 20000:
450 raise OverflowError()
450 raise OverflowError()
451 return re.compile(pat).match
451 return re.compile(pat).match
452 except OverflowError:
452 except OverflowError:
453 # We're using a Python with a tiny regex engine and we
453 # We're using a Python with a tiny regex engine and we
454 # made it explode, so we'll divide the pattern list in two
454 # made it explode, so we'll divide the pattern list in two
455 # until it works
455 # until it works
456 l = len(pats)
456 l = len(pats)
457 if l < 2:
457 if l < 2:
458 raise
458 raise
459 a, b = matchfn(pats[:l//2], tail), matchfn(pats[l//2:], tail)
459 a, b = matchfn(pats[:l//2], tail), matchfn(pats[l//2:], tail)
460 return lambda s: a(s) or b(s)
460 return lambda s: a(s) or b(s)
461 except re.error:
461 except re.error:
462 for k, p in pats:
462 for k, p in pats:
463 try:
463 try:
464 re.compile('(?:%s)' % regex(k, p, tail))
464 re.compile('(?:%s)' % regex(k, p, tail))
465 except re.error:
465 except re.error:
466 if src:
466 if src:
467 raise Abort("%s: invalid pattern (%s): %s" %
467 raise Abort("%s: invalid pattern (%s): %s" %
468 (src, k, p))
468 (src, k, p))
469 else:
469 else:
470 raise Abort("invalid pattern (%s): %s" % (k, p))
470 raise Abort("invalid pattern (%s): %s" % (k, p))
471 raise Abort("invalid pattern")
471 raise Abort("invalid pattern")
472
472
473 def globprefix(pat):
473 def globprefix(pat):
474 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
474 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
475 root = []
475 root = []
476 for p in pat.split('/'):
476 for p in pat.split('/'):
477 if contains_glob(p): break
477 if contains_glob(p): break
478 root.append(p)
478 root.append(p)
479 return '/'.join(root) or '.'
479 return '/'.join(root) or '.'
480
480
481 def normalizepats(names, default):
481 def normalizepats(names, default):
482 pats = []
482 pats = []
483 roots = []
483 roots = []
484 anypats = False
484 anypats = False
485 for kind, name in [patkind(p, default) for p in names]:
485 for kind, name in [patkind(p, default) for p in names]:
486 if kind in ('glob', 'relpath'):
486 if kind in ('glob', 'relpath'):
487 name = canonpath(canonroot, cwd, name)
487 name = canonpath(canonroot, cwd, name)
488 elif kind in ('relglob', 'path'):
488 elif kind in ('relglob', 'path'):
489 name = normpath(name)
489 name = normpath(name)
490
490
491 pats.append((kind, name))
491 pats.append((kind, name))
492
492
493 if kind in ('glob', 're', 'relglob', 'relre'):
493 if kind in ('glob', 're', 'relglob', 'relre'):
494 anypats = True
494 anypats = True
495
495
496 if kind == 'glob':
496 if kind == 'glob':
497 root = globprefix(name)
497 root = globprefix(name)
498 roots.append(root)
498 roots.append(root)
499 elif kind in ('relpath', 'path'):
499 elif kind in ('relpath', 'path'):
500 roots.append(name or '.')
500 roots.append(name or '.')
501 elif kind == 'relglob':
501 elif kind == 'relglob':
502 roots.append('.')
502 roots.append('.')
503 return roots, pats, anypats
503 return roots, pats, anypats
504
504
505 roots, pats, anypats = normalizepats(names, dflt_pat)
505 roots, pats, anypats = normalizepats(names, dflt_pat)
506
506
507 patmatch = matchfn(pats, '$') or always
507 patmatch = matchfn(pats, '$') or always
508 incmatch = always
508 incmatch = always
509 if inc:
509 if inc:
510 dummy, inckinds, dummy = normalizepats(inc, 'glob')
510 dummy, inckinds, dummy = normalizepats(inc, 'glob')
511 incmatch = matchfn(inckinds, '(?:/|$)')
511 incmatch = matchfn(inckinds, '(?:/|$)')
512 excmatch = never
512 excmatch = never
513 if exc:
513 if exc:
514 dummy, exckinds, dummy = normalizepats(exc, 'glob')
514 dummy, exckinds, dummy = normalizepats(exc, 'glob')
515 excmatch = matchfn(exckinds, '(?:/|$)')
515 excmatch = matchfn(exckinds, '(?:/|$)')
516
516
517 if not names and inc and not exc:
517 if not names and inc and not exc:
518 # common case: hgignore patterns
518 # common case: hgignore patterns
519 match = incmatch
519 match = incmatch
520 else:
520 else:
521 match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)
521 match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)
522
522
523 return (roots, match, (inc or exc or anypats) and True)
523 return (roots, match, (inc or exc or anypats) and True)
524
524
525 _hgexecutable = None
525 _hgexecutable = None
526
526
527 def main_is_frozen():
527 def main_is_frozen():
528 """return True if we are a frozen executable.
528 """return True if we are a frozen executable.
529
529
530 The code supports py2exe (most common, Windows only) and tools/freeze
530 The code supports py2exe (most common, Windows only) and tools/freeze
531 (portable, not much used).
531 (portable, not much used).
532 """
532 """
533 return (hasattr(sys, "frozen") or # new py2exe
533 return (hasattr(sys, "frozen") or # new py2exe
534 hasattr(sys, "importers") or # old py2exe
534 hasattr(sys, "importers") or # old py2exe
535 imp.is_frozen("__main__")) # tools/freeze
535 imp.is_frozen("__main__")) # tools/freeze
536
536
537 def hgexecutable():
537 def hgexecutable():
538 """return location of the 'hg' executable.
538 """return location of the 'hg' executable.
539
539
540 Defaults to $HG or 'hg' in the search path.
540 Defaults to $HG or 'hg' in the search path.
541 """
541 """
542 if _hgexecutable is None:
542 if _hgexecutable is None:
543 hg = os.environ.get('HG')
543 hg = os.environ.get('HG')
544 if hg:
544 if hg:
545 set_hgexecutable(hg)
545 set_hgexecutable(hg)
546 elif main_is_frozen():
546 elif main_is_frozen():
547 set_hgexecutable(sys.executable)
547 set_hgexecutable(sys.executable)
548 else:
548 else:
549 set_hgexecutable(find_exe('hg') or 'hg')
549 set_hgexecutable(find_exe('hg') or 'hg')
550 return _hgexecutable
550 return _hgexecutable
551
551
552 def set_hgexecutable(path):
552 def set_hgexecutable(path):
553 """set location of the 'hg' executable"""
553 """set location of the 'hg' executable"""
554 global _hgexecutable
554 global _hgexecutable
555 _hgexecutable = path
555 _hgexecutable = path
556
556
557 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
557 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
558 '''enhanced shell command execution.
558 '''enhanced shell command execution.
559 run with environment maybe modified, maybe in different dir.
559 run with environment maybe modified, maybe in different dir.
560
560
561 if command fails and onerr is None, return status. if ui object,
561 if command fails and onerr is None, return status. if ui object,
562 print error message and return status, else raise onerr object as
562 print error message and return status, else raise onerr object as
563 exception.'''
563 exception.'''
564 def py2shell(val):
564 def py2shell(val):
565 'convert python object into string that is useful to shell'
565 'convert python object into string that is useful to shell'
566 if val in (None, False):
566 if val in (None, False):
567 return '0'
567 return '0'
568 if val == True:
568 if val == True:
569 return '1'
569 return '1'
570 return str(val)
570 return str(val)
571 oldenv = {}
571 oldenv = {}
572 for k in environ:
572 for k in environ:
573 oldenv[k] = os.environ.get(k)
573 oldenv[k] = os.environ.get(k)
574 if cwd is not None:
574 if cwd is not None:
575 oldcwd = os.getcwd()
575 oldcwd = os.getcwd()
576 origcmd = cmd
576 origcmd = cmd
577 if os.name == 'nt':
577 if os.name == 'nt':
578 cmd = '"%s"' % cmd
578 cmd = '"%s"' % cmd
579 try:
579 try:
580 for k, v in environ.iteritems():
580 for k, v in environ.iteritems():
581 os.environ[k] = py2shell(v)
581 os.environ[k] = py2shell(v)
582 os.environ['HG'] = hgexecutable()
582 os.environ['HG'] = hgexecutable()
583 if cwd is not None and oldcwd != cwd:
583 if cwd is not None and oldcwd != cwd:
584 os.chdir(cwd)
584 os.chdir(cwd)
585 rc = os.system(cmd)
585 rc = os.system(cmd)
586 if sys.platform == 'OpenVMS' and rc & 1:
586 if sys.platform == 'OpenVMS' and rc & 1:
587 rc = 0
587 rc = 0
588 if rc and onerr:
588 if rc and onerr:
589 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
589 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
590 explain_exit(rc)[0])
590 explain_exit(rc)[0])
591 if errprefix:
591 if errprefix:
592 errmsg = '%s: %s' % (errprefix, errmsg)
592 errmsg = '%s: %s' % (errprefix, errmsg)
593 try:
593 try:
594 onerr.warn(errmsg + '\n')
594 onerr.warn(errmsg + '\n')
595 except AttributeError:
595 except AttributeError:
596 raise onerr(errmsg)
596 raise onerr(errmsg)
597 return rc
597 return rc
598 finally:
598 finally:
599 for k, v in oldenv.iteritems():
599 for k, v in oldenv.iteritems():
600 if v is None:
600 if v is None:
601 del os.environ[k]
601 del os.environ[k]
602 else:
602 else:
603 os.environ[k] = v
603 os.environ[k] = v
604 if cwd is not None and oldcwd != cwd:
604 if cwd is not None and oldcwd != cwd:
605 os.chdir(oldcwd)
605 os.chdir(oldcwd)
606
606
607 def checksignature(func):
607 def checksignature(func):
608 '''wrap a function with code to check for calling errors'''
608 '''wrap a function with code to check for calling errors'''
609 def check(*args, **kwargs):
609 def check(*args, **kwargs):
610 try:
610 try:
611 return func(*args, **kwargs)
611 return func(*args, **kwargs)
612 except TypeError:
612 except TypeError:
613 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
613 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
614 raise error.SignatureError
614 raise error.SignatureError
615 raise
615 raise
616
616
617 return check
617 return check
618
618
619 # os.path.lexists is not available on python2.3
619 # os.path.lexists is not available on python2.3
620 def lexists(filename):
620 def lexists(filename):
621 "test whether a file with this name exists. does not follow symlinks"
621 "test whether a file with this name exists. does not follow symlinks"
622 try:
622 try:
623 os.lstat(filename)
623 os.lstat(filename)
624 except:
624 except:
625 return False
625 return False
626 return True
626 return True
627
627
628 def rename(src, dst):
628 def rename(src, dst):
629 """forcibly rename a file"""
629 """forcibly rename a file"""
630 try:
630 try:
631 os.rename(src, dst)
631 os.rename(src, dst)
632 except OSError, err: # FIXME: check err (EEXIST ?)
632 except OSError, err: # FIXME: check err (EEXIST ?)
633 # on windows, rename to existing file is not allowed, so we
633
634 # must delete destination first. but if file is open, unlink
634 # On windows, rename to existing file is not allowed, so we
635 # schedules it for delete but does not delete it. rename
635 # must delete destination first. But if a file is open, unlink
636 # schedules it for delete but does not delete it. Rename
636 # happens immediately even for open files, so we rename
637 # happens immediately even for open files, so we rename
637 # destination to a temporary name, then delete that. then
638 # destination to a temporary name, then delete that. Then
638 # rename is safe to do.
639 # rename is safe to do.
639 temp = dst + "-force-rename"
640 # The temporary name is chosen at random to avoid the situation
641 # where a file is left lying around from a previous aborted run.
642 # The usual race condition this introduces can't be avoided as
643 # we need the name to rename into, and not the file itself. Due
644 # to the nature of the operation however, any races will at worst
645 # lead to the rename failing and the current operation aborting.
646
647 def tempname(prefix):
648 for tries in xrange(10):
649 temp = '%s-%08x' % (prefix, random.randint(0, 0xffffffff))
650 if not os.path.exists(temp):
651 return temp
652 raise IOError, (errno.EEXIST, "No usable temporary filename found")
653
654 temp = tempname(dst)
640 os.rename(dst, temp)
655 os.rename(dst, temp)
641 os.unlink(temp)
656 os.unlink(temp)
642 os.rename(src, dst)
657 os.rename(src, dst)
643
658
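The hunk above is the only substantive change in this merge: rename() stops using the fixed fallback name dst + "-force-rename" and instead moves the existing destination aside under a randomized temporary name before unlinking it, so a file left behind by an earlier aborted run cannot block the rename. A minimal standalone sketch of that approach follows; the forcerename name is illustrative only, not Mercurial's actual util.rename, and it assumes the random module is available (this excerpt of the diff does not show the import list changing).

    import errno
    import os
    import random

    def forcerename(src, dst):
        """Rename src over dst even if dst already exists.

        On Windows an existing destination makes os.rename() fail, so the
        destination is first renamed to a random temporary name and then
        unlinked; the random suffix avoids colliding with a temporary file
        left behind by a previously aborted run.
        """
        try:
            os.rename(src, dst)
            return
        except OSError:
            pass
        for _ in range(10):
            temp = '%s-%08x' % (dst, random.randint(0, 0xffffffff))
            if not os.path.exists(temp):
                break
        else:
            raise IOError(errno.EEXIST, "no usable temporary filename found")
        os.rename(dst, temp)    # succeeds even if dst is currently open
        os.unlink(temp)         # on Windows this may be deferred until the file is closed
        os.rename(src, dst)

As the new comments note, the remaining race (another process grabbing the chosen name between the existence check and the rename) can at worst make the rename fail and abort the current operation.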
644 def unlink(f):
659 def unlink(f):
645 """unlink and remove the directory if it is empty"""
660 """unlink and remove the directory if it is empty"""
646 os.unlink(f)
661 os.unlink(f)
647 # try removing directories that might now be empty
662 # try removing directories that might now be empty
648 try:
663 try:
649 os.removedirs(os.path.dirname(f))
664 os.removedirs(os.path.dirname(f))
650 except OSError:
665 except OSError:
651 pass
666 pass
652
667
653 def copyfile(src, dest):
668 def copyfile(src, dest):
654 "copy a file, preserving mode and atime/mtime"
669 "copy a file, preserving mode and atime/mtime"
655 if os.path.islink(src):
670 if os.path.islink(src):
656 try:
671 try:
657 os.unlink(dest)
672 os.unlink(dest)
658 except:
673 except:
659 pass
674 pass
660 os.symlink(os.readlink(src), dest)
675 os.symlink(os.readlink(src), dest)
661 else:
676 else:
662 try:
677 try:
663 shutil.copyfile(src, dest)
678 shutil.copyfile(src, dest)
664 shutil.copystat(src, dest)
679 shutil.copystat(src, dest)
665 except shutil.Error, inst:
680 except shutil.Error, inst:
666 raise Abort(str(inst))
681 raise Abort(str(inst))
667
682
668 def copyfiles(src, dst, hardlink=None):
683 def copyfiles(src, dst, hardlink=None):
669 """Copy a directory tree using hardlinks if possible"""
684 """Copy a directory tree using hardlinks if possible"""
670
685
671 if hardlink is None:
686 if hardlink is None:
672 hardlink = (os.stat(src).st_dev ==
687 hardlink = (os.stat(src).st_dev ==
673 os.stat(os.path.dirname(dst)).st_dev)
688 os.stat(os.path.dirname(dst)).st_dev)
674
689
675 if os.path.isdir(src):
690 if os.path.isdir(src):
676 os.mkdir(dst)
691 os.mkdir(dst)
677 for name, kind in osutil.listdir(src):
692 for name, kind in osutil.listdir(src):
678 srcname = os.path.join(src, name)
693 srcname = os.path.join(src, name)
679 dstname = os.path.join(dst, name)
694 dstname = os.path.join(dst, name)
680 copyfiles(srcname, dstname, hardlink)
695 copyfiles(srcname, dstname, hardlink)
681 else:
696 else:
682 if hardlink:
697 if hardlink:
683 try:
698 try:
684 os_link(src, dst)
699 os_link(src, dst)
685 except (IOError, OSError):
700 except (IOError, OSError):
686 hardlink = False
701 hardlink = False
687 shutil.copy(src, dst)
702 shutil.copy(src, dst)
688 else:
703 else:
689 shutil.copy(src, dst)
704 shutil.copy(src, dst)
690
705
691 class path_auditor(object):
706 class path_auditor(object):
692 '''ensure that a filesystem path contains no banned components.
707 '''ensure that a filesystem path contains no banned components.
693 the following properties of a path are checked:
708 the following properties of a path are checked:
694
709
695 - under top-level .hg
710 - under top-level .hg
696 - starts at the root of a windows drive
711 - starts at the root of a windows drive
697 - contains ".."
712 - contains ".."
698 - traverses a symlink (e.g. a/symlink_here/b)
713 - traverses a symlink (e.g. a/symlink_here/b)
699 - inside a nested repository'''
714 - inside a nested repository'''
700
715
701 def __init__(self, root):
716 def __init__(self, root):
702 self.audited = set()
717 self.audited = set()
703 self.auditeddir = set()
718 self.auditeddir = set()
704 self.root = root
719 self.root = root
705
720
706 def __call__(self, path):
721 def __call__(self, path):
707 if path in self.audited:
722 if path in self.audited:
708 return
723 return
709 normpath = os.path.normcase(path)
724 normpath = os.path.normcase(path)
710 parts = splitpath(normpath)
725 parts = splitpath(normpath)
711 if (os.path.splitdrive(path)[0]
726 if (os.path.splitdrive(path)[0]
712 or parts[0].lower() in ('.hg', '.hg.', '')
727 or parts[0].lower() in ('.hg', '.hg.', '')
713 or os.pardir in parts):
728 or os.pardir in parts):
714 raise Abort(_("path contains illegal component: %s") % path)
729 raise Abort(_("path contains illegal component: %s") % path)
715 if '.hg' in path.lower():
730 if '.hg' in path.lower():
716 lparts = [p.lower() for p in parts]
731 lparts = [p.lower() for p in parts]
717 for p in '.hg', '.hg.':
732 for p in '.hg', '.hg.':
718 if p in lparts[1:]:
733 if p in lparts[1:]:
719 pos = lparts.index(p)
734 pos = lparts.index(p)
720 base = os.path.join(*parts[:pos])
735 base = os.path.join(*parts[:pos])
721 raise Abort(_('path %r is inside repo %r') % (path, base))
736 raise Abort(_('path %r is inside repo %r') % (path, base))
722 def check(prefix):
737 def check(prefix):
723 curpath = os.path.join(self.root, prefix)
738 curpath = os.path.join(self.root, prefix)
724 try:
739 try:
725 st = os.lstat(curpath)
740 st = os.lstat(curpath)
726 except OSError, err:
741 except OSError, err:
727 # EINVAL can be raised as invalid path syntax under win32.
742 # EINVAL can be raised as invalid path syntax under win32.
728 # They must be ignored for patterns can be checked too.
743 # They must be ignored for patterns can be checked too.
729 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
744 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
730 raise
745 raise
731 else:
746 else:
732 if stat.S_ISLNK(st.st_mode):
747 if stat.S_ISLNK(st.st_mode):
733 raise Abort(_('path %r traverses symbolic link %r') %
748 raise Abort(_('path %r traverses symbolic link %r') %
734 (path, prefix))
749 (path, prefix))
735 elif (stat.S_ISDIR(st.st_mode) and
750 elif (stat.S_ISDIR(st.st_mode) and
736 os.path.isdir(os.path.join(curpath, '.hg'))):
751 os.path.isdir(os.path.join(curpath, '.hg'))):
737 raise Abort(_('path %r is inside repo %r') %
752 raise Abort(_('path %r is inside repo %r') %
738 (path, prefix))
753 (path, prefix))
739 parts.pop()
754 parts.pop()
740 prefixes = []
755 prefixes = []
741 for n in range(len(parts)):
756 for n in range(len(parts)):
742 prefix = os.sep.join(parts)
757 prefix = os.sep.join(parts)
743 if prefix in self.auditeddir:
758 if prefix in self.auditeddir:
744 break
759 break
745 check(prefix)
760 check(prefix)
746 prefixes.append(prefix)
761 prefixes.append(prefix)
747 parts.pop()
762 parts.pop()
748
763
749 self.audited.add(path)
764 self.audited.add(path)
750 # only add prefixes to the cache after checking everything: we don't
765 # only add prefixes to the cache after checking everything: we don't
751 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
766 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
752 self.auditeddir.update(prefixes)
767 self.auditeddir.update(prefixes)
753
768
754 def nlinks(pathname):
769 def nlinks(pathname):
755 """Return number of hardlinks for the given file."""
770 """Return number of hardlinks for the given file."""
756 return os.lstat(pathname).st_nlink
771 return os.lstat(pathname).st_nlink
757
772
758 if hasattr(os, 'link'):
773 if hasattr(os, 'link'):
759 os_link = os.link
774 os_link = os.link
760 else:
775 else:
761 def os_link(src, dst):
776 def os_link(src, dst):
762 raise OSError(0, _("Hardlinks not supported"))
777 raise OSError(0, _("Hardlinks not supported"))
763
778
764 def lookup_reg(key, name=None, scope=None):
779 def lookup_reg(key, name=None, scope=None):
765 return None
780 return None
766
781
767 if os.name == 'nt':
782 if os.name == 'nt':
768 from windows import *
783 from windows import *
769 def expand_glob(pats):
784 def expand_glob(pats):
770 '''On Windows, expand the implicit globs in a list of patterns'''
785 '''On Windows, expand the implicit globs in a list of patterns'''
771 ret = []
786 ret = []
772 for p in pats:
787 for p in pats:
773 kind, name = patkind(p, None)
788 kind, name = patkind(p, None)
774 if kind is None:
789 if kind is None:
775 globbed = glob.glob(name)
790 globbed = glob.glob(name)
776 if globbed:
791 if globbed:
777 ret.extend(globbed)
792 ret.extend(globbed)
778 continue
793 continue
779 # if we couldn't expand the glob, just keep it around
794 # if we couldn't expand the glob, just keep it around
780 ret.append(p)
795 ret.append(p)
781 return ret
796 return ret
782 else:
797 else:
783 from posix import *
798 from posix import *
784
799
785 def makelock(info, pathname):
800 def makelock(info, pathname):
786 try:
801 try:
787 return os.symlink(info, pathname)
802 return os.symlink(info, pathname)
788 except OSError, why:
803 except OSError, why:
789 if why.errno == errno.EEXIST:
804 if why.errno == errno.EEXIST:
790 raise
805 raise
791 except AttributeError: # no symlink in os
806 except AttributeError: # no symlink in os
792 pass
807 pass
793
808
794 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
809 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
795 os.write(ld, info)
810 os.write(ld, info)
796 os.close(ld)
811 os.close(ld)
797
812
798 def readlock(pathname):
813 def readlock(pathname):
799 try:
814 try:
800 return os.readlink(pathname)
815 return os.readlink(pathname)
801 except OSError, why:
816 except OSError, why:
802 if why.errno not in (errno.EINVAL, errno.ENOSYS):
817 if why.errno not in (errno.EINVAL, errno.ENOSYS):
803 raise
818 raise
804 except AttributeError: # no symlink in os
819 except AttributeError: # no symlink in os
805 pass
820 pass
806 return posixfile(pathname).read()
821 return posixfile(pathname).read()
807
822
808 def fstat(fp):
823 def fstat(fp):
809 '''stat file object that may not have fileno method.'''
824 '''stat file object that may not have fileno method.'''
810 try:
825 try:
811 return os.fstat(fp.fileno())
826 return os.fstat(fp.fileno())
812 except AttributeError:
827 except AttributeError:
813 return os.stat(fp.name)
828 return os.stat(fp.name)
814
829
815 # File system features
830 # File system features
816
831
817 def checkcase(path):
832 def checkcase(path):
818 """
833 """
819 Check whether the given path is on a case-sensitive filesystem
834 Check whether the given path is on a case-sensitive filesystem
820
835
821 Requires a path (like /foo/.hg) ending with a foldable final
836 Requires a path (like /foo/.hg) ending with a foldable final
822 directory component.
837 directory component.
823 """
838 """
824 s1 = os.stat(path)
839 s1 = os.stat(path)
825 d, b = os.path.split(path)
840 d, b = os.path.split(path)
826 p2 = os.path.join(d, b.upper())
841 p2 = os.path.join(d, b.upper())
827 if path == p2:
842 if path == p2:
828 p2 = os.path.join(d, b.lower())
843 p2 = os.path.join(d, b.lower())
829 try:
844 try:
830 s2 = os.stat(p2)
845 s2 = os.stat(p2)
831 if s2 == s1:
846 if s2 == s1:
832 return False
847 return False
833 return True
848 return True
834 except:
849 except:
835 return True
850 return True
836
851
837 _fspathcache = {}
852 _fspathcache = {}
838 def fspath(name, root):
853 def fspath(name, root):
839 '''Get name in the case stored in the filesystem
854 '''Get name in the case stored in the filesystem
840
855
841 The name is either relative to root, or it is an absolute path starting
856 The name is either relative to root, or it is an absolute path starting
842 with root. Note that this function is unnecessary, and should not be
857 with root. Note that this function is unnecessary, and should not be
843 called, for case-sensitive filesystems (simply because it's expensive).
858 called, for case-sensitive filesystems (simply because it's expensive).
844 '''
859 '''
845 # If name is absolute, make it relative
860 # If name is absolute, make it relative
846 if name.lower().startswith(root.lower()):
861 if name.lower().startswith(root.lower()):
847 l = len(root)
862 l = len(root)
848 if name[l] == os.sep or name[l] == os.altsep:
863 if name[l] == os.sep or name[l] == os.altsep:
849 l = l + 1
864 l = l + 1
850 name = name[l:]
865 name = name[l:]
851
866
852 if not os.path.exists(os.path.join(root, name)):
867 if not os.path.exists(os.path.join(root, name)):
853 return None
868 return None
854
869
855 seps = os.sep
870 seps = os.sep
856 if os.altsep:
871 if os.altsep:
857 seps = seps + os.altsep
872 seps = seps + os.altsep
858 # Protect backslashes. This gets silly very quickly.
873 # Protect backslashes. This gets silly very quickly.
859 seps.replace('\\','\\\\')
874 seps.replace('\\','\\\\')
860 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
875 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
861 dir = os.path.normcase(os.path.normpath(root))
876 dir = os.path.normcase(os.path.normpath(root))
862 result = []
877 result = []
863 for part, sep in pattern.findall(name):
878 for part, sep in pattern.findall(name):
864 if sep:
879 if sep:
865 result.append(sep)
880 result.append(sep)
866 continue
881 continue
867
882
868 if dir not in _fspathcache:
883 if dir not in _fspathcache:
869 _fspathcache[dir] = os.listdir(dir)
884 _fspathcache[dir] = os.listdir(dir)
870 contents = _fspathcache[dir]
885 contents = _fspathcache[dir]
871
886
872 lpart = part.lower()
887 lpart = part.lower()
873 for n in contents:
888 for n in contents:
874 if n.lower() == lpart:
889 if n.lower() == lpart:
875 result.append(n)
890 result.append(n)
876 break
891 break
877 else:
892 else:
878 # Cannot happen, as the file exists!
893 # Cannot happen, as the file exists!
879 result.append(part)
894 result.append(part)
880 dir = os.path.join(dir, lpart)
895 dir = os.path.join(dir, lpart)
881
896
882 return ''.join(result)
897 return ''.join(result)
883
898
884 def checkexec(path):
899 def checkexec(path):
885 """
900 """
886 Check whether the given path is on a filesystem with UNIX-like exec flags
901 Check whether the given path is on a filesystem with UNIX-like exec flags
887
902
888 Requires a directory (like /foo/.hg)
903 Requires a directory (like /foo/.hg)
889 """
904 """
890
905
891 # VFAT on some Linux versions can flip mode but it doesn't persist
906 # VFAT on some Linux versions can flip mode but it doesn't persist
892 # a FS remount. Frequently we can detect it if files are created
907 # a FS remount. Frequently we can detect it if files are created
893 # with exec bit on.
908 # with exec bit on.
894
909
895 try:
910 try:
896 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
911 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
897 fh, fn = tempfile.mkstemp("", "", path)
912 fh, fn = tempfile.mkstemp("", "", path)
898 try:
913 try:
899 os.close(fh)
914 os.close(fh)
900 m = os.stat(fn).st_mode & 0777
915 m = os.stat(fn).st_mode & 0777
901 new_file_has_exec = m & EXECFLAGS
916 new_file_has_exec = m & EXECFLAGS
902 os.chmod(fn, m ^ EXECFLAGS)
917 os.chmod(fn, m ^ EXECFLAGS)
903 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
918 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
904 finally:
919 finally:
905 os.unlink(fn)
920 os.unlink(fn)
906 except (IOError, OSError):
921 except (IOError, OSError):
907 # we don't care, the user probably won't be able to commit anyway
922 # we don't care, the user probably won't be able to commit anyway
908 return False
923 return False
909 return not (new_file_has_exec or exec_flags_cannot_flip)
924 return not (new_file_has_exec or exec_flags_cannot_flip)
910
925
911 def checklink(path):
926 def checklink(path):
912 """check whether the given path is on a symlink-capable filesystem"""
927 """check whether the given path is on a symlink-capable filesystem"""
913 # mktemp is not racy because symlink creation will fail if the
928 # mktemp is not racy because symlink creation will fail if the
914 # file already exists
929 # file already exists
915 name = tempfile.mktemp(dir=path)
930 name = tempfile.mktemp(dir=path)
916 try:
931 try:
917 os.symlink(".", name)
932 os.symlink(".", name)
918 os.unlink(name)
933 os.unlink(name)
919 return True
934 return True
920 except (OSError, AttributeError):
935 except (OSError, AttributeError):
921 return False
936 return False
922
937
923 def needbinarypatch():
938 def needbinarypatch():
924 """return True if patches should be applied in binary mode by default."""
939 """return True if patches should be applied in binary mode by default."""
925 return os.name == 'nt'
940 return os.name == 'nt'
926
941
927 def endswithsep(path):
942 def endswithsep(path):
928 '''Check path ends with os.sep or os.altsep.'''
943 '''Check path ends with os.sep or os.altsep.'''
929 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
944 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
930
945
931 def splitpath(path):
946 def splitpath(path):
932 '''Split path by os.sep.
947 '''Split path by os.sep.
933 Note that this function does not use os.altsep because this is
948 Note that this function does not use os.altsep because this is
934 an alternative of simple "xxx.split(os.sep)".
949 an alternative of simple "xxx.split(os.sep)".
935 It is recommended to use os.path.normpath() before using this
950 It is recommended to use os.path.normpath() before using this
936 function if need.'''
951 function if need.'''
937 return path.split(os.sep)
952 return path.split(os.sep)
938
953
939 def gui():
954 def gui():
940 '''Are we running in a GUI?'''
955 '''Are we running in a GUI?'''
941 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
956 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
942
957
943 def mktempcopy(name, emptyok=False, createmode=None):
958 def mktempcopy(name, emptyok=False, createmode=None):
944 """Create a temporary file with the same contents from name
959 """Create a temporary file with the same contents from name
945
960
946 The permission bits are copied from the original file.
961 The permission bits are copied from the original file.
947
962
948 If the temporary file is going to be truncated immediately, you
963 If the temporary file is going to be truncated immediately, you
949 can use emptyok=True as an optimization.
964 can use emptyok=True as an optimization.
950
965
951 Returns the name of the temporary file.
966 Returns the name of the temporary file.
952 """
967 """
953 d, fn = os.path.split(name)
968 d, fn = os.path.split(name)
954 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
969 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
955 os.close(fd)
970 os.close(fd)
956 # Temporary files are created with mode 0600, which is usually not
971 # Temporary files are created with mode 0600, which is usually not
957 # what we want. If the original file already exists, just copy
972 # what we want. If the original file already exists, just copy
958 # its mode. Otherwise, manually obey umask.
973 # its mode. Otherwise, manually obey umask.
959 try:
974 try:
960 st_mode = os.lstat(name).st_mode & 0777
975 st_mode = os.lstat(name).st_mode & 0777
961 except OSError, inst:
976 except OSError, inst:
962 if inst.errno != errno.ENOENT:
977 if inst.errno != errno.ENOENT:
963 raise
978 raise
964 st_mode = createmode
979 st_mode = createmode
965 if st_mode is None:
980 if st_mode is None:
966 st_mode = ~umask
981 st_mode = ~umask
967 st_mode &= 0666
982 st_mode &= 0666
968 os.chmod(temp, st_mode)
983 os.chmod(temp, st_mode)
969 if emptyok:
984 if emptyok:
970 return temp
985 return temp
971 try:
986 try:
972 try:
987 try:
973 ifp = posixfile(name, "rb")
988 ifp = posixfile(name, "rb")
974 except IOError, inst:
989 except IOError, inst:
975 if inst.errno == errno.ENOENT:
990 if inst.errno == errno.ENOENT:
976 return temp
991 return temp
977 if not getattr(inst, 'filename', None):
992 if not getattr(inst, 'filename', None):
978 inst.filename = name
993 inst.filename = name
979 raise
994 raise
980 ofp = posixfile(temp, "wb")
995 ofp = posixfile(temp, "wb")
981 for chunk in filechunkiter(ifp):
996 for chunk in filechunkiter(ifp):
982 ofp.write(chunk)
997 ofp.write(chunk)
983 ifp.close()
998 ifp.close()
984 ofp.close()
999 ofp.close()
985 except:
1000 except:
986 try: os.unlink(temp)
1001 try: os.unlink(temp)
987 except: pass
1002 except: pass
988 raise
1003 raise
989 return temp
1004 return temp
990
1005
991 class atomictempfile(posixfile):
1006 class atomictempfile(posixfile):
992 """file-like object that atomically updates a file
1007 """file-like object that atomically updates a file
993
1008
994 All writes will be redirected to a temporary copy of the original
1009 All writes will be redirected to a temporary copy of the original
995 file. When rename is called, the copy is renamed to the original
1010 file. When rename is called, the copy is renamed to the original
996 name, making the changes visible.
1011 name, making the changes visible.
997 """
1012 """
998 def __init__(self, name, mode, createmode):
1013 def __init__(self, name, mode, createmode):
999 self.__name = name
1014 self.__name = name
1000 self.temp = mktempcopy(name, emptyok=('w' in mode),
1015 self.temp = mktempcopy(name, emptyok=('w' in mode),
1001 createmode=createmode)
1016 createmode=createmode)
1002 posixfile.__init__(self, self.temp, mode)
1017 posixfile.__init__(self, self.temp, mode)
1003
1018
1004 def rename(self):
1019 def rename(self):
1005 if not self.closed:
1020 if not self.closed:
1006 posixfile.close(self)
1021 posixfile.close(self)
1007 rename(self.temp, localpath(self.__name))
1022 rename(self.temp, localpath(self.__name))
1008
1023
1009 def __del__(self):
1024 def __del__(self):
1010 if not self.closed:
1025 if not self.closed:
1011 try:
1026 try:
1012 os.unlink(self.temp)
1027 os.unlink(self.temp)
1013 except: pass
1028 except: pass
1014 posixfile.close(self)
1029 posixfile.close(self)
1015
1030
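# Illustrative sketch (hypothetical file name): writes to an atomictempfile
# become visible only when rename() is called; discarding the object without
# calling rename() leaves the original file untouched:
#
#   f = atomictempfile('config', 'w', createmode=None)
#   f.write('[ui]\nusername = example\n')
#   f.rename()    # atomically replaces 'config'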
1016 def makedirs(name, mode=None):
1031 def makedirs(name, mode=None):
1017 """recursive directory creation with parent mode inheritance"""
1032 """recursive directory creation with parent mode inheritance"""
1018 try:
1033 try:
1019 os.mkdir(name)
1034 os.mkdir(name)
1020 if mode is not None:
1035 if mode is not None:
1021 os.chmod(name, mode)
1036 os.chmod(name, mode)
1022 return
1037 return
1023 except OSError, err:
1038 except OSError, err:
1024 if err.errno == errno.EEXIST:
1039 if err.errno == errno.EEXIST:
1025 return
1040 return
1026 if err.errno != errno.ENOENT:
1041 if err.errno != errno.ENOENT:
1027 raise
1042 raise
1028 parent = os.path.abspath(os.path.dirname(name))
1043 parent = os.path.abspath(os.path.dirname(name))
1029 makedirs(parent, mode)
1044 makedirs(parent, mode)
1030 makedirs(name, mode)
1045 makedirs(name, mode)
1031
1046
1032 class opener(object):
1047 class opener(object):
1033 """Open files relative to a base directory
1048 """Open files relative to a base directory
1034
1049
1035 This class is used to hide the details of COW semantics and
1050 This class is used to hide the details of COW semantics and
1036 remote file access from higher level code.
1051 remote file access from higher level code.
1037 """
1052 """
1038 def __init__(self, base, audit=True):
1053 def __init__(self, base, audit=True):
1039 self.base = base
1054 self.base = base
1040 if audit:
1055 if audit:
1041 self.audit_path = path_auditor(base)
1056 self.audit_path = path_auditor(base)
1042 else:
1057 else:
1043 self.audit_path = always
1058 self.audit_path = always
1044 self.createmode = None
1059 self.createmode = None
1045
1060
1046 def __getattr__(self, name):
1061 def __getattr__(self, name):
1047 if name == '_can_symlink':
1062 if name == '_can_symlink':
1048 self._can_symlink = checklink(self.base)
1063 self._can_symlink = checklink(self.base)
1049 return self._can_symlink
1064 return self._can_symlink
1050 raise AttributeError(name)
1065 raise AttributeError(name)
1051
1066
1052 def _fixfilemode(self, name):
1067 def _fixfilemode(self, name):
1053 if self.createmode is None:
1068 if self.createmode is None:
1054 return
1069 return
1055 os.chmod(name, self.createmode & 0666)
1070 os.chmod(name, self.createmode & 0666)
1056
1071
1057 def __call__(self, path, mode="r", text=False, atomictemp=False):
1072 def __call__(self, path, mode="r", text=False, atomictemp=False):
1058 self.audit_path(path)
1073 self.audit_path(path)
1059 f = os.path.join(self.base, path)
1074 f = os.path.join(self.base, path)
1060
1075
1061 if not text and "b" not in mode:
1076 if not text and "b" not in mode:
1062 mode += "b" # for that other OS
1077 mode += "b" # for that other OS
1063
1078
1064 nlink = -1
1079 nlink = -1
1065 if mode not in ("r", "rb"):
1080 if mode not in ("r", "rb"):
1066 try:
1081 try:
1067 nlink = nlinks(f)
1082 nlink = nlinks(f)
1068 except OSError:
1083 except OSError:
1069 nlink = 0
1084 nlink = 0
1070 d = os.path.dirname(f)
1085 d = os.path.dirname(f)
1071 if not os.path.isdir(d):
1086 if not os.path.isdir(d):
1072 makedirs(d, self.createmode)
1087 makedirs(d, self.createmode)
1073 if atomictemp:
1088 if atomictemp:
1074 return atomictempfile(f, mode, self.createmode)
1089 return atomictempfile(f, mode, self.createmode)
1075 if nlink > 1:
1090 if nlink > 1:
1076 rename(mktempcopy(f), f)
1091 rename(mktempcopy(f), f)
1077 fp = posixfile(f, mode)
1092 fp = posixfile(f, mode)
1078 if nlink == 0:
1093 if nlink == 0:
1079 self._fixfilemode(f)
1094 self._fixfilemode(f)
1080 return fp
1095 return fp
1081
1096
1082 def symlink(self, src, dst):
1097 def symlink(self, src, dst):
1083 self.audit_path(dst)
1098 self.audit_path(dst)
1084 linkname = os.path.join(self.base, dst)
1099 linkname = os.path.join(self.base, dst)
1085 try:
1100 try:
1086 os.unlink(linkname)
1101 os.unlink(linkname)
1087 except OSError:
1102 except OSError:
1088 pass
1103 pass
1089
1104
1090 dirname = os.path.dirname(linkname)
1105 dirname = os.path.dirname(linkname)
1091 if not os.path.exists(dirname):
1106 if not os.path.exists(dirname):
1092 makedirs(dirname, self.createmode)
1107 makedirs(dirname, self.createmode)
1093
1108
1094 if self._can_symlink:
1109 if self._can_symlink:
1095 try:
1110 try:
1096 os.symlink(src, linkname)
1111 os.symlink(src, linkname)
1097 except OSError, err:
1112 except OSError, err:
1098 raise OSError(err.errno, _('could not symlink to %r: %s') %
1113 raise OSError(err.errno, _('could not symlink to %r: %s') %
1099 (src, err.strerror), linkname)
1114 (src, err.strerror), linkname)
1100 else:
1115 else:
1101 f = self(dst, "w")
1116 f = self(dst, "w")
1102 f.write(src)
1117 f.write(src)
1103 f.close()
1118 f.close()
1104 self._fixfilemode(dst)
1119 self._fixfilemode(dst)
1105
1120
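# Illustrative sketch (hypothetical base directory): paths are audited
# relative to the base, missing parent directories are created on demand,
# and atomictemp=True routes writes through atomictempfile:
#
#   op = opener('/path/to/repo/.hg')
#   f = op('store/somefile', 'w', atomictemp=True)
#   f.write('...')
#   f.rename()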
1106 class chunkbuffer(object):
1121 class chunkbuffer(object):
1107 """Allow arbitrary sized chunks of data to be efficiently read from an
1122 """Allow arbitrary sized chunks of data to be efficiently read from an
1108 iterator over chunks of arbitrary size."""
1123 iterator over chunks of arbitrary size."""
1109
1124
1110 def __init__(self, in_iter):
1125 def __init__(self, in_iter):
1111 """in_iter is the iterator that's iterating over the input chunks.
1126 """in_iter is the iterator that's iterating over the input chunks.
1112 targetsize is how big a buffer to try to maintain."""
1127 targetsize is how big a buffer to try to maintain."""
1113 self.iter = iter(in_iter)
1128 self.iter = iter(in_iter)
1114 self.buf = ''
1129 self.buf = ''
1115 self.targetsize = 2**16
1130 self.targetsize = 2**16
1116
1131
1117 def read(self, l):
1132 def read(self, l):
1118 """Read L bytes of data from the iterator of chunks of data.
1133 """Read L bytes of data from the iterator of chunks of data.
1119 Returns less than L bytes if the iterator runs dry."""
1134 Returns less than L bytes if the iterator runs dry."""
1120 if l > len(self.buf) and self.iter:
1135 if l > len(self.buf) and self.iter:
1121 # collect chunks until at least max(l, self.targetsize) bytes are buffered
1136 # collect chunks until at least max(l, self.targetsize) bytes are buffered
1122 targetsize = max(l, self.targetsize)
1137 targetsize = max(l, self.targetsize)
1123 collector = cStringIO.StringIO()
1138 collector = cStringIO.StringIO()
1124 collector.write(self.buf)
1139 collector.write(self.buf)
1125 collected = len(self.buf)
1140 collected = len(self.buf)
1126 for chunk in self.iter:
1141 for chunk in self.iter:
1127 collector.write(chunk)
1142 collector.write(chunk)
1128 collected += len(chunk)
1143 collected += len(chunk)
1129 if collected >= targetsize:
1144 if collected >= targetsize:
1130 break
1145 break
1131 if collected < targetsize:
1146 if collected < targetsize:
1132 self.iter = False
1147 self.iter = False
1133 self.buf = collector.getvalue()
1148 self.buf = collector.getvalue()
1134 if len(self.buf) == l:
1149 if len(self.buf) == l:
1135 s, self.buf = str(self.buf), ''
1150 s, self.buf = str(self.buf), ''
1136 else:
1151 else:
1137 s, self.buf = self.buf[:l], buffer(self.buf, l)
1152 s, self.buf = self.buf[:l], buffer(self.buf, l)
1138 return s
1153 return s
1139
1154
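# Illustrative sketch: chunkbuffer turns arbitrarily sized input chunks into
# reads of the requested size, with a short final read once the iterator
# runs dry:
#
#   buf = chunkbuffer(iter(['abc', 'defgh', 'ij']))
#   buf.read(4)    # -> 'abcd'
#   buf.read(4)    # -> 'efgh'
#   buf.read(4)    # -> 'ij' (short read: input exhausted)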
1140 def filechunkiter(f, size=65536, limit=None):
1155 def filechunkiter(f, size=65536, limit=None):
1141 """Create a generator that produces the data in the file size
1156 """Create a generator that produces the data in the file size
1142 (default 65536) bytes at a time, up to optional limit (default is
1157 (default 65536) bytes at a time, up to optional limit (default is
1143 to read all data). Chunks may be less than size bytes if the
1158 to read all data). Chunks may be less than size bytes if the
1144 chunk is the last chunk in the file, or the file is a socket or
1159 chunk is the last chunk in the file, or the file is a socket or
1145 some other type of file that sometimes reads less data than is
1160 some other type of file that sometimes reads less data than is
1146 requested."""
1161 requested."""
1147 assert size >= 0
1162 assert size >= 0
1148 assert limit is None or limit >= 0
1163 assert limit is None or limit >= 0
1149 while True:
1164 while True:
1150 if limit is None: nbytes = size
1165 if limit is None: nbytes = size
1151 else: nbytes = min(limit, size)
1166 else: nbytes = min(limit, size)
1152 s = nbytes and f.read(nbytes)
1167 s = nbytes and f.read(nbytes)
1153 if not s: break
1168 if not s: break
1154 if limit: limit -= len(s)
1169 if limit: limit -= len(s)
1155 yield s
1170 yield s
1156
1171
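# Illustrative sketch (hypothetical src/dst file objects): copy at most 1 MB
# in 64k chunks; the last chunk may be shorter than size:
#
#   for chunk in filechunkiter(src, size=65536, limit=1024 * 1024):
#       dst.write(chunk)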
1157 def makedate():
1172 def makedate():
1158 lt = time.localtime()
1173 lt = time.localtime()
1159 if lt[8] == 1 and time.daylight:
1174 if lt[8] == 1 and time.daylight:
1160 tz = time.altzone
1175 tz = time.altzone
1161 else:
1176 else:
1162 tz = time.timezone
1177 tz = time.timezone
1163 return time.mktime(lt), tz
1178 return time.mktime(lt), tz
1164
1179
1165 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1180 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1166 """represent a (unixtime, offset) tuple as a localized time.
1181 """represent a (unixtime, offset) tuple as a localized time.
1167 unixtime is seconds since the epoch, and offset is the time zone's
1182 unixtime is seconds since the epoch, and offset is the time zone's
1168 number of seconds away from UTC. "%1" in the format expands to the
1183 number of seconds away from UTC. "%1" in the format expands to the
1169 offset's signed hours and "%2" to its minutes."""
1184 offset's signed hours and "%2" to its minutes."""
1170 t, tz = date or makedate()
1185 t, tz = date or makedate()
1171 if "%1" in format or "%2" in format:
1186 if "%1" in format or "%2" in format:
1172 sign = (tz > 0) and "-" or "+"
1187 sign = (tz > 0) and "-" or "+"
1173 minutes = abs(tz) / 60
1188 minutes = abs(tz) / 60
1174 format = format.replace("%1", "%c%02d" % (sign, minutes / 60))
1189 format = format.replace("%1", "%c%02d" % (sign, minutes / 60))
1175 format = format.replace("%2", "%02d" % (minutes % 60))
1190 format = format.replace("%2", "%02d" % (minutes % 60))
1176 s = time.strftime(format, time.gmtime(float(t) - tz))
1191 s = time.strftime(format, time.gmtime(float(t) - tz))
1177 return s
1192 return s
1178
1193
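# Illustrative sketch (assuming the C locale for day/month names): "%1" and
# "%2" render an 18000-second offset (UTC-5) as -0500:
#
#   datestr((0, 18000))                      # 'Wed Dec 31 19:00:00 1969 -0500'
#   datestr((0, 18000), format='%Y-%m-%d')   # '1969-12-31'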
1179 def shortdate(date=None):
1194 def shortdate(date=None):
1180 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1195 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1181 return datestr(date, format='%Y-%m-%d')
1196 return datestr(date, format='%Y-%m-%d')
1182
1197
1183 def strdate(string, format, defaults=[]):
1198 def strdate(string, format, defaults=[]):
1184 """parse a localized time string and return a (unixtime, offset) tuple.
1199 """parse a localized time string and return a (unixtime, offset) tuple.
1185 if the string cannot be parsed, ValueError is raised."""
1200 if the string cannot be parsed, ValueError is raised."""
1186 def timezone(string):
1201 def timezone(string):
1187 tz = string.split()[-1]
1202 tz = string.split()[-1]
1188 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1203 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1189 sign = (tz[0] == "+") and 1 or -1
1204 sign = (tz[0] == "+") and 1 or -1
1190 hours = int(tz[1:3])
1205 hours = int(tz[1:3])
1191 minutes = int(tz[3:5])
1206 minutes = int(tz[3:5])
1192 return -sign * (hours * 60 + minutes) * 60
1207 return -sign * (hours * 60 + minutes) * 60
1193 if tz == "GMT" or tz == "UTC":
1208 if tz == "GMT" or tz == "UTC":
1194 return 0
1209 return 0
1195 return None
1210 return None
1196
1211
1197 # NOTE: unixtime = localunixtime + offset
1212 # NOTE: unixtime = localunixtime + offset
1198 offset, date = timezone(string), string
1213 offset, date = timezone(string), string
1199 if offset is not None:
1214 if offset is not None:
1200 date = " ".join(string.split()[:-1])
1215 date = " ".join(string.split()[:-1])
1201
1216
1202 # add missing elements from defaults
1217 # add missing elements from defaults
1203 for part in defaults:
1218 for part in defaults:
1204 found = [True for p in part if ("%"+p) in format]
1219 found = [True for p in part if ("%"+p) in format]
1205 if not found:
1220 if not found:
1206 date += "@" + defaults[part]
1221 date += "@" + defaults[part]
1207 format += "@%" + part[0]
1222 format += "@%" + part[0]
1208
1223
1209 timetuple = time.strptime(date, format)
1224 timetuple = time.strptime(date, format)
1210 localunixtime = int(calendar.timegm(timetuple))
1225 localunixtime = int(calendar.timegm(timetuple))
1211 if offset is None:
1226 if offset is None:
1212 # local timezone
1227 # local timezone
1213 unixtime = int(time.mktime(timetuple))
1228 unixtime = int(time.mktime(timetuple))
1214 offset = unixtime - localunixtime
1229 offset = unixtime - localunixtime
1215 else:
1230 else:
1216 unixtime = localunixtime + offset
1231 unixtime = localunixtime + offset
1217 return unixtime, offset
1232 return unixtime, offset
1218
1233
1219 def parsedate(date, formats=None, defaults=None):
1234 def parsedate(date, formats=None, defaults=None):
1220 """parse a localized date/time string and return a (unixtime, offset) tuple.
1235 """parse a localized date/time string and return a (unixtime, offset) tuple.
1221
1236
1222 The date may be a "unixtime offset" string or in one of the specified
1237 The date may be a "unixtime offset" string or in one of the specified
1223 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1238 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1224 """
1239 """
1225 if not date:
1240 if not date:
1226 return 0, 0
1241 return 0, 0
1227 if isinstance(date, tuple) and len(date) == 2:
1242 if isinstance(date, tuple) and len(date) == 2:
1228 return date
1243 return date
1229 if not formats:
1244 if not formats:
1230 formats = defaultdateformats
1245 formats = defaultdateformats
1231 date = date.strip()
1246 date = date.strip()
1232 try:
1247 try:
1233 when, offset = map(int, date.split(' '))
1248 when, offset = map(int, date.split(' '))
1234 except ValueError:
1249 except ValueError:
1235 # fill out defaults
1250 # fill out defaults
1236 if not defaults:
1251 if not defaults:
1237 defaults = {}
1252 defaults = {}
1238 now = makedate()
1253 now = makedate()
1239 for part in "d mb yY HI M S".split():
1254 for part in "d mb yY HI M S".split():
1240 if part not in defaults:
1255 if part not in defaults:
1241 if part[0] in "HMS":
1256 if part[0] in "HMS":
1242 defaults[part] = "00"
1257 defaults[part] = "00"
1243 else:
1258 else:
1244 defaults[part] = datestr(now, "%" + part[0])
1259 defaults[part] = datestr(now, "%" + part[0])
1245
1260
1246 for format in formats:
1261 for format in formats:
1247 try:
1262 try:
1248 when, offset = strdate(date, format, defaults)
1263 when, offset = strdate(date, format, defaults)
1249 except (ValueError, OverflowError):
1264 except (ValueError, OverflowError):
1250 pass
1265 pass
1251 else:
1266 else:
1252 break
1267 break
1253 else:
1268 else:
1254 raise Abort(_('invalid date: %r') % date)
1269 raise Abort(_('invalid date: %r') % date)
1255 # validate explicit (probably user-specified) date and
1270 # validate explicit (probably user-specified) date and
1256 # time zone offset. values must fit in signed 32 bits for
1271 # time zone offset. values must fit in signed 32 bits for
1257 # current 32-bit linux runtimes. timezones go from UTC-12
1272 # current 32-bit linux runtimes. timezones go from UTC-12
1258 # to UTC+14
1273 # to UTC+14
1259 if abs(when) > 0x7fffffff:
1274 if abs(when) > 0x7fffffff:
1260 raise Abort(_('date exceeds 32 bits: %d') % when)
1275 raise Abort(_('date exceeds 32 bits: %d') % when)
1261 if offset < -50400 or offset > 43200:
1276 if offset < -50400 or offset > 43200:
1262 raise Abort(_('impossible time zone offset: %d') % offset)
1277 raise Abort(_('impossible time zone offset: %d') % offset)
1263 return when, offset
1278 return when, offset
1264
1279
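# Illustrative sketch (assumes the usual entries in defaultdateformats):
# both a raw "unixtime offset" string and a formatted date yield the same
# kind of (unixtime, offset) tuple:
#
#   parsedate('1165432709 18000')          # -> (1165432709, 18000)
#   parsedate('2006-12-06 13:18 -0500')    # matched against defaultdateformats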
1265 def matchdate(date):
1280 def matchdate(date):
1266 """Return a function that matches a given date match specifier
1281 """Return a function that matches a given date match specifier
1267
1282
1268 Formats include:
1283 Formats include:
1269
1284
1270 '{date}' match a given date to the accuracy provided
1285 '{date}' match a given date to the accuracy provided
1271
1286
1272 '<{date}' on or before a given date
1287 '<{date}' on or before a given date
1273
1288
1274 '>{date}' on or after a given date
1289 '>{date}' on or after a given date
1275
1290
1276 """
1291 """
1277
1292
1278 def lower(date):
1293 def lower(date):
1279 d = dict(mb="1", d="1")
1294 d = dict(mb="1", d="1")
1280 return parsedate(date, extendeddateformats, d)[0]
1295 return parsedate(date, extendeddateformats, d)[0]
1281
1296
1282 def upper(date):
1297 def upper(date):
1283 d = dict(mb="12", HI="23", M="59", S="59")
1298 d = dict(mb="12", HI="23", M="59", S="59")
1284 for days in "31 30 29".split():
1299 for days in "31 30 29".split():
1285 try:
1300 try:
1286 d["d"] = days
1301 d["d"] = days
1287 return parsedate(date, extendeddateformats, d)[0]
1302 return parsedate(date, extendeddateformats, d)[0]
1288 except:
1303 except:
1289 pass
1304 pass
1290 d["d"] = "28"
1305 d["d"] = "28"
1291 return parsedate(date, extendeddateformats, d)[0]
1306 return parsedate(date, extendeddateformats, d)[0]
1292
1307
1293 date = date.strip()
1308 date = date.strip()
1294 if date[0] == "<":
1309 if date[0] == "<":
1295 when = upper(date[1:])
1310 when = upper(date[1:])
1296 return lambda x: x <= when
1311 return lambda x: x <= when
1297 elif date[0] == ">":
1312 elif date[0] == ">":
1298 when = lower(date[1:])
1313 when = lower(date[1:])
1299 return lambda x: x >= when
1314 return lambda x: x >= when
1300 elif date[0] == "-":
1315 elif date[0] == "-":
1301 try:
1316 try:
1302 days = int(date[1:])
1317 days = int(date[1:])
1303 except ValueError:
1318 except ValueError:
1304 raise Abort(_("invalid day spec: %s") % date[1:])
1319 raise Abort(_("invalid day spec: %s") % date[1:])
1305 when = makedate()[0] - days * 3600 * 24
1320 when = makedate()[0] - days * 3600 * 24
1306 return lambda x: x >= when
1321 return lambda x: x >= when
1307 elif " to " in date:
1322 elif " to " in date:
1308 a, b = date.split(" to ")
1323 a, b = date.split(" to ")
1309 start, stop = lower(a), upper(b)
1324 start, stop = lower(a), upper(b)
1310 return lambda x: x >= start and x <= stop
1325 return lambda x: x >= start and x <= stop
1311 else:
1326 else:
1312 start, stop = lower(date), upper(date)
1327 start, stop = lower(date), upper(date)
1313 return lambda x: x >= start and x <= stop
1328 return lambda x: x >= start and x <= stop
1314
1329
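# Illustrative sketch (assumes '%Y-%m-%d' is among the extended date
# formats): matchdate returns a predicate over unixtime values:
#
#   m = matchdate('>2006-12-06')
#   m(parsedate('2006-12-07')[0])    # True
#   m(parsedate('2006-12-05')[0])    # False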
1315 def shortuser(user):
1330 def shortuser(user):
1316 """Return a short representation of a user name or email address."""
1331 """Return a short representation of a user name or email address."""
1317 f = user.find('@')
1332 f = user.find('@')
1318 if f >= 0:
1333 if f >= 0:
1319 user = user[:f]
1334 user = user[:f]
1320 f = user.find('<')
1335 f = user.find('<')
1321 if f >= 0:
1336 if f >= 0:
1322 user = user[f+1:]
1337 user = user[f+1:]
1323 f = user.find(' ')
1338 f = user.find(' ')
1324 if f >= 0:
1339 if f >= 0:
1325 user = user[:f]
1340 user = user[:f]
1326 f = user.find('.')
1341 f = user.find('.')
1327 if f >= 0:
1342 if f >= 0:
1328 user = user[:f]
1343 user = user[:f]
1329 return user
1344 return user
1330
1345
1331 def email(author):
1346 def email(author):
1332 '''get email of author.'''
1347 '''get email of author.'''
1333 r = author.find('>')
1348 r = author.find('>')
1334 if r == -1: r = None
1349 if r == -1: r = None
1335 return author[author.find('<')+1:r]
1350 return author[author.find('<')+1:r]
1336
1351
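# Illustrative sketch (hypothetical author string): shortuser and email pick
# apart a typical "Name <address>" author value:
#
#   shortuser('John Doe <john.doe@example.com>')   # -> 'john'
#   email('John Doe <john.doe@example.com>')       # -> 'john.doe@example.com'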
1337 def ellipsis(text, maxlength=400):
1352 def ellipsis(text, maxlength=400):
1338 """Trim string to at most maxlength (default: 400) characters."""
1353 """Trim string to at most maxlength (default: 400) characters."""
1339 if len(text) <= maxlength:
1354 if len(text) <= maxlength:
1340 return text
1355 return text
1341 else:
1356 else:
1342 return "%s..." % (text[:maxlength-3])
1357 return "%s..." % (text[:maxlength-3])
1343
1358
1344 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1359 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1345 '''yield every hg repository under path, recursively.'''
1360 '''yield every hg repository under path, recursively.'''
1346 def errhandler(err):
1361 def errhandler(err):
1347 if err.filename == path:
1362 if err.filename == path:
1348 raise err
1363 raise err
1349 if followsym and hasattr(os.path, 'samestat'):
1364 if followsym and hasattr(os.path, 'samestat'):
1350 def _add_dir_if_not_there(dirlst, dirname):
1365 def _add_dir_if_not_there(dirlst, dirname):
1351 match = False
1366 match = False
1352 samestat = os.path.samestat
1367 samestat = os.path.samestat
1353 dirstat = os.stat(dirname)
1368 dirstat = os.stat(dirname)
1354 for lstdirstat in dirlst:
1369 for lstdirstat in dirlst:
1355 if samestat(dirstat, lstdirstat):
1370 if samestat(dirstat, lstdirstat):
1356 match = True
1371 match = True
1357 break
1372 break
1358 if not match:
1373 if not match:
1359 dirlst.append(dirstat)
1374 dirlst.append(dirstat)
1360 return not match
1375 return not match
1361 else:
1376 else:
1362 followsym = False
1377 followsym = False
1363
1378
1364 if (seen_dirs is None) and followsym:
1379 if (seen_dirs is None) and followsym:
1365 seen_dirs = []
1380 seen_dirs = []
1366 _add_dir_if_not_there(seen_dirs, path)
1381 _add_dir_if_not_there(seen_dirs, path)
1367 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1382 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1368 if '.hg' in dirs:
1383 if '.hg' in dirs:
1369 yield root # found a repository
1384 yield root # found a repository
1370 qroot = os.path.join(root, '.hg', 'patches')
1385 qroot = os.path.join(root, '.hg', 'patches')
1371 if os.path.isdir(os.path.join(qroot, '.hg')):
1386 if os.path.isdir(os.path.join(qroot, '.hg')):
1372 yield qroot # we have a patch queue repo here
1387 yield qroot # we have a patch queue repo here
1373 if recurse:
1388 if recurse:
1374 # avoid recursing inside the .hg directory
1389 # avoid recursing inside the .hg directory
1375 dirs.remove('.hg')
1390 dirs.remove('.hg')
1376 else:
1391 else:
1377 dirs[:] = [] # don't descend further
1392 dirs[:] = [] # don't descend further
1378 elif followsym:
1393 elif followsym:
1379 newdirs = []
1394 newdirs = []
1380 for d in dirs:
1395 for d in dirs:
1381 fname = os.path.join(root, d)
1396 fname = os.path.join(root, d)
1382 if _add_dir_if_not_there(seen_dirs, fname):
1397 if _add_dir_if_not_there(seen_dirs, fname):
1383 if os.path.islink(fname):
1398 if os.path.islink(fname):
1384 for hgname in walkrepos(fname, True, seen_dirs):
1399 for hgname in walkrepos(fname, True, seen_dirs):
1385 yield hgname
1400 yield hgname
1386 else:
1401 else:
1387 newdirs.append(d)
1402 newdirs.append(d)
1388 dirs[:] = newdirs
1403 dirs[:] = newdirs
1389
1404
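# Illustrative sketch (hypothetical directory): yield every repository under
# a tree, following symlinked directories without visiting any twice:
#
#   for repo in walkrepos(os.path.expanduser('~/src'), followsym=True):
#       print repo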
1390 _rcpath = None
1405 _rcpath = None
1391
1406
1392 def os_rcpath():
1407 def os_rcpath():
1393 '''return default os-specific hgrc search path'''
1408 '''return default os-specific hgrc search path'''
1394 path = system_rcpath()
1409 path = system_rcpath()
1395 path.extend(user_rcpath())
1410 path.extend(user_rcpath())
1396 path = [os.path.normpath(f) for f in path]
1411 path = [os.path.normpath(f) for f in path]
1397 return path
1412 return path
1398
1413
1399 def rcpath():
1414 def rcpath():
1400 '''return hgrc search path. if env var HGRCPATH is set, use it.
1415 '''return hgrc search path. if env var HGRCPATH is set, use it.
1401 for each item in path, if directory, use files ending in .rc,
1416 for each item in path, if directory, use files ending in .rc,
1402 else use item.
1417 else use item.
1403 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1418 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1404 if no HGRCPATH, use default os-specific path.'''
1419 if no HGRCPATH, use default os-specific path.'''
1405 global _rcpath
1420 global _rcpath
1406 if _rcpath is None:
1421 if _rcpath is None:
1407 if 'HGRCPATH' in os.environ:
1422 if 'HGRCPATH' in os.environ:
1408 _rcpath = []
1423 _rcpath = []
1409 for p in os.environ['HGRCPATH'].split(os.pathsep):
1424 for p in os.environ['HGRCPATH'].split(os.pathsep):
1410 if not p: continue
1425 if not p: continue
1411 if os.path.isdir(p):
1426 if os.path.isdir(p):
1412 for f, kind in osutil.listdir(p):
1427 for f, kind in osutil.listdir(p):
1413 if f.endswith('.rc'):
1428 if f.endswith('.rc'):
1414 _rcpath.append(os.path.join(p, f))
1429 _rcpath.append(os.path.join(p, f))
1415 else:
1430 else:
1416 _rcpath.append(p)
1431 _rcpath.append(p)
1417 else:
1432 else:
1418 _rcpath = os_rcpath()
1433 _rcpath = os_rcpath()
1419 return _rcpath
1434 return _rcpath
1420
1435
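# Illustrative sketch (hypothetical value): with HGRCPATH set to
# "/etc/mercurial:/home/user/.hgrc", rcpath() returns every *.rc file under
# /etc/mercurial followed by /home/user/.hgrc; with HGRCPATH set but empty,
# it returns [], so only the repository's .hg/hgrc is consulted.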
1421 def bytecount(nbytes):
1436 def bytecount(nbytes):
1422 '''return byte count formatted as readable string, with units'''
1437 '''return byte count formatted as readable string, with units'''
1423
1438
1424 units = (
1439 units = (
1425 (100, 1<<30, _('%.0f GB')),
1440 (100, 1<<30, _('%.0f GB')),
1426 (10, 1<<30, _('%.1f GB')),
1441 (10, 1<<30, _('%.1f GB')),
1427 (1, 1<<30, _('%.2f GB')),
1442 (1, 1<<30, _('%.2f GB')),
1428 (100, 1<<20, _('%.0f MB')),
1443 (100, 1<<20, _('%.0f MB')),
1429 (10, 1<<20, _('%.1f MB')),
1444 (10, 1<<20, _('%.1f MB')),
1430 (1, 1<<20, _('%.2f MB')),
1445 (1, 1<<20, _('%.2f MB')),
1431 (100, 1<<10, _('%.0f KB')),
1446 (100, 1<<10, _('%.0f KB')),
1432 (10, 1<<10, _('%.1f KB')),
1447 (10, 1<<10, _('%.1f KB')),
1433 (1, 1<<10, _('%.2f KB')),
1448 (1, 1<<10, _('%.2f KB')),
1434 (1, 1, _('%.0f bytes')),
1449 (1, 1, _('%.0f bytes')),
1435 )
1450 )
1436
1451
1437 for multiplier, divisor, format in units:
1452 for multiplier, divisor, format in units:
1438 if nbytes >= divisor * multiplier:
1453 if nbytes >= divisor * multiplier:
1439 return format % (nbytes / float(divisor))
1454 return format % (nbytes / float(divisor))
1440 return units[-1][2] % nbytes
1455 return units[-1][2] % nbytes
1441
1456
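# Illustrative sketch: the first matching (multiplier, divisor) pair picks
# both the unit and the precision:
#
#   bytecount(1)           # -> '1 bytes'
#   bytecount(10240)       # -> '10.0 KB'
#   bytecount(157286400)   # -> '150 MB'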
1442 def drop_scheme(scheme, path):
1457 def drop_scheme(scheme, path):
1443 sc = scheme + ':'
1458 sc = scheme + ':'
1444 if path.startswith(sc):
1459 if path.startswith(sc):
1445 path = path[len(sc):]
1460 path = path[len(sc):]
1446 if path.startswith('//'):
1461 if path.startswith('//'):
1447 path = path[2:]
1462 path = path[2:]
1448 return path
1463 return path
1449
1464
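# Illustrative sketch:
#
#   drop_scheme('file', 'file:///tmp/repo')     # -> '/tmp/repo'
#   drop_scheme('file', 'http://example.com/')  # unchanged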
1450 def uirepr(s):
1465 def uirepr(s):
1451 # Avoid double backslash in Windows path repr()
1466 # Avoid double backslash in Windows path repr()
1452 return repr(s).replace('\\\\', '\\')
1467 return repr(s).replace('\\\\', '\\')
1453
1468
1454 def termwidth():
1469 def termwidth():
1455 if 'COLUMNS' in os.environ:
1470 if 'COLUMNS' in os.environ:
1456 try:
1471 try:
1457 return int(os.environ['COLUMNS'])
1472 return int(os.environ['COLUMNS'])
1458 except ValueError:
1473 except ValueError:
1459 pass
1474 pass
1460 try:
1475 try:
1461 import termios, array, fcntl
1476 import termios, array, fcntl
1462 for dev in (sys.stdout, sys.stdin):
1477 for dev in (sys.stdout, sys.stdin):
1463 try:
1478 try:
1464 fd = dev.fileno()
1479 fd = dev.fileno()
1465 if not os.isatty(fd):
1480 if not os.isatty(fd):
1466 continue
1481 continue
1467 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
1482 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
1468 return array.array('h', arri)[1]
1483 return array.array('h', arri)[1]
1469 except ValueError:
1484 except ValueError:
1470 pass
1485 pass
1471 except ImportError:
1486 except ImportError:
1472 pass
1487 pass
1473 return 80
1488 return 80
1474
1489
1475 def iterlines(iterator):
1490 def iterlines(iterator):
1476 for chunk in iterator:
1491 for chunk in iterator:
1477 for line in chunk.splitlines():
1492 for line in chunk.splitlines():
1478 yield line
1493 yield line