config: move config.sortdict class into util...
Angel Ezquerra
r21813:c2262004 default
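The diff below removes the sortdict class from config.py, adds the same class to util.py, and switches config's three call sites from sortdict() to util.sortdict(). A minimal usage sketch of the relocated class (illustrative only; it assumes the mercurial package layout of this era, with config.py and util.py both living inside the mercurial package):

    from mercurial import util

    d = util.sortdict()        # formerly config.sortdict()
    d['b'] = 1
    d['a'] = 2
    print d.keys()             # ['b', 'a'] -- insertion order, not sorted order
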
--- a/config.py
+++ b/config.py
@@ -1,190 +1,159 @@
 # config.py - configuration parsing for Mercurial
 #
 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from i18n import _
 import error, util
 import os, errno

-class sortdict(dict):
-    'a simple sorted dictionary'
-    def __init__(self, data=None):
-        self._list = []
-        if data:
-            self.update(data)
-    def copy(self):
-        return sortdict(self)
-    def __setitem__(self, key, val):
-        if key in self:
-            self._list.remove(key)
-        self._list.append(key)
-        dict.__setitem__(self, key, val)
-    def __iter__(self):
-        return self._list.__iter__()
-    def update(self, src):
-        for k in src:
-            self[k] = src[k]
-    def clear(self):
-        dict.clear(self)
-        self._list = []
-    def items(self):
-        return [(k, self[k]) for k in self._list]
-    def __delitem__(self, key):
-        dict.__delitem__(self, key)
-        self._list.remove(key)
-    def keys(self):
-        return self._list
-    def iterkeys(self):
-        return self._list.__iter__()
-
 class config(object):
     def __init__(self, data=None):
         self._data = {}
         self._source = {}
         self._unset = []
         if data:
             for k in data._data:
                 self._data[k] = data[k].copy()
             self._source = data._source.copy()
     def copy(self):
         return config(self)
     def __contains__(self, section):
         return section in self._data
     def __getitem__(self, section):
         return self._data.get(section, {})
     def __iter__(self):
         for d in self.sections():
             yield d
     def update(self, src):
         for s, n in src._unset:
             if s in self and n in self._data[s]:
                 del self._data[s][n]
                 del self._source[(s, n)]
         for s in src:
             if s not in self:
-                self._data[s] = sortdict()
+                self._data[s] = util.sortdict()
             self._data[s].update(src._data[s])
         self._source.update(src._source)
     def get(self, section, item, default=None):
         return self._data.get(section, {}).get(item, default)

     def backup(self, section, item):
         """return a tuple allowing restore to reinstall a previous value

         The main reason we need it is because it handles the "no data" case.
         """
         try:
             value = self._data[section][item]
             source = self.source(section, item)
             return (section, item, value, source)
         except KeyError:
             return (section, item)

     def source(self, section, item):
         return self._source.get((section, item), "")
     def sections(self):
         return sorted(self._data.keys())
     def items(self, section):
         return self._data.get(section, {}).items()
     def set(self, section, item, value, source=""):
         if section not in self:
-            self._data[section] = sortdict()
+            self._data[section] = util.sortdict()
         self._data[section][item] = value
         if source:
             self._source[(section, item)] = source

     def restore(self, data):
         """restore data returned by self.backup"""
         if len(data) == 4:
             # restore old data
             section, item, value, source = data
             self._data[section][item] = value
             self._source[(section, item)] = source
         else:
             # no data before, remove everything
             section, item = data
             if section in self._data:
                 del self._data[section][item]
             self._source.pop((section, item), None)

     def parse(self, src, data, sections=None, remap=None, include=None):
         sectionre = util.compilere(r'\[([^\[]+)\]')
         itemre = util.compilere(r'([^=\s][^=]*?)\s*=\s*(.*\S|)')
         contre = util.compilere(r'\s+(\S|\S.*\S)\s*$')
         emptyre = util.compilere(r'(;|#|\s*$)')
         commentre = util.compilere(r'(;|#)')
         unsetre = util.compilere(r'%unset\s+(\S+)')
         includere = util.compilere(r'%include\s+(\S|\S.*\S)\s*$')
         section = ""
         item = None
         line = 0
         cont = False

         for l in data.splitlines(True):
             line += 1
             if line == 1 and l.startswith('\xef\xbb\xbf'):
                 # Someone set us up the BOM
                 l = l[3:]
             if cont:
                 if commentre.match(l):
                     continue
                 m = contre.match(l)
                 if m:
                     if sections and section not in sections:
                         continue
                     v = self.get(section, item) + "\n" + m.group(1)
                     self.set(section, item, v, "%s:%d" % (src, line))
                     continue
                 item = None
                 cont = False
             m = includere.match(l)
             if m:
                 inc = util.expandpath(m.group(1))
                 base = os.path.dirname(src)
                 inc = os.path.normpath(os.path.join(base, inc))
                 if include:
                     try:
                         include(inc, remap=remap, sections=sections)
                     except IOError, inst:
                         if inst.errno != errno.ENOENT:
                             raise error.ParseError(_("cannot include %s (%s)")
                                                    % (inc, inst.strerror),
                                                    "%s:%s" % (src, line))
                 continue
             if emptyre.match(l):
                 continue
             m = sectionre.match(l)
             if m:
                 section = m.group(1)
                 if remap:
                     section = remap.get(section, section)
                 if section not in self:
-                    self._data[section] = sortdict()
+                    self._data[section] = util.sortdict()
                 continue
             m = itemre.match(l)
             if m:
                 item = m.group(1)
                 cont = True
                 if sections and section not in sections:
                     continue
                 self.set(section, item, m.group(2), "%s:%d" % (src, line))
                 continue
             m = unsetre.match(l)
             if m:
                 name = m.group(1)
                 if sections and section not in sections:
                     continue
                 if self.get(section, name) is not None:
                     del self._data[section][name]
                 self._unset.append((section, name))
                 continue

             raise error.ParseError(l.rstrip(), ("%s:%s" % (src, line)))

     def read(self, path, fp=None, sections=None, remap=None):
         if not fp:
             fp = util.posixfile(path)
         self.parse(path, fp.read(), sections, remap, self.read)
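config relies on that insertion-order behaviour: because every section is backed by util.sortdict(), items() returns entries in the order they were parsed from the configuration text. A rough sketch using the parse() API shown above (illustrative only):

    from mercurial import config

    c = config.config()
    c.parse('<example>', '[ui]\nusername = alice\nverbose = True\n')
    print c.items('ui')        # [('username', 'alice'), ('verbose', 'True')]
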
--- a/util.py
+++ b/util.py
@@ -1,2022 +1,2053 @@
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from i18n import _
16 from i18n import _
17 import error, osutil, encoding
17 import error, osutil, encoding
18 import errno, re, shutil, sys, tempfile, traceback
18 import errno, re, shutil, sys, tempfile, traceback
19 import os, time, datetime, calendar, textwrap, signal, collections
19 import os, time, datetime, calendar, textwrap, signal, collections
20 import imp, socket, urllib
20 import imp, socket, urllib
21
21
22 if os.name == 'nt':
22 if os.name == 'nt':
23 import windows as platform
23 import windows as platform
24 else:
24 else:
25 import posix as platform
25 import posix as platform
26
26
27 cachestat = platform.cachestat
27 cachestat = platform.cachestat
28 checkexec = platform.checkexec
28 checkexec = platform.checkexec
29 checklink = platform.checklink
29 checklink = platform.checklink
30 copymode = platform.copymode
30 copymode = platform.copymode
31 executablepath = platform.executablepath
31 executablepath = platform.executablepath
32 expandglobs = platform.expandglobs
32 expandglobs = platform.expandglobs
33 explainexit = platform.explainexit
33 explainexit = platform.explainexit
34 findexe = platform.findexe
34 findexe = platform.findexe
35 gethgcmd = platform.gethgcmd
35 gethgcmd = platform.gethgcmd
36 getuser = platform.getuser
36 getuser = platform.getuser
37 groupmembers = platform.groupmembers
37 groupmembers = platform.groupmembers
38 groupname = platform.groupname
38 groupname = platform.groupname
39 hidewindow = platform.hidewindow
39 hidewindow = platform.hidewindow
40 isexec = platform.isexec
40 isexec = platform.isexec
41 isowner = platform.isowner
41 isowner = platform.isowner
42 localpath = platform.localpath
42 localpath = platform.localpath
43 lookupreg = platform.lookupreg
43 lookupreg = platform.lookupreg
44 makedir = platform.makedir
44 makedir = platform.makedir
45 nlinks = platform.nlinks
45 nlinks = platform.nlinks
46 normpath = platform.normpath
46 normpath = platform.normpath
47 normcase = platform.normcase
47 normcase = platform.normcase
48 openhardlinks = platform.openhardlinks
48 openhardlinks = platform.openhardlinks
49 oslink = platform.oslink
49 oslink = platform.oslink
50 parsepatchoutput = platform.parsepatchoutput
50 parsepatchoutput = platform.parsepatchoutput
51 pconvert = platform.pconvert
51 pconvert = platform.pconvert
52 popen = platform.popen
52 popen = platform.popen
53 posixfile = platform.posixfile
53 posixfile = platform.posixfile
54 quotecommand = platform.quotecommand
54 quotecommand = platform.quotecommand
55 rename = platform.rename
55 rename = platform.rename
56 samedevice = platform.samedevice
56 samedevice = platform.samedevice
57 samefile = platform.samefile
57 samefile = platform.samefile
58 samestat = platform.samestat
58 samestat = platform.samestat
59 setbinary = platform.setbinary
59 setbinary = platform.setbinary
60 setflags = platform.setflags
60 setflags = platform.setflags
61 setsignalhandler = platform.setsignalhandler
61 setsignalhandler = platform.setsignalhandler
62 shellquote = platform.shellquote
62 shellquote = platform.shellquote
63 spawndetached = platform.spawndetached
63 spawndetached = platform.spawndetached
64 split = platform.split
64 split = platform.split
65 sshargs = platform.sshargs
65 sshargs = platform.sshargs
66 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
66 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
67 statisexec = platform.statisexec
67 statisexec = platform.statisexec
68 statislink = platform.statislink
68 statislink = platform.statislink
69 termwidth = platform.termwidth
69 termwidth = platform.termwidth
70 testpid = platform.testpid
70 testpid = platform.testpid
71 umask = platform.umask
71 umask = platform.umask
72 unlink = platform.unlink
72 unlink = platform.unlink
73 unlinkpath = platform.unlinkpath
73 unlinkpath = platform.unlinkpath
74 username = platform.username
74 username = platform.username
75
75
76 # Python compatibility
76 # Python compatibility
77
77
78 _notset = object()
78 _notset = object()
79
79
80 def safehasattr(thing, attr):
80 def safehasattr(thing, attr):
81 return getattr(thing, attr, _notset) is not _notset
81 return getattr(thing, attr, _notset) is not _notset
82
82
83 def sha1(s=''):
83 def sha1(s=''):
84 '''
84 '''
85 Low-overhead wrapper around Python's SHA support
85 Low-overhead wrapper around Python's SHA support
86
86
87 >>> f = _fastsha1
87 >>> f = _fastsha1
88 >>> a = sha1()
88 >>> a = sha1()
89 >>> a = f()
89 >>> a = f()
90 >>> a.hexdigest()
90 >>> a.hexdigest()
91 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
91 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
92 '''
92 '''
93
93
94 return _fastsha1(s)
94 return _fastsha1(s)
95
95
96 def _fastsha1(s=''):
96 def _fastsha1(s=''):
97 # This function will import sha1 from hashlib or sha (whichever is
97 # This function will import sha1 from hashlib or sha (whichever is
98 # available) and overwrite itself with it on the first call.
98 # available) and overwrite itself with it on the first call.
99 # Subsequent calls will go directly to the imported function.
99 # Subsequent calls will go directly to the imported function.
100 if sys.version_info >= (2, 5):
100 if sys.version_info >= (2, 5):
101 from hashlib import sha1 as _sha1
101 from hashlib import sha1 as _sha1
102 else:
102 else:
103 from sha import sha as _sha1
103 from sha import sha as _sha1
104 global _fastsha1, sha1
104 global _fastsha1, sha1
105 _fastsha1 = sha1 = _sha1
105 _fastsha1 = sha1 = _sha1
106 return _sha1(s)
106 return _sha1(s)
107
107
108 try:
108 try:
109 buffer = buffer
109 buffer = buffer
110 except NameError:
110 except NameError:
111 if sys.version_info[0] < 3:
111 if sys.version_info[0] < 3:
112 def buffer(sliceable, offset=0):
112 def buffer(sliceable, offset=0):
113 return sliceable[offset:]
113 return sliceable[offset:]
114 else:
114 else:
115 def buffer(sliceable, offset=0):
115 def buffer(sliceable, offset=0):
116 return memoryview(sliceable)[offset:]
116 return memoryview(sliceable)[offset:]
117
117
118 import subprocess
118 import subprocess
119 closefds = os.name == 'posix'
119 closefds = os.name == 'posix'
120
120
121 def popen2(cmd, env=None, newlines=False):
121 def popen2(cmd, env=None, newlines=False):
122 # Setting bufsize to -1 lets the system decide the buffer size.
122 # Setting bufsize to -1 lets the system decide the buffer size.
123 # The default for bufsize is 0, meaning unbuffered. This leads to
123 # The default for bufsize is 0, meaning unbuffered. This leads to
124 # poor performance on Mac OS X: http://bugs.python.org/issue4194
124 # poor performance on Mac OS X: http://bugs.python.org/issue4194
125 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
125 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
126 close_fds=closefds,
126 close_fds=closefds,
127 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
127 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
128 universal_newlines=newlines,
128 universal_newlines=newlines,
129 env=env)
129 env=env)
130 return p.stdin, p.stdout
130 return p.stdin, p.stdout
131
131
132 def popen3(cmd, env=None, newlines=False):
132 def popen3(cmd, env=None, newlines=False):
133 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
133 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
134 return stdin, stdout, stderr
134 return stdin, stdout, stderr
135
135
136 def popen4(cmd, env=None, newlines=False):
136 def popen4(cmd, env=None, newlines=False):
137 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
137 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
138 close_fds=closefds,
138 close_fds=closefds,
139 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
139 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
140 stderr=subprocess.PIPE,
140 stderr=subprocess.PIPE,
141 universal_newlines=newlines,
141 universal_newlines=newlines,
142 env=env)
142 env=env)
143 return p.stdin, p.stdout, p.stderr, p
143 return p.stdin, p.stdout, p.stderr, p
144
144
145 def version():
145 def version():
146 """Return version information if available."""
146 """Return version information if available."""
147 try:
147 try:
148 import __version__
148 import __version__
149 return __version__.version
149 return __version__.version
150 except ImportError:
150 except ImportError:
151 return 'unknown'
151 return 'unknown'
152
152
153 # used by parsedate
153 # used by parsedate
154 defaultdateformats = (
154 defaultdateformats = (
155 '%Y-%m-%d %H:%M:%S',
155 '%Y-%m-%d %H:%M:%S',
156 '%Y-%m-%d %I:%M:%S%p',
156 '%Y-%m-%d %I:%M:%S%p',
157 '%Y-%m-%d %H:%M',
157 '%Y-%m-%d %H:%M',
158 '%Y-%m-%d %I:%M%p',
158 '%Y-%m-%d %I:%M%p',
159 '%Y-%m-%d',
159 '%Y-%m-%d',
160 '%m-%d',
160 '%m-%d',
161 '%m/%d',
161 '%m/%d',
162 '%m/%d/%y',
162 '%m/%d/%y',
163 '%m/%d/%Y',
163 '%m/%d/%Y',
164 '%a %b %d %H:%M:%S %Y',
164 '%a %b %d %H:%M:%S %Y',
165 '%a %b %d %I:%M:%S%p %Y',
165 '%a %b %d %I:%M:%S%p %Y',
166 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
166 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
167 '%b %d %H:%M:%S %Y',
167 '%b %d %H:%M:%S %Y',
168 '%b %d %I:%M:%S%p %Y',
168 '%b %d %I:%M:%S%p %Y',
169 '%b %d %H:%M:%S',
169 '%b %d %H:%M:%S',
170 '%b %d %I:%M:%S%p',
170 '%b %d %I:%M:%S%p',
171 '%b %d %H:%M',
171 '%b %d %H:%M',
172 '%b %d %I:%M%p',
172 '%b %d %I:%M%p',
173 '%b %d %Y',
173 '%b %d %Y',
174 '%b %d',
174 '%b %d',
175 '%H:%M:%S',
175 '%H:%M:%S',
176 '%I:%M:%S%p',
176 '%I:%M:%S%p',
177 '%H:%M',
177 '%H:%M',
178 '%I:%M%p',
178 '%I:%M%p',
179 )
179 )
180
180
181 extendeddateformats = defaultdateformats + (
181 extendeddateformats = defaultdateformats + (
182 "%Y",
182 "%Y",
183 "%Y-%m",
183 "%Y-%m",
184 "%b",
184 "%b",
185 "%b %Y",
185 "%b %Y",
186 )
186 )
187
187
188 def cachefunc(func):
188 def cachefunc(func):
189 '''cache the result of function calls'''
189 '''cache the result of function calls'''
190 # XXX doesn't handle keywords args
190 # XXX doesn't handle keywords args
191 if func.func_code.co_argcount == 0:
191 if func.func_code.co_argcount == 0:
192 cache = []
192 cache = []
193 def f():
193 def f():
194 if len(cache) == 0:
194 if len(cache) == 0:
195 cache.append(func())
195 cache.append(func())
196 return cache[0]
196 return cache[0]
197 return f
197 return f
198 cache = {}
198 cache = {}
199 if func.func_code.co_argcount == 1:
199 if func.func_code.co_argcount == 1:
200 # we gain a small amount of time because
200 # we gain a small amount of time because
201 # we don't need to pack/unpack the list
201 # we don't need to pack/unpack the list
202 def f(arg):
202 def f(arg):
203 if arg not in cache:
203 if arg not in cache:
204 cache[arg] = func(arg)
204 cache[arg] = func(arg)
205 return cache[arg]
205 return cache[arg]
206 else:
206 else:
207 def f(*args):
207 def f(*args):
208 if args not in cache:
208 if args not in cache:
209 cache[args] = func(*args)
209 cache[args] = func(*args)
210 return cache[args]
210 return cache[args]
211
211
212 return f
212 return f
213
213
214 try:
214 try:
215 collections.deque.remove
215 collections.deque.remove
216 deque = collections.deque
216 deque = collections.deque
217 except AttributeError:
217 except AttributeError:
218 # python 2.4 lacks deque.remove
218 # python 2.4 lacks deque.remove
219 class deque(collections.deque):
219 class deque(collections.deque):
220 def remove(self, val):
220 def remove(self, val):
221 for i, v in enumerate(self):
221 for i, v in enumerate(self):
222 if v == val:
222 if v == val:
223 del self[i]
223 del self[i]
224 break
224 break
225
225
+class sortdict(dict):
+    '''a simple sorted dictionary'''
+    def __init__(self, data=None):
+        self._list = []
+        if data:
+            self.update(data)
+    def copy(self):
+        return sortdict(self)
+    def __setitem__(self, key, val):
+        if key in self:
+            self._list.remove(key)
+        self._list.append(key)
+        dict.__setitem__(self, key, val)
+    def __iter__(self):
+        return self._list.__iter__()
+    def update(self, src):
+        for k in src:
+            self[k] = src[k]
+    def clear(self):
+        dict.clear(self)
+        self._list = []
+    def items(self):
+        return [(k, self[k]) for k in self._list]
+    def __delitem__(self, key):
+        dict.__delitem__(self, key)
+        self._list.remove(key)
+    def keys(self):
+        return self._list
+    def iterkeys(self):
+        return self._list.__iter__()
+
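
# Illustrative note, not part of util.py: because __setitem__ first removes a
# pre-existing key from self._list and then re-appends it, assigning to an
# existing key moves it to the end of the iteration order:
#     d = sortdict()
#     d['x'] = 1
#     d['y'] = 2
#     d['x'] = 3
#     d.items()       # [('y', 2), ('x', 3)]
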
226 class lrucachedict(object):
257 class lrucachedict(object):
227 '''cache most recent gets from or sets to this dictionary'''
258 '''cache most recent gets from or sets to this dictionary'''
228 def __init__(self, maxsize):
259 def __init__(self, maxsize):
229 self._cache = {}
260 self._cache = {}
230 self._maxsize = maxsize
261 self._maxsize = maxsize
231 self._order = deque()
262 self._order = deque()
232
263
233 def __getitem__(self, key):
264 def __getitem__(self, key):
234 value = self._cache[key]
265 value = self._cache[key]
235 self._order.remove(key)
266 self._order.remove(key)
236 self._order.append(key)
267 self._order.append(key)
237 return value
268 return value
238
269
239 def __setitem__(self, key, value):
270 def __setitem__(self, key, value):
240 if key not in self._cache:
271 if key not in self._cache:
241 if len(self._cache) >= self._maxsize:
272 if len(self._cache) >= self._maxsize:
242 del self._cache[self._order.popleft()]
273 del self._cache[self._order.popleft()]
243 else:
274 else:
244 self._order.remove(key)
275 self._order.remove(key)
245 self._cache[key] = value
276 self._cache[key] = value
246 self._order.append(key)
277 self._order.append(key)
247
278
248 def __contains__(self, key):
279 def __contains__(self, key):
249 return key in self._cache
280 return key in self._cache
250
281
251 def clear(self):
282 def clear(self):
252 self._cache.clear()
283 self._cache.clear()
253 self._order = deque()
284 self._order = deque()
254
285
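
# Illustrative note, not part of util.py: lrucachedict holds at most maxsize
# entries and evicts the least recently used key when a new one is added:
#     c = lrucachedict(2)
#     c['a'] = 1
#     c['b'] = 2
#     c['a']          # touching 'a' makes 'b' the oldest entry
#     c['c'] = 3      # evicts 'b'
#     'b' in c        # False
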
255 def lrucachefunc(func):
286 def lrucachefunc(func):
256 '''cache most recent results of function calls'''
287 '''cache most recent results of function calls'''
257 cache = {}
288 cache = {}
258 order = deque()
289 order = deque()
259 if func.func_code.co_argcount == 1:
290 if func.func_code.co_argcount == 1:
260 def f(arg):
291 def f(arg):
261 if arg not in cache:
292 if arg not in cache:
262 if len(cache) > 20:
293 if len(cache) > 20:
263 del cache[order.popleft()]
294 del cache[order.popleft()]
264 cache[arg] = func(arg)
295 cache[arg] = func(arg)
265 else:
296 else:
266 order.remove(arg)
297 order.remove(arg)
267 order.append(arg)
298 order.append(arg)
268 return cache[arg]
299 return cache[arg]
269 else:
300 else:
270 def f(*args):
301 def f(*args):
271 if args not in cache:
302 if args not in cache:
272 if len(cache) > 20:
303 if len(cache) > 20:
273 del cache[order.popleft()]
304 del cache[order.popleft()]
274 cache[args] = func(*args)
305 cache[args] = func(*args)
275 else:
306 else:
276 order.remove(args)
307 order.remove(args)
277 order.append(args)
308 order.append(args)
278 return cache[args]
309 return cache[args]
279
310
280 return f
311 return f
281
312
282 class propertycache(object):
313 class propertycache(object):
283 def __init__(self, func):
314 def __init__(self, func):
284 self.func = func
315 self.func = func
285 self.name = func.__name__
316 self.name = func.__name__
286 def __get__(self, obj, type=None):
317 def __get__(self, obj, type=None):
287 result = self.func(obj)
318 result = self.func(obj)
288 self.cachevalue(obj, result)
319 self.cachevalue(obj, result)
289 return result
320 return result
290
321
291 def cachevalue(self, obj, value):
322 def cachevalue(self, obj, value):
292 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
323 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
293 obj.__dict__[self.name] = value
324 obj.__dict__[self.name] = value
294
325
295 def pipefilter(s, cmd):
326 def pipefilter(s, cmd):
296 '''filter string S through command CMD, returning its output'''
327 '''filter string S through command CMD, returning its output'''
297 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
328 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
298 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
329 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
299 pout, perr = p.communicate(s)
330 pout, perr = p.communicate(s)
300 return pout
331 return pout
301
332
302 def tempfilter(s, cmd):
333 def tempfilter(s, cmd):
303 '''filter string S through a pair of temporary files with CMD.
334 '''filter string S through a pair of temporary files with CMD.
304 CMD is used as a template to create the real command to be run,
335 CMD is used as a template to create the real command to be run,
305 with the strings INFILE and OUTFILE replaced by the real names of
336 with the strings INFILE and OUTFILE replaced by the real names of
306 the temporary files generated.'''
337 the temporary files generated.'''
307 inname, outname = None, None
338 inname, outname = None, None
308 try:
339 try:
309 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
340 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
310 fp = os.fdopen(infd, 'wb')
341 fp = os.fdopen(infd, 'wb')
311 fp.write(s)
342 fp.write(s)
312 fp.close()
343 fp.close()
313 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
344 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
314 os.close(outfd)
345 os.close(outfd)
315 cmd = cmd.replace('INFILE', inname)
346 cmd = cmd.replace('INFILE', inname)
316 cmd = cmd.replace('OUTFILE', outname)
347 cmd = cmd.replace('OUTFILE', outname)
317 code = os.system(cmd)
348 code = os.system(cmd)
318 if sys.platform == 'OpenVMS' and code & 1:
349 if sys.platform == 'OpenVMS' and code & 1:
319 code = 0
350 code = 0
320 if code:
351 if code:
321 raise Abort(_("command '%s' failed: %s") %
352 raise Abort(_("command '%s' failed: %s") %
322 (cmd, explainexit(code)))
353 (cmd, explainexit(code)))
323 fp = open(outname, 'rb')
354 fp = open(outname, 'rb')
324 r = fp.read()
355 r = fp.read()
325 fp.close()
356 fp.close()
326 return r
357 return r
327 finally:
358 finally:
328 try:
359 try:
329 if inname:
360 if inname:
330 os.unlink(inname)
361 os.unlink(inname)
331 except OSError:
362 except OSError:
332 pass
363 pass
333 try:
364 try:
334 if outname:
365 if outname:
335 os.unlink(outname)
366 os.unlink(outname)
336 except OSError:
367 except OSError:
337 pass
368 pass
338
369
339 filtertable = {
370 filtertable = {
340 'tempfile:': tempfilter,
371 'tempfile:': tempfilter,
341 'pipe:': pipefilter,
372 'pipe:': pipefilter,
342 }
373 }
343
374
344 def filter(s, cmd):
375 def filter(s, cmd):
345 "filter a string through a command that transforms its input to its output"
376 "filter a string through a command that transforms its input to its output"
346 for name, fn in filtertable.iteritems():
377 for name, fn in filtertable.iteritems():
347 if cmd.startswith(name):
378 if cmd.startswith(name):
348 return fn(s, cmd[len(name):].lstrip())
379 return fn(s, cmd[len(name):].lstrip())
349 return pipefilter(s, cmd)
380 return pipefilter(s, cmd)
350
381
351 def binary(s):
382 def binary(s):
352 """return true if a string is binary data"""
383 """return true if a string is binary data"""
353 return bool(s and '\0' in s)
384 return bool(s and '\0' in s)
354
385
355 def increasingchunks(source, min=1024, max=65536):
386 def increasingchunks(source, min=1024, max=65536):
356 '''return no less than min bytes per chunk while data remains,
387 '''return no less than min bytes per chunk while data remains,
357 doubling min after each chunk until it reaches max'''
388 doubling min after each chunk until it reaches max'''
358 def log2(x):
389 def log2(x):
359 if not x:
390 if not x:
360 return 0
391 return 0
361 i = 0
392 i = 0
362 while x:
393 while x:
363 x >>= 1
394 x >>= 1
364 i += 1
395 i += 1
365 return i - 1
396 return i - 1
366
397
367 buf = []
398 buf = []
368 blen = 0
399 blen = 0
369 for chunk in source:
400 for chunk in source:
370 buf.append(chunk)
401 buf.append(chunk)
371 blen += len(chunk)
402 blen += len(chunk)
372 if blen >= min:
403 if blen >= min:
373 if min < max:
404 if min < max:
374 min = min << 1
405 min = min << 1
375 nmin = 1 << log2(blen)
406 nmin = 1 << log2(blen)
376 if nmin > min:
407 if nmin > min:
377 min = nmin
408 min = nmin
378 if min > max:
409 if min > max:
379 min = max
410 min = max
380 yield ''.join(buf)
411 yield ''.join(buf)
381 blen = 0
412 blen = 0
382 buf = []
413 buf = []
383 if buf:
414 if buf:
384 yield ''.join(buf)
415 yield ''.join(buf)
385
416
386 Abort = error.Abort
417 Abort = error.Abort
387
418
388 def always(fn):
419 def always(fn):
389 return True
420 return True
390
421
391 def never(fn):
422 def never(fn):
392 return False
423 return False
393
424
394 def pathto(root, n1, n2):
425 def pathto(root, n1, n2):
395 '''return the relative path from one place to another.
426 '''return the relative path from one place to another.
396 root should use os.sep to separate directories
427 root should use os.sep to separate directories
397 n1 should use os.sep to separate directories
428 n1 should use os.sep to separate directories
398 n2 should use "/" to separate directories
429 n2 should use "/" to separate directories
399 returns an os.sep-separated path.
430 returns an os.sep-separated path.
400
431
401 If n1 is a relative path, it's assumed it's
432 If n1 is a relative path, it's assumed it's
402 relative to root.
433 relative to root.
403 n2 should always be relative to root.
434 n2 should always be relative to root.
404 '''
435 '''
405 if not n1:
436 if not n1:
406 return localpath(n2)
437 return localpath(n2)
407 if os.path.isabs(n1):
438 if os.path.isabs(n1):
408 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
439 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
409 return os.path.join(root, localpath(n2))
440 return os.path.join(root, localpath(n2))
410 n2 = '/'.join((pconvert(root), n2))
441 n2 = '/'.join((pconvert(root), n2))
411 a, b = splitpath(n1), n2.split('/')
442 a, b = splitpath(n1), n2.split('/')
412 a.reverse()
443 a.reverse()
413 b.reverse()
444 b.reverse()
414 while a and b and a[-1] == b[-1]:
445 while a and b and a[-1] == b[-1]:
415 a.pop()
446 a.pop()
416 b.pop()
447 b.pop()
417 b.reverse()
448 b.reverse()
418 return os.sep.join((['..'] * len(a)) + b) or '.'
449 return os.sep.join((['..'] * len(a)) + b) or '.'
419
450
420 _hgexecutable = None
451 _hgexecutable = None
421
452
422 def mainfrozen():
453 def mainfrozen():
423 """return True if we are a frozen executable.
454 """return True if we are a frozen executable.
424
455
425 The code supports py2exe (most common, Windows only) and tools/freeze
456 The code supports py2exe (most common, Windows only) and tools/freeze
426 (portable, not much used).
457 (portable, not much used).
427 """
458 """
428 return (safehasattr(sys, "frozen") or # new py2exe
459 return (safehasattr(sys, "frozen") or # new py2exe
429 safehasattr(sys, "importers") or # old py2exe
460 safehasattr(sys, "importers") or # old py2exe
430 imp.is_frozen("__main__")) # tools/freeze
461 imp.is_frozen("__main__")) # tools/freeze
431
462
432 def hgexecutable():
463 def hgexecutable():
433 """return location of the 'hg' executable.
464 """return location of the 'hg' executable.
434
465
435 Defaults to $HG or 'hg' in the search path.
466 Defaults to $HG or 'hg' in the search path.
436 """
467 """
437 if _hgexecutable is None:
468 if _hgexecutable is None:
438 hg = os.environ.get('HG')
469 hg = os.environ.get('HG')
439 mainmod = sys.modules['__main__']
470 mainmod = sys.modules['__main__']
440 if hg:
471 if hg:
441 _sethgexecutable(hg)
472 _sethgexecutable(hg)
442 elif mainfrozen():
473 elif mainfrozen():
443 _sethgexecutable(sys.executable)
474 _sethgexecutable(sys.executable)
444 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
475 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
445 _sethgexecutable(mainmod.__file__)
476 _sethgexecutable(mainmod.__file__)
446 else:
477 else:
447 exe = findexe('hg') or os.path.basename(sys.argv[0])
478 exe = findexe('hg') or os.path.basename(sys.argv[0])
448 _sethgexecutable(exe)
479 _sethgexecutable(exe)
449 return _hgexecutable
480 return _hgexecutable
450
481
451 def _sethgexecutable(path):
482 def _sethgexecutable(path):
452 """set location of the 'hg' executable"""
483 """set location of the 'hg' executable"""
453 global _hgexecutable
484 global _hgexecutable
454 _hgexecutable = path
485 _hgexecutable = path
455
486
456 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
487 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
457 '''enhanced shell command execution.
488 '''enhanced shell command execution.
458 run with environment maybe modified, maybe in different dir.
489 run with environment maybe modified, maybe in different dir.
459
490
460 if command fails and onerr is None, return status. if ui object,
491 if command fails and onerr is None, return status. if ui object,
461 print error message and return status, else raise onerr object as
492 print error message and return status, else raise onerr object as
462 exception.
493 exception.
463
494
464 if out is specified, it is assumed to be a file-like object that has a
495 if out is specified, it is assumed to be a file-like object that has a
465 write() method. stdout and stderr will be redirected to out.'''
496 write() method. stdout and stderr will be redirected to out.'''
466 try:
497 try:
467 sys.stdout.flush()
498 sys.stdout.flush()
468 except Exception:
499 except Exception:
469 pass
500 pass
470 def py2shell(val):
501 def py2shell(val):
471 'convert python object into string that is useful to shell'
502 'convert python object into string that is useful to shell'
472 if val is None or val is False:
503 if val is None or val is False:
473 return '0'
504 return '0'
474 if val is True:
505 if val is True:
475 return '1'
506 return '1'
476 return str(val)
507 return str(val)
477 origcmd = cmd
508 origcmd = cmd
478 cmd = quotecommand(cmd)
509 cmd = quotecommand(cmd)
479 if sys.platform == 'plan9' and (sys.version_info[0] == 2
510 if sys.platform == 'plan9' and (sys.version_info[0] == 2
480 and sys.version_info[1] < 7):
511 and sys.version_info[1] < 7):
481 # subprocess kludge to work around issues in half-baked Python
512 # subprocess kludge to work around issues in half-baked Python
482 # ports, notably bichued/python:
513 # ports, notably bichued/python:
483 if not cwd is None:
514 if not cwd is None:
484 os.chdir(cwd)
515 os.chdir(cwd)
485 rc = os.system(cmd)
516 rc = os.system(cmd)
486 else:
517 else:
487 env = dict(os.environ)
518 env = dict(os.environ)
488 env.update((k, py2shell(v)) for k, v in environ.iteritems())
519 env.update((k, py2shell(v)) for k, v in environ.iteritems())
489 env['HG'] = hgexecutable()
520 env['HG'] = hgexecutable()
490 if out is None or out == sys.__stdout__:
521 if out is None or out == sys.__stdout__:
491 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
522 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
492 env=env, cwd=cwd)
523 env=env, cwd=cwd)
493 else:
524 else:
494 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
525 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
495 env=env, cwd=cwd, stdout=subprocess.PIPE,
526 env=env, cwd=cwd, stdout=subprocess.PIPE,
496 stderr=subprocess.STDOUT)
527 stderr=subprocess.STDOUT)
497 for line in proc.stdout:
528 for line in proc.stdout:
498 out.write(line)
529 out.write(line)
499 proc.wait()
530 proc.wait()
500 rc = proc.returncode
531 rc = proc.returncode
501 if sys.platform == 'OpenVMS' and rc & 1:
532 if sys.platform == 'OpenVMS' and rc & 1:
502 rc = 0
533 rc = 0
503 if rc and onerr:
534 if rc and onerr:
504 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
535 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
505 explainexit(rc)[0])
536 explainexit(rc)[0])
506 if errprefix:
537 if errprefix:
507 errmsg = '%s: %s' % (errprefix, errmsg)
538 errmsg = '%s: %s' % (errprefix, errmsg)
508 try:
539 try:
509 onerr.warn(errmsg + '\n')
540 onerr.warn(errmsg + '\n')
510 except AttributeError:
541 except AttributeError:
511 raise onerr(errmsg)
542 raise onerr(errmsg)
512 return rc
543 return rc
513
544
514 def checksignature(func):
545 def checksignature(func):
515 '''wrap a function with code to check for calling errors'''
546 '''wrap a function with code to check for calling errors'''
516 def check(*args, **kwargs):
547 def check(*args, **kwargs):
517 try:
548 try:
518 return func(*args, **kwargs)
549 return func(*args, **kwargs)
519 except TypeError:
550 except TypeError:
520 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
551 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
521 raise error.SignatureError
552 raise error.SignatureError
522 raise
553 raise
523
554
524 return check
555 return check
525
556
526 def copyfile(src, dest):
557 def copyfile(src, dest):
527 "copy a file, preserving mode and atime/mtime"
558 "copy a file, preserving mode and atime/mtime"
528 if os.path.lexists(dest):
559 if os.path.lexists(dest):
529 unlink(dest)
560 unlink(dest)
530 if os.path.islink(src):
561 if os.path.islink(src):
531 os.symlink(os.readlink(src), dest)
562 os.symlink(os.readlink(src), dest)
532 else:
563 else:
533 try:
564 try:
534 shutil.copyfile(src, dest)
565 shutil.copyfile(src, dest)
535 shutil.copymode(src, dest)
566 shutil.copymode(src, dest)
536 except shutil.Error, inst:
567 except shutil.Error, inst:
537 raise Abort(str(inst))
568 raise Abort(str(inst))
538
569
539 def copyfiles(src, dst, hardlink=None):
570 def copyfiles(src, dst, hardlink=None):
540 """Copy a directory tree using hardlinks if possible"""
571 """Copy a directory tree using hardlinks if possible"""
541
572
542 if hardlink is None:
573 if hardlink is None:
543 hardlink = (os.stat(src).st_dev ==
574 hardlink = (os.stat(src).st_dev ==
544 os.stat(os.path.dirname(dst)).st_dev)
575 os.stat(os.path.dirname(dst)).st_dev)
545
576
546 num = 0
577 num = 0
547 if os.path.isdir(src):
578 if os.path.isdir(src):
548 os.mkdir(dst)
579 os.mkdir(dst)
549 for name, kind in osutil.listdir(src):
580 for name, kind in osutil.listdir(src):
550 srcname = os.path.join(src, name)
581 srcname = os.path.join(src, name)
551 dstname = os.path.join(dst, name)
582 dstname = os.path.join(dst, name)
552 hardlink, n = copyfiles(srcname, dstname, hardlink)
583 hardlink, n = copyfiles(srcname, dstname, hardlink)
553 num += n
584 num += n
554 else:
585 else:
555 if hardlink:
586 if hardlink:
556 try:
587 try:
557 oslink(src, dst)
588 oslink(src, dst)
558 except (IOError, OSError):
589 except (IOError, OSError):
559 hardlink = False
590 hardlink = False
560 shutil.copy(src, dst)
591 shutil.copy(src, dst)
561 else:
592 else:
562 shutil.copy(src, dst)
593 shutil.copy(src, dst)
563 num += 1
594 num += 1
564
595
565 return hardlink, num
596 return hardlink, num
566
597
567 _winreservednames = '''con prn aux nul
598 _winreservednames = '''con prn aux nul
568 com1 com2 com3 com4 com5 com6 com7 com8 com9
599 com1 com2 com3 com4 com5 com6 com7 com8 com9
569 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
600 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
570 _winreservedchars = ':*?"<>|'
601 _winreservedchars = ':*?"<>|'
571 def checkwinfilename(path):
602 def checkwinfilename(path):
572 r'''Check that the base-relative path is a valid filename on Windows.
603 r'''Check that the base-relative path is a valid filename on Windows.
573 Returns None if the path is ok, or a UI string describing the problem.
604 Returns None if the path is ok, or a UI string describing the problem.
574
605
575 >>> checkwinfilename("just/a/normal/path")
606 >>> checkwinfilename("just/a/normal/path")
576 >>> checkwinfilename("foo/bar/con.xml")
607 >>> checkwinfilename("foo/bar/con.xml")
577 "filename contains 'con', which is reserved on Windows"
608 "filename contains 'con', which is reserved on Windows"
578 >>> checkwinfilename("foo/con.xml/bar")
609 >>> checkwinfilename("foo/con.xml/bar")
579 "filename contains 'con', which is reserved on Windows"
610 "filename contains 'con', which is reserved on Windows"
580 >>> checkwinfilename("foo/bar/xml.con")
611 >>> checkwinfilename("foo/bar/xml.con")
581 >>> checkwinfilename("foo/bar/AUX/bla.txt")
612 >>> checkwinfilename("foo/bar/AUX/bla.txt")
582 "filename contains 'AUX', which is reserved on Windows"
613 "filename contains 'AUX', which is reserved on Windows"
583 >>> checkwinfilename("foo/bar/bla:.txt")
614 >>> checkwinfilename("foo/bar/bla:.txt")
584 "filename contains ':', which is reserved on Windows"
615 "filename contains ':', which is reserved on Windows"
585 >>> checkwinfilename("foo/bar/b\07la.txt")
616 >>> checkwinfilename("foo/bar/b\07la.txt")
586 "filename contains '\\x07', which is invalid on Windows"
617 "filename contains '\\x07', which is invalid on Windows"
587 >>> checkwinfilename("foo/bar/bla ")
618 >>> checkwinfilename("foo/bar/bla ")
588 "filename ends with ' ', which is not allowed on Windows"
619 "filename ends with ' ', which is not allowed on Windows"
589 >>> checkwinfilename("../bar")
620 >>> checkwinfilename("../bar")
590 >>> checkwinfilename("foo\\")
621 >>> checkwinfilename("foo\\")
591 "filename ends with '\\', which is invalid on Windows"
622 "filename ends with '\\', which is invalid on Windows"
592 >>> checkwinfilename("foo\\/bar")
623 >>> checkwinfilename("foo\\/bar")
593 "directory name ends with '\\', which is invalid on Windows"
624 "directory name ends with '\\', which is invalid on Windows"
594 '''
625 '''
595 if path.endswith('\\'):
626 if path.endswith('\\'):
596 return _("filename ends with '\\', which is invalid on Windows")
627 return _("filename ends with '\\', which is invalid on Windows")
597 if '\\/' in path:
628 if '\\/' in path:
598 return _("directory name ends with '\\', which is invalid on Windows")
629 return _("directory name ends with '\\', which is invalid on Windows")
599 for n in path.replace('\\', '/').split('/'):
630 for n in path.replace('\\', '/').split('/'):
600 if not n:
631 if not n:
601 continue
632 continue
602 for c in n:
633 for c in n:
603 if c in _winreservedchars:
634 if c in _winreservedchars:
604 return _("filename contains '%s', which is reserved "
635 return _("filename contains '%s', which is reserved "
605 "on Windows") % c
636 "on Windows") % c
606 if ord(c) <= 31:
637 if ord(c) <= 31:
607 return _("filename contains %r, which is invalid "
638 return _("filename contains %r, which is invalid "
608 "on Windows") % c
639 "on Windows") % c
609 base = n.split('.')[0]
640 base = n.split('.')[0]
610 if base and base.lower() in _winreservednames:
641 if base and base.lower() in _winreservednames:
611 return _("filename contains '%s', which is reserved "
642 return _("filename contains '%s', which is reserved "
612 "on Windows") % base
643 "on Windows") % base
613 t = n[-1]
644 t = n[-1]
614 if t in '. ' and n not in '..':
645 if t in '. ' and n not in '..':
615 return _("filename ends with '%s', which is not allowed "
646 return _("filename ends with '%s', which is not allowed "
616 "on Windows") % t
647 "on Windows") % t
617
648
618 if os.name == 'nt':
649 if os.name == 'nt':
619 checkosfilename = checkwinfilename
650 checkosfilename = checkwinfilename
620 else:
651 else:
621 checkosfilename = platform.checkosfilename
652 checkosfilename = platform.checkosfilename
622
653
623 def makelock(info, pathname):
654 def makelock(info, pathname):
624 try:
655 try:
625 return os.symlink(info, pathname)
656 return os.symlink(info, pathname)
626 except OSError, why:
657 except OSError, why:
627 if why.errno == errno.EEXIST:
658 if why.errno == errno.EEXIST:
628 raise
659 raise
629 except AttributeError: # no symlink in os
660 except AttributeError: # no symlink in os
630 pass
661 pass
631
662
632 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
663 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
633 os.write(ld, info)
664 os.write(ld, info)
634 os.close(ld)
665 os.close(ld)
635
666
636 def readlock(pathname):
667 def readlock(pathname):
637 try:
668 try:
638 return os.readlink(pathname)
669 return os.readlink(pathname)
639 except OSError, why:
670 except OSError, why:
640 if why.errno not in (errno.EINVAL, errno.ENOSYS):
671 if why.errno not in (errno.EINVAL, errno.ENOSYS):
641 raise
672 raise
642 except AttributeError: # no symlink in os
673 except AttributeError: # no symlink in os
643 pass
674 pass
644 fp = posixfile(pathname)
675 fp = posixfile(pathname)
645 r = fp.read()
676 r = fp.read()
646 fp.close()
677 fp.close()
647 return r
678 return r
648
679
649 def fstat(fp):
680 def fstat(fp):
650 '''stat file object that may not have fileno method.'''
681 '''stat file object that may not have fileno method.'''
651 try:
682 try:
652 return os.fstat(fp.fileno())
683 return os.fstat(fp.fileno())
653 except AttributeError:
684 except AttributeError:
654 return os.stat(fp.name)
685 return os.stat(fp.name)
655
686
656 # File system features
687 # File system features
657
688
658 def checkcase(path):
689 def checkcase(path):
659 """
690 """
660 Return true if the given path is on a case-sensitive filesystem
691 Return true if the given path is on a case-sensitive filesystem
661
692
662 Requires a path (like /foo/.hg) ending with a foldable final
693 Requires a path (like /foo/.hg) ending with a foldable final
663 directory component.
694 directory component.
664 """
695 """
665 s1 = os.stat(path)
696 s1 = os.stat(path)
666 d, b = os.path.split(path)
697 d, b = os.path.split(path)
667 b2 = b.upper()
698 b2 = b.upper()
668 if b == b2:
699 if b == b2:
669 b2 = b.lower()
700 b2 = b.lower()
670 if b == b2:
701 if b == b2:
671 return True # no evidence against case sensitivity
702 return True # no evidence against case sensitivity
672 p2 = os.path.join(d, b2)
703 p2 = os.path.join(d, b2)
673 try:
704 try:
674 s2 = os.stat(p2)
705 s2 = os.stat(p2)
675 if s2 == s1:
706 if s2 == s1:
676 return False
707 return False
677 return True
708 return True
678 except OSError:
709 except OSError:
679 return True
710 return True
680
711
681 try:
712 try:
682 import re2
713 import re2
683 _re2 = None
714 _re2 = None
684 except ImportError:
715 except ImportError:
685 _re2 = False
716 _re2 = False
686
717
687 def compilere(pat, flags=0):
718 def compilere(pat, flags=0):
688 '''Compile a regular expression, using re2 if possible
719 '''Compile a regular expression, using re2 if possible
689
720
690 For best performance, use only re2-compatible regexp features. The
721 For best performance, use only re2-compatible regexp features. The
691 only flags from the re module that are re2-compatible are
722 only flags from the re module that are re2-compatible are
692 IGNORECASE and MULTILINE.'''
723 IGNORECASE and MULTILINE.'''
693 global _re2
724 global _re2
694 if _re2 is None:
725 if _re2 is None:
695 try:
726 try:
696 # check if match works, see issue3964
727 # check if match works, see issue3964
697 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
728 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
698 except ImportError:
729 except ImportError:
699 _re2 = False
730 _re2 = False
700 if _re2 and (flags & ~(re.IGNORECASE | re.MULTILINE)) == 0:
731 if _re2 and (flags & ~(re.IGNORECASE | re.MULTILINE)) == 0:
701 if flags & re.IGNORECASE:
732 if flags & re.IGNORECASE:
702 pat = '(?i)' + pat
733 pat = '(?i)' + pat
703 if flags & re.MULTILINE:
734 if flags & re.MULTILINE:
704 pat = '(?m)' + pat
735 pat = '(?m)' + pat
705 try:
736 try:
706 return re2.compile(pat)
737 return re2.compile(pat)
707 except re2.error:
738 except re2.error:
708 pass
739 pass
709 return re.compile(pat, flags)
740 return re.compile(pat, flags)
710
741
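
# Illustrative note, not part of util.py: compilere() silently falls back to
# the stdlib re module when re2 is unavailable or the flags are not
# re2-compatible, so the result is used like any compiled pattern:
#     pat = compilere(r'\[([^\[]+)\]', re.IGNORECASE)
#     pat.match('[UI]').group(1)      # 'UI'
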
711 _fspathcache = {}
742 _fspathcache = {}
712 def fspath(name, root):
743 def fspath(name, root):
713 '''Get name in the case stored in the filesystem
744 '''Get name in the case stored in the filesystem
714
745
715 The name should be relative to root, and be normcase-ed for efficiency.
746 The name should be relative to root, and be normcase-ed for efficiency.
716
747
717 Note that this function is unnecessary, and should not be
748 Note that this function is unnecessary, and should not be
718 called, for case-sensitive filesystems (simply because it's expensive).
749 called, for case-sensitive filesystems (simply because it's expensive).
719
750
720 The root should be normcase-ed, too.
751 The root should be normcase-ed, too.
721 '''
752 '''
722 def find(p, contents):
753 def find(p, contents):
723 for n in contents:
754 for n in contents:
724 if normcase(n) == p:
755 if normcase(n) == p:
725 return n
756 return n
726 return None
757 return None
727
758
728 seps = os.sep
759 seps = os.sep
729 if os.altsep:
760 if os.altsep:
730 seps = seps + os.altsep
761 seps = seps + os.altsep
731 # Protect backslashes. This gets silly very quickly.
762 # Protect backslashes. This gets silly very quickly.
732 seps.replace('\\','\\\\')
763 seps.replace('\\','\\\\')
733 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
764 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
734 dir = os.path.normpath(root)
765 dir = os.path.normpath(root)
735 result = []
766 result = []
736 for part, sep in pattern.findall(name):
767 for part, sep in pattern.findall(name):
737 if sep:
768 if sep:
738 result.append(sep)
769 result.append(sep)
739 continue
770 continue
740
771
741 if dir not in _fspathcache:
772 if dir not in _fspathcache:
742 _fspathcache[dir] = os.listdir(dir)
773 _fspathcache[dir] = os.listdir(dir)
743 contents = _fspathcache[dir]
774 contents = _fspathcache[dir]
744
775
745 found = find(part, contents)
776 found = find(part, contents)
746 if not found:
777 if not found:
747 # retry "once per directory" per "dirstate.walk" which
778 # retry "once per directory" per "dirstate.walk" which
748 # may take place for each patches of "hg qpush", for example
779 # may take place for each patches of "hg qpush", for example
749 contents = os.listdir(dir)
780 contents = os.listdir(dir)
750 _fspathcache[dir] = contents
781 _fspathcache[dir] = contents
751 found = find(part, contents)
782 found = find(part, contents)
752
783
753 result.append(found or part)
784 result.append(found or part)
754 dir = os.path.join(dir, part)
785 dir = os.path.join(dir, part)
755
786
756 return ''.join(result)
787 return ''.join(result)
757
788
758 def checknlink(testfile):
789 def checknlink(testfile):
759 '''check whether hardlink count reporting works properly'''
790 '''check whether hardlink count reporting works properly'''
760
791
761 # testfile may be open, so we need a separate file for checking to
792 # testfile may be open, so we need a separate file for checking to
762 # work around issue2543 (or testfile may get lost on Samba shares)
793 # work around issue2543 (or testfile may get lost on Samba shares)
763 f1 = testfile + ".hgtmp1"
794 f1 = testfile + ".hgtmp1"
764 if os.path.lexists(f1):
795 if os.path.lexists(f1):
765 return False
796 return False
766 try:
797 try:
767 posixfile(f1, 'w').close()
798 posixfile(f1, 'w').close()
768 except IOError:
799 except IOError:
769 return False
800 return False
770
801
771 f2 = testfile + ".hgtmp2"
802 f2 = testfile + ".hgtmp2"
772 fd = None
803 fd = None
773 try:
804 try:
774 try:
805 try:
775 oslink(f1, f2)
806 oslink(f1, f2)
776 except OSError:
807 except OSError:
777 return False
808 return False
778
809
779 # nlinks() may behave differently for files on Windows shares if
810 # nlinks() may behave differently for files on Windows shares if
780 # the file is open.
811 # the file is open.
781 fd = posixfile(f2)
812 fd = posixfile(f2)
782 return nlinks(f2) > 1
813 return nlinks(f2) > 1
783 finally:
814 finally:
784 if fd is not None:
815 if fd is not None:
785 fd.close()
816 fd.close()
786 for f in (f1, f2):
817 for f in (f1, f2):
787 try:
818 try:
788 os.unlink(f)
819 os.unlink(f)
789 except OSError:
820 except OSError:
790 pass
821 pass
791
822
792 def endswithsep(path):
823 def endswithsep(path):
793 '''Check path ends with os.sep or os.altsep.'''
824 '''Check path ends with os.sep or os.altsep.'''
794 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
825 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
795
826
796 def splitpath(path):
827 def splitpath(path):
797 '''Split path by os.sep.
828 '''Split path by os.sep.
798 Note that this function does not use os.altsep because this is
829 Note that this function does not use os.altsep because this is
799 an alternative of simple "xxx.split(os.sep)".
830 an alternative of simple "xxx.split(os.sep)".
800 It is recommended to use os.path.normpath() before using this
831 It is recommended to use os.path.normpath() before using this
801 function if need.'''
832 function if need.'''
802 return path.split(os.sep)
833 return path.split(os.sep)
803
834
804 def gui():
835 def gui():
805 '''Are we running in a GUI?'''
836 '''Are we running in a GUI?'''
806 if sys.platform == 'darwin':
837 if sys.platform == 'darwin':
807 if 'SSH_CONNECTION' in os.environ:
838 if 'SSH_CONNECTION' in os.environ:
808 # handle SSH access to a box where the user is logged in
839 # handle SSH access to a box where the user is logged in
809 return False
840 return False
810 elif getattr(osutil, 'isgui', None):
841 elif getattr(osutil, 'isgui', None):
811 # check if a CoreGraphics session is available
842 # check if a CoreGraphics session is available
812 return osutil.isgui()
843 return osutil.isgui()
813 else:
844 else:
814 # pure build; use a safe default
845 # pure build; use a safe default
815 return True
846 return True
816 else:
847 else:
817 return os.name == "nt" or os.environ.get("DISPLAY")
848 return os.name == "nt" or os.environ.get("DISPLAY")
818
849
819 def mktempcopy(name, emptyok=False, createmode=None):
850 def mktempcopy(name, emptyok=False, createmode=None):
820 """Create a temporary file with the same contents from name
851 """Create a temporary file with the same contents from name
821
852
822 The permission bits are copied from the original file.
853 The permission bits are copied from the original file.
823
854
824 If the temporary file is going to be truncated immediately, you
855 If the temporary file is going to be truncated immediately, you
825 can use emptyok=True as an optimization.
856 can use emptyok=True as an optimization.
826
857
827 Returns the name of the temporary file.
858 Returns the name of the temporary file.
828 """
859 """
829 d, fn = os.path.split(name)
860 d, fn = os.path.split(name)
830 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
861 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
831 os.close(fd)
862 os.close(fd)
832 # Temporary files are created with mode 0600, which is usually not
863 # Temporary files are created with mode 0600, which is usually not
833 # what we want. If the original file already exists, just copy
864 # what we want. If the original file already exists, just copy
834 # its mode. Otherwise, manually obey umask.
865 # its mode. Otherwise, manually obey umask.
835 copymode(name, temp, createmode)
866 copymode(name, temp, createmode)
836 if emptyok:
867 if emptyok:
837 return temp
868 return temp
838 try:
869 try:
839 try:
870 try:
840 ifp = posixfile(name, "rb")
871 ifp = posixfile(name, "rb")
841 except IOError, inst:
872 except IOError, inst:
842 if inst.errno == errno.ENOENT:
873 if inst.errno == errno.ENOENT:
843 return temp
874 return temp
844 if not getattr(inst, 'filename', None):
875 if not getattr(inst, 'filename', None):
845 inst.filename = name
876 inst.filename = name
846 raise
877 raise
847 ofp = posixfile(temp, "wb")
878 ofp = posixfile(temp, "wb")
848 for chunk in filechunkiter(ifp):
879 for chunk in filechunkiter(ifp):
849 ofp.write(chunk)
880 ofp.write(chunk)
850 ifp.close()
881 ifp.close()
851 ofp.close()
882 ofp.close()
852 except: # re-raises
883 except: # re-raises
853 try: os.unlink(temp)
884 try: os.unlink(temp)
854 except OSError: pass
885 except OSError: pass
855 raise
886 raise
856 return temp
887 return temp
857
888
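# Illustrative sketch (hypothetical path, not from the original source):
# create a same-directory temporary copy of a file that keeps its permission
# bits, rewrite the copy, then rename it over the original.
#
#   tmp = mktempcopy('/home/joe/.hgrc')
#   ...                                  # rewrite tmp in place
#   rename(tmp, '/home/joe/.hgrc')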
858 class atomictempfile(object):
889 class atomictempfile(object):
859 '''writable file object that atomically updates a file
890 '''writable file object that atomically updates a file
860
891
861 All writes will go to a temporary copy of the original file. Call
892 All writes will go to a temporary copy of the original file. Call
862 close() when you are done writing, and atomictempfile will rename
893 close() when you are done writing, and atomictempfile will rename
863 the temporary copy to the original name, making the changes
894 the temporary copy to the original name, making the changes
864 visible. If the object is destroyed without being closed, all your
895 visible. If the object is destroyed without being closed, all your
865 writes are discarded.
896 writes are discarded.
866 '''
897 '''
867 def __init__(self, name, mode='w+b', createmode=None):
898 def __init__(self, name, mode='w+b', createmode=None):
868 self.__name = name # permanent name
899 self.__name = name # permanent name
869 self._tempname = mktempcopy(name, emptyok=('w' in mode),
900 self._tempname = mktempcopy(name, emptyok=('w' in mode),
870 createmode=createmode)
901 createmode=createmode)
871 self._fp = posixfile(self._tempname, mode)
902 self._fp = posixfile(self._tempname, mode)
872
903
873 # delegated methods
904 # delegated methods
874 self.write = self._fp.write
905 self.write = self._fp.write
875 self.seek = self._fp.seek
906 self.seek = self._fp.seek
876 self.tell = self._fp.tell
907 self.tell = self._fp.tell
877 self.fileno = self._fp.fileno
908 self.fileno = self._fp.fileno
878
909
879 def close(self):
910 def close(self):
880 if not self._fp.closed:
911 if not self._fp.closed:
881 self._fp.close()
912 self._fp.close()
882 rename(self._tempname, localpath(self.__name))
913 rename(self._tempname, localpath(self.__name))
883
914
884 def discard(self):
915 def discard(self):
885 if not self._fp.closed:
916 if not self._fp.closed:
886 try:
917 try:
887 os.unlink(self._tempname)
918 os.unlink(self._tempname)
888 except OSError:
919 except OSError:
889 pass
920 pass
890 self._fp.close()
921 self._fp.close()
891
922
892 def __del__(self):
923 def __del__(self):
893 if safehasattr(self, '_fp'): # constructor actually did something
924 if safehasattr(self, '_fp'): # constructor actually did something
894 self.discard()
925 self.discard()
895
926
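# Illustrative usage sketch (hypothetical file name): all writes go to a
# temporary copy; close() publishes them atomically, discard() throws them
# away.
#
#   f = atomictempfile('data/state.txt')
#   f.write('new contents\n')
#   f.close()                    # renames the temp copy over data/state.txt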
896 def makedirs(name, mode=None, notindexed=False):
927 def makedirs(name, mode=None, notindexed=False):
897 """recursive directory creation with parent mode inheritance"""
928 """recursive directory creation with parent mode inheritance"""
898 try:
929 try:
899 makedir(name, notindexed)
930 makedir(name, notindexed)
900 except OSError, err:
931 except OSError, err:
901 if err.errno == errno.EEXIST:
932 if err.errno == errno.EEXIST:
902 return
933 return
903 if err.errno != errno.ENOENT or not name:
934 if err.errno != errno.ENOENT or not name:
904 raise
935 raise
905 parent = os.path.dirname(os.path.abspath(name))
936 parent = os.path.dirname(os.path.abspath(name))
906 if parent == name:
937 if parent == name:
907 raise
938 raise
908 makedirs(parent, mode, notindexed)
939 makedirs(parent, mode, notindexed)
909 makedir(name, notindexed)
940 makedir(name, notindexed)
910 if mode is not None:
941 if mode is not None:
911 os.chmod(name, mode)
942 os.chmod(name, mode)
912
943
913 def ensuredirs(name, mode=None):
944 def ensuredirs(name, mode=None):
914 """race-safe recursive directory creation"""
945 """race-safe recursive directory creation"""
915 if os.path.isdir(name):
946 if os.path.isdir(name):
916 return
947 return
917 parent = os.path.dirname(os.path.abspath(name))
948 parent = os.path.dirname(os.path.abspath(name))
918 if parent != name:
949 if parent != name:
919 ensuredirs(parent, mode)
950 ensuredirs(parent, mode)
920 try:
951 try:
921 os.mkdir(name)
952 os.mkdir(name)
922 except OSError, err:
953 except OSError, err:
923 if err.errno == errno.EEXIST and os.path.isdir(name):
954 if err.errno == errno.EEXIST and os.path.isdir(name):
924 # someone else seems to have won a directory creation race
955 # someone else seems to have won a directory creation race
925 return
956 return
926 raise
957 raise
927 if mode is not None:
958 if mode is not None:
928 os.chmod(name, mode)
959 os.chmod(name, mode)
929
960
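# Illustrative usage sketch (hypothetical path): create any missing parent
# directories, treating "somebody else created it first" as success.
#
#   ensuredirs('/tmp/example/cache/objects')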
930 def readfile(path):
961 def readfile(path):
931 fp = open(path, 'rb')
962 fp = open(path, 'rb')
932 try:
963 try:
933 return fp.read()
964 return fp.read()
934 finally:
965 finally:
935 fp.close()
966 fp.close()
936
967
937 def writefile(path, text):
968 def writefile(path, text):
938 fp = open(path, 'wb')
969 fp = open(path, 'wb')
939 try:
970 try:
940 fp.write(text)
971 fp.write(text)
941 finally:
972 finally:
942 fp.close()
973 fp.close()
943
974
944 def appendfile(path, text):
975 def appendfile(path, text):
945 fp = open(path, 'ab')
976 fp = open(path, 'ab')
946 try:
977 try:
947 fp.write(text)
978 fp.write(text)
948 finally:
979 finally:
949 fp.close()
980 fp.close()
950
981
951 class chunkbuffer(object):
982 class chunkbuffer(object):
952 """Allow arbitrary sized chunks of data to be efficiently read from an
983 """Allow arbitrary sized chunks of data to be efficiently read from an
953 iterator over chunks of arbitrary size."""
984 iterator over chunks of arbitrary size."""
954
985
955 def __init__(self, in_iter):
986 def __init__(self, in_iter):
956 """in_iter is the iterator that's iterating over the input chunks.
987 """in_iter is the iterator that's iterating over the input chunks.
957 targetsize is how big a buffer to try to maintain."""
988 targetsize is how big a buffer to try to maintain."""
958 def splitbig(chunks):
989 def splitbig(chunks):
959 for chunk in chunks:
990 for chunk in chunks:
960 if len(chunk) > 2**20:
991 if len(chunk) > 2**20:
961 pos = 0
992 pos = 0
962 while pos < len(chunk):
993 while pos < len(chunk):
963 end = pos + 2 ** 18
994 end = pos + 2 ** 18
964 yield chunk[pos:end]
995 yield chunk[pos:end]
965 pos = end
996 pos = end
966 else:
997 else:
967 yield chunk
998 yield chunk
968 self.iter = splitbig(in_iter)
999 self.iter = splitbig(in_iter)
969 self._queue = deque()
1000 self._queue = deque()
970
1001
971 def read(self, l=None):
1002 def read(self, l=None):
972 """Read L bytes of data from the iterator of chunks of data.
1003 """Read L bytes of data from the iterator of chunks of data.
973 Returns less than L bytes if the iterator runs dry.
1004 Returns less than L bytes if the iterator runs dry.
974
1005
975 If the size parameter is omitted, read everything."""
1006 If the size parameter is omitted, read everything."""
976 left = l
1007 left = l
977 buf = []
1008 buf = []
978 queue = self._queue
1009 queue = self._queue
979 while left is None or left > 0:
1010 while left is None or left > 0:
980 # refill the queue
1011 # refill the queue
981 if not queue:
1012 if not queue:
982 target = 2**18
1013 target = 2**18
983 for chunk in self.iter:
1014 for chunk in self.iter:
984 queue.append(chunk)
1015 queue.append(chunk)
985 target -= len(chunk)
1016 target -= len(chunk)
986 if target <= 0:
1017 if target <= 0:
987 break
1018 break
988 if not queue:
1019 if not queue:
989 break
1020 break
990
1021
991 chunk = queue.popleft()
1022 chunk = queue.popleft()
992 if left is not None:
1023 if left is not None:
993 left -= len(chunk)
1024 left -= len(chunk)
994 if left is not None and left < 0:
1025 if left is not None and left < 0:
995 queue.appendleft(chunk[left:])
1026 queue.appendleft(chunk[left:])
996 buf.append(chunk[:left])
1027 buf.append(chunk[:left])
997 else:
1028 else:
998 buf.append(chunk)
1029 buf.append(chunk)
999
1030
1000 return ''.join(buf)
1031 return ''.join(buf)
1001
1032
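# Illustrative example (not a doctest from the original source): fixed-size
# reads over an iterator of unevenly sized chunks.
#
#   >>> cb = chunkbuffer(iter(['abc', 'defgh', 'ij']))
#   >>> cb.read(4)
#   'abcd'
#   >>> cb.read()
#   'efghij'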
1002 def filechunkiter(f, size=65536, limit=None):
1033 def filechunkiter(f, size=65536, limit=None):
1003 """Create a generator that produces the data in the file size
1034 """Create a generator that produces the data in the file size
1004 (default 65536) bytes at a time, up to optional limit (default is
1035 (default 65536) bytes at a time, up to optional limit (default is
1005 to read all data). Chunks may be less than size bytes if the
1036 to read all data). Chunks may be less than size bytes if the
1006 chunk is the last chunk in the file, or the file is a socket or
1037 chunk is the last chunk in the file, or the file is a socket or
1007 some other type of file that sometimes reads less data than is
1038 some other type of file that sometimes reads less data than is
1008 requested."""
1039 requested."""
1009 assert size >= 0
1040 assert size >= 0
1010 assert limit is None or limit >= 0
1041 assert limit is None or limit >= 0
1011 while True:
1042 while True:
1012 if limit is None:
1043 if limit is None:
1013 nbytes = size
1044 nbytes = size
1014 else:
1045 else:
1015 nbytes = min(limit, size)
1046 nbytes = min(limit, size)
1016 s = nbytes and f.read(nbytes)
1047 s = nbytes and f.read(nbytes)
1017 if not s:
1048 if not s:
1018 break
1049 break
1019 if limit:
1050 if limit:
1020 limit -= len(s)
1051 limit -= len(s)
1021 yield s
1052 yield s
1022
1053
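# Illustrative usage sketch (the file name and the consume() callback are
# hypothetical): stream a file in bounded chunks instead of reading it whole.
#
#   fp = open('big-bundle.hg', 'rb')
#   for chunk in filechunkiter(fp, size=8192):
#       consume(chunk)             # each chunk is at most 8192 bytes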
1023 def makedate(timestamp=None):
1054 def makedate(timestamp=None):
1024 '''Return a unix timestamp (or the current time) as a (unixtime,
1055 '''Return a unix timestamp (or the current time) as a (unixtime,
1025 offset) tuple based off the local timezone.'''
1056 offset) tuple based off the local timezone.'''
1026 if timestamp is None:
1057 if timestamp is None:
1027 timestamp = time.time()
1058 timestamp = time.time()
1028 if timestamp < 0:
1059 if timestamp < 0:
1029 hint = _("check your clock")
1060 hint = _("check your clock")
1030 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1061 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1031 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1062 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1032 datetime.datetime.fromtimestamp(timestamp))
1063 datetime.datetime.fromtimestamp(timestamp))
1033 tz = delta.days * 86400 + delta.seconds
1064 tz = delta.days * 86400 + delta.seconds
1034 return timestamp, tz
1065 return timestamp, tz
1035
1066
1036 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1067 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1037 """represent a (unixtime, offset) tuple as a localized time.
1068 """represent a (unixtime, offset) tuple as a localized time.
1038 unixtime is seconds since the epoch, and offset is the time zone's
1069 unixtime is seconds since the epoch, and offset is the time zone's
1039 number of seconds away from UTC. if timezone is false, do not
1070 number of seconds away from UTC. if timezone is false, do not
1040 append time zone to string."""
1071 append time zone to string."""
1041 t, tz = date or makedate()
1072 t, tz = date or makedate()
1042 if t < 0:
1073 if t < 0:
1043 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1074 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
1044 tz = 0
1075 tz = 0
1045 if "%1" in format or "%2" in format or "%z" in format:
1076 if "%1" in format or "%2" in format or "%z" in format:
1046 sign = (tz > 0) and "-" or "+"
1077 sign = (tz > 0) and "-" or "+"
1047 minutes = abs(tz) // 60
1078 minutes = abs(tz) // 60
1048 format = format.replace("%z", "%1%2")
1079 format = format.replace("%z", "%1%2")
1049 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1080 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
1050 format = format.replace("%2", "%02d" % (minutes % 60))
1081 format = format.replace("%2", "%02d" % (minutes % 60))
1051 try:
1082 try:
1052 t = time.gmtime(float(t) - tz)
1083 t = time.gmtime(float(t) - tz)
1053 except ValueError:
1084 except ValueError:
1054 # time was out of range
1085 # time was out of range
1055 t = time.gmtime(sys.maxint)
1086 t = time.gmtime(sys.maxint)
1056 s = time.strftime(format, t)
1087 s = time.strftime(format, t)
1057 return s
1088 return s
1058
1089
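# Illustrative examples (not doctests from the original source): the offset
# is seconds west of UTC, so a zone east of UTC has a negative offset and
# UTC+01:00 is -3600.
#
#   >>> datestr((0, 0))
#   'Thu Jan 01 00:00:00 1970 +0000'
#   >>> datestr((0, -3600))
#   'Thu Jan 01 01:00:00 1970 +0100'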
1059 def shortdate(date=None):
1090 def shortdate(date=None):
1060 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1091 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1061 return datestr(date, format='%Y-%m-%d')
1092 return datestr(date, format='%Y-%m-%d')
1062
1093
1063 def strdate(string, format, defaults=[]):
1094 def strdate(string, format, defaults=[]):
1064 """parse a localized time string and return a (unixtime, offset) tuple.
1095 """parse a localized time string and return a (unixtime, offset) tuple.
1065 if the string cannot be parsed, ValueError is raised."""
1096 if the string cannot be parsed, ValueError is raised."""
1066 def timezone(string):
1097 def timezone(string):
1067 tz = string.split()[-1]
1098 tz = string.split()[-1]
1068 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1099 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1069 sign = (tz[0] == "+") and 1 or -1
1100 sign = (tz[0] == "+") and 1 or -1
1070 hours = int(tz[1:3])
1101 hours = int(tz[1:3])
1071 minutes = int(tz[3:5])
1102 minutes = int(tz[3:5])
1072 return -sign * (hours * 60 + minutes) * 60
1103 return -sign * (hours * 60 + minutes) * 60
1073 if tz == "GMT" or tz == "UTC":
1104 if tz == "GMT" or tz == "UTC":
1074 return 0
1105 return 0
1075 return None
1106 return None
1076
1107
1077 # NOTE: unixtime = localunixtime + offset
1108 # NOTE: unixtime = localunixtime + offset
1078 offset, date = timezone(string), string
1109 offset, date = timezone(string), string
1079 if offset is not None:
1110 if offset is not None:
1080 date = " ".join(string.split()[:-1])
1111 date = " ".join(string.split()[:-1])
1081
1112
1082 # add missing elements from defaults
1113 # add missing elements from defaults
1083 usenow = False # default to using biased defaults
1114 usenow = False # default to using biased defaults
1084 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1115 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1085 found = [True for p in part if ("%"+p) in format]
1116 found = [True for p in part if ("%"+p) in format]
1086 if not found:
1117 if not found:
1087 date += "@" + defaults[part][usenow]
1118 date += "@" + defaults[part][usenow]
1088 format += "@%" + part[0]
1119 format += "@%" + part[0]
1089 else:
1120 else:
1090 # We've found a specific time element, less specific time
1121 # We've found a specific time element, less specific time
1091 # elements are relative to today
1122 # elements are relative to today
1092 usenow = True
1123 usenow = True
1093
1124
1094 timetuple = time.strptime(date, format)
1125 timetuple = time.strptime(date, format)
1095 localunixtime = int(calendar.timegm(timetuple))
1126 localunixtime = int(calendar.timegm(timetuple))
1096 if offset is None:
1127 if offset is None:
1097 # local timezone
1128 # local timezone
1098 unixtime = int(time.mktime(timetuple))
1129 unixtime = int(time.mktime(timetuple))
1099 offset = unixtime - localunixtime
1130 offset = unixtime - localunixtime
1100 else:
1131 else:
1101 unixtime = localunixtime + offset
1132 unixtime = localunixtime + offset
1102 return unixtime, offset
1133 return unixtime, offset
1103
1134
1104 def parsedate(date, formats=None, bias={}):
1135 def parsedate(date, formats=None, bias={}):
1105 """parse a localized date/time and return a (unixtime, offset) tuple.
1136 """parse a localized date/time and return a (unixtime, offset) tuple.
1106
1137
1107 The date may be a "unixtime offset" string or in one of the specified
1138 The date may be a "unixtime offset" string or in one of the specified
1108 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1139 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1109
1140
1110 >>> parsedate(' today ') == parsedate(\
1141 >>> parsedate(' today ') == parsedate(\
1111 datetime.date.today().strftime('%b %d'))
1142 datetime.date.today().strftime('%b %d'))
1112 True
1143 True
1113 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1144 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1114 datetime.timedelta(days=1)\
1145 datetime.timedelta(days=1)\
1115 ).strftime('%b %d'))
1146 ).strftime('%b %d'))
1116 True
1147 True
1117 >>> now, tz = makedate()
1148 >>> now, tz = makedate()
1118 >>> strnow, strtz = parsedate('now')
1149 >>> strnow, strtz = parsedate('now')
1119 >>> (strnow - now) < 1
1150 >>> (strnow - now) < 1
1120 True
1151 True
1121 >>> tz == strtz
1152 >>> tz == strtz
1122 True
1153 True
1123 """
1154 """
1124 if not date:
1155 if not date:
1125 return 0, 0
1156 return 0, 0
1126 if isinstance(date, tuple) and len(date) == 2:
1157 if isinstance(date, tuple) and len(date) == 2:
1127 return date
1158 return date
1128 if not formats:
1159 if not formats:
1129 formats = defaultdateformats
1160 formats = defaultdateformats
1130 date = date.strip()
1161 date = date.strip()
1131
1162
1132 if date == _('now'):
1163 if date == _('now'):
1133 return makedate()
1164 return makedate()
1134 if date == _('today'):
1165 if date == _('today'):
1135 date = datetime.date.today().strftime('%b %d')
1166 date = datetime.date.today().strftime('%b %d')
1136 elif date == _('yesterday'):
1167 elif date == _('yesterday'):
1137 date = (datetime.date.today() -
1168 date = (datetime.date.today() -
1138 datetime.timedelta(days=1)).strftime('%b %d')
1169 datetime.timedelta(days=1)).strftime('%b %d')
1139
1170
1140 try:
1171 try:
1141 when, offset = map(int, date.split(' '))
1172 when, offset = map(int, date.split(' '))
1142 except ValueError:
1173 except ValueError:
1143 # fill out defaults
1174 # fill out defaults
1144 now = makedate()
1175 now = makedate()
1145 defaults = {}
1176 defaults = {}
1146 for part in ("d", "mb", "yY", "HI", "M", "S"):
1177 for part in ("d", "mb", "yY", "HI", "M", "S"):
1147 # this piece is for rounding the specific end of unknowns
1178 # this piece is for rounding the specific end of unknowns
1148 b = bias.get(part)
1179 b = bias.get(part)
1149 if b is None:
1180 if b is None:
1150 if part[0] in "HMS":
1181 if part[0] in "HMS":
1151 b = "00"
1182 b = "00"
1152 else:
1183 else:
1153 b = "0"
1184 b = "0"
1154
1185
1155 # this piece is for matching the generic end to today's date
1186 # this piece is for matching the generic end to today's date
1156 n = datestr(now, "%" + part[0])
1187 n = datestr(now, "%" + part[0])
1157
1188
1158 defaults[part] = (b, n)
1189 defaults[part] = (b, n)
1159
1190
1160 for format in formats:
1191 for format in formats:
1161 try:
1192 try:
1162 when, offset = strdate(date, format, defaults)
1193 when, offset = strdate(date, format, defaults)
1163 except (ValueError, OverflowError):
1194 except (ValueError, OverflowError):
1164 pass
1195 pass
1165 else:
1196 else:
1166 break
1197 break
1167 else:
1198 else:
1168 raise Abort(_('invalid date: %r') % date)
1199 raise Abort(_('invalid date: %r') % date)
1169 # validate explicit (probably user-specified) date and
1200 # validate explicit (probably user-specified) date and
1170 # time zone offset. values must fit in signed 32 bits for
1201 # time zone offset. values must fit in signed 32 bits for
1171 # current 32-bit linux runtimes. timezones go from UTC-12
1202 # current 32-bit linux runtimes. timezones go from UTC-12
1172 # to UTC+14
1203 # to UTC+14
1173 if abs(when) > 0x7fffffff:
1204 if abs(when) > 0x7fffffff:
1174 raise Abort(_('date exceeds 32 bits: %d') % when)
1205 raise Abort(_('date exceeds 32 bits: %d') % when)
1175 if when < 0:
1206 if when < 0:
1176 raise Abort(_('negative date value: %d') % when)
1207 raise Abort(_('negative date value: %d') % when)
1177 if offset < -50400 or offset > 43200:
1208 if offset < -50400 or offset > 43200:
1178 raise Abort(_('impossible time zone offset: %d') % offset)
1209 raise Abort(_('impossible time zone offset: %d') % offset)
1179 return when, offset
1210 return when, offset
1180
1211
1181 def matchdate(date):
1212 def matchdate(date):
1182 """Return a function that matches a given date match specifier
1213 """Return a function that matches a given date match specifier
1183
1214
1184 Formats include:
1215 Formats include:
1185
1216
1186 '{date}' match a given date to the accuracy provided
1217 '{date}' match a given date to the accuracy provided
1187
1218
1188 '<{date}' on or before a given date
1219 '<{date}' on or before a given date
1189
1220
1190 '>{date}' on or after a given date
1221 '>{date}' on or after a given date
1191
1222
1192 >>> p1 = parsedate("10:29:59")
1223 >>> p1 = parsedate("10:29:59")
1193 >>> p2 = parsedate("10:30:00")
1224 >>> p2 = parsedate("10:30:00")
1194 >>> p3 = parsedate("10:30:59")
1225 >>> p3 = parsedate("10:30:59")
1195 >>> p4 = parsedate("10:31:00")
1226 >>> p4 = parsedate("10:31:00")
1196 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1227 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1197 >>> f = matchdate("10:30")
1228 >>> f = matchdate("10:30")
1198 >>> f(p1[0])
1229 >>> f(p1[0])
1199 False
1230 False
1200 >>> f(p2[0])
1231 >>> f(p2[0])
1201 True
1232 True
1202 >>> f(p3[0])
1233 >>> f(p3[0])
1203 True
1234 True
1204 >>> f(p4[0])
1235 >>> f(p4[0])
1205 False
1236 False
1206 >>> f(p5[0])
1237 >>> f(p5[0])
1207 False
1238 False
1208 """
1239 """
1209
1240
1210 def lower(date):
1241 def lower(date):
1211 d = {'mb': "1", 'd': "1"}
1242 d = {'mb': "1", 'd': "1"}
1212 return parsedate(date, extendeddateformats, d)[0]
1243 return parsedate(date, extendeddateformats, d)[0]
1213
1244
1214 def upper(date):
1245 def upper(date):
1215 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1246 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
1216 for days in ("31", "30", "29"):
1247 for days in ("31", "30", "29"):
1217 try:
1248 try:
1218 d["d"] = days
1249 d["d"] = days
1219 return parsedate(date, extendeddateformats, d)[0]
1250 return parsedate(date, extendeddateformats, d)[0]
1220 except Abort:
1251 except Abort:
1221 pass
1252 pass
1222 d["d"] = "28"
1253 d["d"] = "28"
1223 return parsedate(date, extendeddateformats, d)[0]
1254 return parsedate(date, extendeddateformats, d)[0]
1224
1255
1225 date = date.strip()
1256 date = date.strip()
1226
1257
1227 if not date:
1258 if not date:
1228 raise Abort(_("dates cannot consist entirely of whitespace"))
1259 raise Abort(_("dates cannot consist entirely of whitespace"))
1229 elif date[0] == "<":
1260 elif date[0] == "<":
1230 if not date[1:]:
1261 if not date[1:]:
1231 raise Abort(_("invalid day spec, use '<DATE'"))
1262 raise Abort(_("invalid day spec, use '<DATE'"))
1232 when = upper(date[1:])
1263 when = upper(date[1:])
1233 return lambda x: x <= when
1264 return lambda x: x <= when
1234 elif date[0] == ">":
1265 elif date[0] == ">":
1235 if not date[1:]:
1266 if not date[1:]:
1236 raise Abort(_("invalid day spec, use '>DATE'"))
1267 raise Abort(_("invalid day spec, use '>DATE'"))
1237 when = lower(date[1:])
1268 when = lower(date[1:])
1238 return lambda x: x >= when
1269 return lambda x: x >= when
1239 elif date[0] == "-":
1270 elif date[0] == "-":
1240 try:
1271 try:
1241 days = int(date[1:])
1272 days = int(date[1:])
1242 except ValueError:
1273 except ValueError:
1243 raise Abort(_("invalid day spec: %s") % date[1:])
1274 raise Abort(_("invalid day spec: %s") % date[1:])
1244 if days < 0:
1275 if days < 0:
1245 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1276 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1246 % date[1:])
1277 % date[1:])
1247 when = makedate()[0] - days * 3600 * 24
1278 when = makedate()[0] - days * 3600 * 24
1248 return lambda x: x >= when
1279 return lambda x: x >= when
1249 elif " to " in date:
1280 elif " to " in date:
1250 a, b = date.split(" to ")
1281 a, b = date.split(" to ")
1251 start, stop = lower(a), upper(b)
1282 start, stop = lower(a), upper(b)
1252 return lambda x: x >= start and x <= stop
1283 return lambda x: x >= start and x <= stop
1253 else:
1284 else:
1254 start, stop = lower(date), upper(date)
1285 start, stop = lower(date), upper(date)
1255 return lambda x: x >= start and x <= stop
1286 return lambda x: x >= start and x <= stop
1256
1287
1257 def shortuser(user):
1288 def shortuser(user):
1258 """Return a short representation of a user name or email address."""
1289 """Return a short representation of a user name or email address."""
1259 f = user.find('@')
1290 f = user.find('@')
1260 if f >= 0:
1291 if f >= 0:
1261 user = user[:f]
1292 user = user[:f]
1262 f = user.find('<')
1293 f = user.find('<')
1263 if f >= 0:
1294 if f >= 0:
1264 user = user[f + 1:]
1295 user = user[f + 1:]
1265 f = user.find(' ')
1296 f = user.find(' ')
1266 if f >= 0:
1297 if f >= 0:
1267 user = user[:f]
1298 user = user[:f]
1268 f = user.find('.')
1299 f = user.find('.')
1269 if f >= 0:
1300 if f >= 0:
1270 user = user[:f]
1301 user = user[:f]
1271 return user
1302 return user
1272
1303
1273 def emailuser(user):
1304 def emailuser(user):
1274 """Return the user portion of an email address."""
1305 """Return the user portion of an email address."""
1275 f = user.find('@')
1306 f = user.find('@')
1276 if f >= 0:
1307 if f >= 0:
1277 user = user[:f]
1308 user = user[:f]
1278 f = user.find('<')
1309 f = user.find('<')
1279 if f >= 0:
1310 if f >= 0:
1280 user = user[f + 1:]
1311 user = user[f + 1:]
1281 return user
1312 return user
1282
1313
1283 def email(author):
1314 def email(author):
1284 '''get email of author.'''
1315 '''get email of author.'''
1285 r = author.find('>')
1316 r = author.find('>')
1286 if r == -1:
1317 if r == -1:
1287 r = None
1318 r = None
1288 return author[author.find('<') + 1:r]
1319 return author[author.find('<') + 1:r]
1289
1320
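# Illustrative examples (not doctests from the original source):
#
#   >>> author = 'Joe User <joe.user@example.com>'
#   >>> shortuser(author)
#   'joe'
#   >>> emailuser(author)
#   'joe.user'
#   >>> email(author)
#   'joe.user@example.com'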
1290 def _ellipsis(text, maxlength):
1321 def _ellipsis(text, maxlength):
1291 if len(text) <= maxlength:
1322 if len(text) <= maxlength:
1292 return text, False
1323 return text, False
1293 else:
1324 else:
1294 return "%s..." % (text[:maxlength - 3]), True
1325 return "%s..." % (text[:maxlength - 3]), True
1295
1326
1296 def ellipsis(text, maxlength=400):
1327 def ellipsis(text, maxlength=400):
1297 """Trim string to at most maxlength (default: 400) characters."""
1328 """Trim string to at most maxlength (default: 400) characters."""
1298 try:
1329 try:
1299 # use unicode not to split at intermediate multi-byte sequence
1330 # use unicode not to split at intermediate multi-byte sequence
1300 utext, truncated = _ellipsis(text.decode(encoding.encoding),
1331 utext, truncated = _ellipsis(text.decode(encoding.encoding),
1301 maxlength)
1332 maxlength)
1302 if not truncated:
1333 if not truncated:
1303 return text
1334 return text
1304 return utext.encode(encoding.encoding)
1335 return utext.encode(encoding.encoding)
1305 except (UnicodeDecodeError, UnicodeEncodeError):
1336 except (UnicodeDecodeError, UnicodeEncodeError):
1306 return _ellipsis(text, maxlength)[0]
1337 return _ellipsis(text, maxlength)[0]
1307
1338
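# Illustrative example (ASCII input; multi-byte text is handled through the
# configured encoding); not a doctest from the original source:
#
#   >>> ellipsis('0123456789', maxlength=8)
#   '01234...'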
1308 def unitcountfn(*unittable):
1339 def unitcountfn(*unittable):
1309 '''return a function that renders a readable count of some quantity'''
1340 '''return a function that renders a readable count of some quantity'''
1310
1341
1311 def go(count):
1342 def go(count):
1312 for multiplier, divisor, format in unittable:
1343 for multiplier, divisor, format in unittable:
1313 if count >= divisor * multiplier:
1344 if count >= divisor * multiplier:
1314 return format % (count / float(divisor))
1345 return format % (count / float(divisor))
1315 return unittable[-1][2] % count
1346 return unittable[-1][2] % count
1316
1347
1317 return go
1348 return go
1318
1349
1319 bytecount = unitcountfn(
1350 bytecount = unitcountfn(
1320 (100, 1 << 30, _('%.0f GB')),
1351 (100, 1 << 30, _('%.0f GB')),
1321 (10, 1 << 30, _('%.1f GB')),
1352 (10, 1 << 30, _('%.1f GB')),
1322 (1, 1 << 30, _('%.2f GB')),
1353 (1, 1 << 30, _('%.2f GB')),
1323 (100, 1 << 20, _('%.0f MB')),
1354 (100, 1 << 20, _('%.0f MB')),
1324 (10, 1 << 20, _('%.1f MB')),
1355 (10, 1 << 20, _('%.1f MB')),
1325 (1, 1 << 20, _('%.2f MB')),
1356 (1, 1 << 20, _('%.2f MB')),
1326 (100, 1 << 10, _('%.0f KB')),
1357 (100, 1 << 10, _('%.0f KB')),
1327 (10, 1 << 10, _('%.1f KB')),
1358 (10, 1 << 10, _('%.1f KB')),
1328 (1, 1 << 10, _('%.2f KB')),
1359 (1, 1 << 10, _('%.2f KB')),
1329 (1, 1, _('%.0f bytes')),
1360 (1, 1, _('%.0f bytes')),
1330 )
1361 )
1331
1362
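# Illustrative examples (not doctests from the original source): the first
# matching (multiplier, divisor) row in the table picks the unit and the
# precision.
#
#   >>> bytecount(500)
#   '500 bytes'
#   >>> bytecount(1234567)
#   '1.18 MB'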
1332 def uirepr(s):
1363 def uirepr(s):
1333 # Avoid double backslash in Windows path repr()
1364 # Avoid double backslash in Windows path repr()
1334 return repr(s).replace('\\\\', '\\')
1365 return repr(s).replace('\\\\', '\\')
1335
1366
1336 # delay import of textwrap
1367 # delay import of textwrap
1337 def MBTextWrapper(**kwargs):
1368 def MBTextWrapper(**kwargs):
1338 class tw(textwrap.TextWrapper):
1369 class tw(textwrap.TextWrapper):
1339 """
1370 """
1340 Extend TextWrapper for width-awareness.
1371 Extend TextWrapper for width-awareness.
1341
1372
1342 Neither the number of 'bytes' in any encoding nor the number of
1373 Neither the number of 'bytes' in any encoding nor the number of
1343 'characters' is appropriate for calculating terminal columns of a string.
1374 'characters' is appropriate for calculating terminal columns of a string.
1344
1375
1345 The original TextWrapper implementation uses the built-in 'len()'
1376 The original TextWrapper implementation uses the built-in 'len()'
1346 directly, so overriding is needed to use the width of each character.
1377 directly, so overriding is needed to use the width of each character.
1347
1378
1348 In addition, characters classified as 'ambiguous' width are
1379 In addition, characters classified as 'ambiguous' width are
1349 treated as wide in East Asian locales, but as narrow elsewhere.
1380 treated as wide in East Asian locales, but as narrow elsewhere.
1350
1381
1351 This requires a user decision to determine the width of such characters.
1382 This requires a user decision to determine the width of such characters.
1352 """
1383 """
1353 def __init__(self, **kwargs):
1384 def __init__(self, **kwargs):
1354 textwrap.TextWrapper.__init__(self, **kwargs)
1385 textwrap.TextWrapper.__init__(self, **kwargs)
1355
1386
1356 # for compatibility between 2.4 and 2.6
1387 # for compatibility between 2.4 and 2.6
1357 if getattr(self, 'drop_whitespace', None) is None:
1388 if getattr(self, 'drop_whitespace', None) is None:
1358 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1389 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1359
1390
1360 def _cutdown(self, ucstr, space_left):
1391 def _cutdown(self, ucstr, space_left):
1361 l = 0
1392 l = 0
1362 colwidth = encoding.ucolwidth
1393 colwidth = encoding.ucolwidth
1363 for i in xrange(len(ucstr)):
1394 for i in xrange(len(ucstr)):
1364 l += colwidth(ucstr[i])
1395 l += colwidth(ucstr[i])
1365 if space_left < l:
1396 if space_left < l:
1366 return (ucstr[:i], ucstr[i:])
1397 return (ucstr[:i], ucstr[i:])
1367 return ucstr, ''
1398 return ucstr, ''
1368
1399
1369 # overriding of base class
1400 # overriding of base class
1370 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1401 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1371 space_left = max(width - cur_len, 1)
1402 space_left = max(width - cur_len, 1)
1372
1403
1373 if self.break_long_words:
1404 if self.break_long_words:
1374 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1405 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1375 cur_line.append(cut)
1406 cur_line.append(cut)
1376 reversed_chunks[-1] = res
1407 reversed_chunks[-1] = res
1377 elif not cur_line:
1408 elif not cur_line:
1378 cur_line.append(reversed_chunks.pop())
1409 cur_line.append(reversed_chunks.pop())
1379
1410
1380 # this overriding code is imported from TextWrapper of python 2.6
1411 # this overriding code is imported from TextWrapper of python 2.6
1381 # to calculate columns of string by 'encoding.ucolwidth()'
1412 # to calculate columns of string by 'encoding.ucolwidth()'
1382 def _wrap_chunks(self, chunks):
1413 def _wrap_chunks(self, chunks):
1383 colwidth = encoding.ucolwidth
1414 colwidth = encoding.ucolwidth
1384
1415
1385 lines = []
1416 lines = []
1386 if self.width <= 0:
1417 if self.width <= 0:
1387 raise ValueError("invalid width %r (must be > 0)" % self.width)
1418 raise ValueError("invalid width %r (must be > 0)" % self.width)
1388
1419
1389 # Arrange in reverse order so items can be efficiently popped
1420 # Arrange in reverse order so items can be efficiently popped
1390 # from a stack of chunks.
1421 # from a stack of chunks.
1391 chunks.reverse()
1422 chunks.reverse()
1392
1423
1393 while chunks:
1424 while chunks:
1394
1425
1395 # Start the list of chunks that will make up the current line.
1426 # Start the list of chunks that will make up the current line.
1396 # cur_len is just the length of all the chunks in cur_line.
1427 # cur_len is just the length of all the chunks in cur_line.
1397 cur_line = []
1428 cur_line = []
1398 cur_len = 0
1429 cur_len = 0
1399
1430
1400 # Figure out which static string will prefix this line.
1431 # Figure out which static string will prefix this line.
1401 if lines:
1432 if lines:
1402 indent = self.subsequent_indent
1433 indent = self.subsequent_indent
1403 else:
1434 else:
1404 indent = self.initial_indent
1435 indent = self.initial_indent
1405
1436
1406 # Maximum width for this line.
1437 # Maximum width for this line.
1407 width = self.width - len(indent)
1438 width = self.width - len(indent)
1408
1439
1409 # First chunk on line is whitespace -- drop it, unless this
1440 # First chunk on line is whitespace -- drop it, unless this
1410 # is the very beginning of the text (i.e. no lines started yet).
1441 # is the very beginning of the text (i.e. no lines started yet).
1411 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1442 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1412 del chunks[-1]
1443 del chunks[-1]
1413
1444
1414 while chunks:
1445 while chunks:
1415 l = colwidth(chunks[-1])
1446 l = colwidth(chunks[-1])
1416
1447
1417 # Can at least squeeze this chunk onto the current line.
1448 # Can at least squeeze this chunk onto the current line.
1418 if cur_len + l <= width:
1449 if cur_len + l <= width:
1419 cur_line.append(chunks.pop())
1450 cur_line.append(chunks.pop())
1420 cur_len += l
1451 cur_len += l
1421
1452
1422 # Nope, this line is full.
1453 # Nope, this line is full.
1423 else:
1454 else:
1424 break
1455 break
1425
1456
1426 # The current line is full, and the next chunk is too big to
1457 # The current line is full, and the next chunk is too big to
1427 # fit on *any* line (not just this one).
1458 # fit on *any* line (not just this one).
1428 if chunks and colwidth(chunks[-1]) > width:
1459 if chunks and colwidth(chunks[-1]) > width:
1429 self._handle_long_word(chunks, cur_line, cur_len, width)
1460 self._handle_long_word(chunks, cur_line, cur_len, width)
1430
1461
1431 # If the last chunk on this line is all whitespace, drop it.
1462 # If the last chunk on this line is all whitespace, drop it.
1432 if (self.drop_whitespace and
1463 if (self.drop_whitespace and
1433 cur_line and cur_line[-1].strip() == ''):
1464 cur_line and cur_line[-1].strip() == ''):
1434 del cur_line[-1]
1465 del cur_line[-1]
1435
1466
1436 # Convert current line back to a string and store it in list
1467 # Convert current line back to a string and store it in list
1437 # of all lines (return value).
1468 # of all lines (return value).
1438 if cur_line:
1469 if cur_line:
1439 lines.append(indent + ''.join(cur_line))
1470 lines.append(indent + ''.join(cur_line))
1440
1471
1441 return lines
1472 return lines
1442
1473
1443 global MBTextWrapper
1474 global MBTextWrapper
1444 MBTextWrapper = tw
1475 MBTextWrapper = tw
1445 return tw(**kwargs)
1476 return tw(**kwargs)
1446
1477
1447 def wrap(line, width, initindent='', hangindent=''):
1478 def wrap(line, width, initindent='', hangindent=''):
1448 maxindent = max(len(hangindent), len(initindent))
1479 maxindent = max(len(hangindent), len(initindent))
1449 if width <= maxindent:
1480 if width <= maxindent:
1450 # adjust for weird terminal size
1481 # adjust for weird terminal size
1451 width = max(78, maxindent + 1)
1482 width = max(78, maxindent + 1)
1452 line = line.decode(encoding.encoding, encoding.encodingmode)
1483 line = line.decode(encoding.encoding, encoding.encodingmode)
1453 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1484 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1454 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1485 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1455 wrapper = MBTextWrapper(width=width,
1486 wrapper = MBTextWrapper(width=width,
1456 initial_indent=initindent,
1487 initial_indent=initindent,
1457 subsequent_indent=hangindent)
1488 subsequent_indent=hangindent)
1458 return wrapper.fill(line).encode(encoding.encoding)
1489 return wrapper.fill(line).encode(encoding.encoding)
1459
1490
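# Illustrative example (ASCII input behaves like textwrap.fill; East Asian
# wide characters count as two columns); not a doctest from the original
# source:
#
#   >>> wrap('the quick brown fox', width=10)
#   'the quick\nbrown fox'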
1460 def iterlines(iterator):
1491 def iterlines(iterator):
1461 for chunk in iterator:
1492 for chunk in iterator:
1462 for line in chunk.splitlines():
1493 for line in chunk.splitlines():
1463 yield line
1494 yield line
1464
1495
1465 def expandpath(path):
1496 def expandpath(path):
1466 return os.path.expanduser(os.path.expandvars(path))
1497 return os.path.expanduser(os.path.expandvars(path))
1467
1498
1468 def hgcmd():
1499 def hgcmd():
1469 """Return the command used to execute current hg
1500 """Return the command used to execute current hg
1470
1501
1471 This is different from hgexecutable() because on Windows we want
1502 This is different from hgexecutable() because on Windows we want
1472 to avoid things opening new shell windows like batch files, so we
1503 to avoid things opening new shell windows like batch files, so we
1473 get either the python call or current executable.
1504 get either the python call or current executable.
1474 """
1505 """
1475 if mainfrozen():
1506 if mainfrozen():
1476 return [sys.executable]
1507 return [sys.executable]
1477 return gethgcmd()
1508 return gethgcmd()
1478
1509
1479 def rundetached(args, condfn):
1510 def rundetached(args, condfn):
1480 """Execute the argument list in a detached process.
1511 """Execute the argument list in a detached process.
1481
1512
1482 condfn is a callable which is called repeatedly and should return
1513 condfn is a callable which is called repeatedly and should return
1483 True once the child process is known to have started successfully.
1514 True once the child process is known to have started successfully.
1484 At this point, the child process PID is returned. If the child
1515 At this point, the child process PID is returned. If the child
1485 process fails to start or finishes before condfn() evaluates to
1516 process fails to start or finishes before condfn() evaluates to
1486 True, return -1.
1517 True, return -1.
1487 """
1518 """
1488 # Windows case is easier because the child process is either
1519 # Windows case is easier because the child process is either
1489 # successfully starting and validating the condition or exiting
1520 # successfully starting and validating the condition or exiting
1490 # on failure. We just poll on its PID. On Unix, if the child
1521 # on failure. We just poll on its PID. On Unix, if the child
1491 # process fails to start, it will be left in a zombie state until
1522 # process fails to start, it will be left in a zombie state until
1492 # the parent waits on it, which we cannot do since we expect a long
1523 # the parent waits on it, which we cannot do since we expect a long
1493 # running process on success. Instead we listen for SIGCHLD telling
1524 # running process on success. Instead we listen for SIGCHLD telling
1494 # us our child process terminated.
1525 # us our child process terminated.
1495 terminated = set()
1526 terminated = set()
1496 def handler(signum, frame):
1527 def handler(signum, frame):
1497 terminated.add(os.wait())
1528 terminated.add(os.wait())
1498 prevhandler = None
1529 prevhandler = None
1499 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1530 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1500 if SIGCHLD is not None:
1531 if SIGCHLD is not None:
1501 prevhandler = signal.signal(SIGCHLD, handler)
1532 prevhandler = signal.signal(SIGCHLD, handler)
1502 try:
1533 try:
1503 pid = spawndetached(args)
1534 pid = spawndetached(args)
1504 while not condfn():
1535 while not condfn():
1505 if ((pid in terminated or not testpid(pid))
1536 if ((pid in terminated or not testpid(pid))
1506 and not condfn()):
1537 and not condfn()):
1507 return -1
1538 return -1
1508 time.sleep(0.1)
1539 time.sleep(0.1)
1509 return pid
1540 return pid
1510 finally:
1541 finally:
1511 if prevhandler is not None:
1542 if prevhandler is not None:
1512 signal.signal(signal.SIGCHLD, prevhandler)
1543 signal.signal(signal.SIGCHLD, prevhandler)
1513
1544
1514 try:
1545 try:
1515 any, all = any, all
1546 any, all = any, all
1516 except NameError:
1547 except NameError:
1517 def any(iterable):
1548 def any(iterable):
1518 for i in iterable:
1549 for i in iterable:
1519 if i:
1550 if i:
1520 return True
1551 return True
1521 return False
1552 return False
1522
1553
1523 def all(iterable):
1554 def all(iterable):
1524 for i in iterable:
1555 for i in iterable:
1525 if not i:
1556 if not i:
1526 return False
1557 return False
1527 return True
1558 return True
1528
1559
1529 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1560 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1530 """Return the result of interpolating items in the mapping into string s.
1561 """Return the result of interpolating items in the mapping into string s.
1531
1562
1532 prefix is a single character string, or a two character string with
1563 prefix is a single character string, or a two character string with
1533 a backslash as the first character if the prefix needs to be escaped in
1564 a backslash as the first character if the prefix needs to be escaped in
1534 a regular expression.
1565 a regular expression.
1535
1566
1536 fn is an optional function that will be applied to the replacement text
1567 fn is an optional function that will be applied to the replacement text
1537 just before replacement.
1568 just before replacement.
1538
1569
1539 escape_prefix is an optional flag that allows using a doubled prefix
1570 escape_prefix is an optional flag that allows using a doubled prefix
1540 to escape the prefix itself.
1571 to escape the prefix itself.
1541 """
1572 """
1542 fn = fn or (lambda s: s)
1573 fn = fn or (lambda s: s)
1543 patterns = '|'.join(mapping.keys())
1574 patterns = '|'.join(mapping.keys())
1544 if escape_prefix:
1575 if escape_prefix:
1545 patterns += '|' + prefix
1576 patterns += '|' + prefix
1546 if len(prefix) > 1:
1577 if len(prefix) > 1:
1547 prefix_char = prefix[1:]
1578 prefix_char = prefix[1:]
1548 else:
1579 else:
1549 prefix_char = prefix
1580 prefix_char = prefix
1550 mapping[prefix_char] = prefix_char
1581 mapping[prefix_char] = prefix_char
1551 r = re.compile(r'%s(%s)' % (prefix, patterns))
1582 r = re.compile(r'%s(%s)' % (prefix, patterns))
1552 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1583 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1553
1584
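# Illustrative example (hypothetical mapping; not a doctest from the original
# source): every '%'-prefixed key found in the mapping is substituted.
#
#   >>> interpolate('%', {'user': 'joe'}, 'hello %user')
#   'hello joe'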
1554 def getport(port):
1585 def getport(port):
1555 """Return the port for a given network service.
1586 """Return the port for a given network service.
1556
1587
1557 If port is an integer, it's returned as is. If it's a string, it's
1588 If port is an integer, it's returned as is. If it's a string, it's
1558 looked up using socket.getservbyname(). If there's no matching
1589 looked up using socket.getservbyname(). If there's no matching
1559 service, util.Abort is raised.
1590 service, util.Abort is raised.
1560 """
1591 """
1561 try:
1592 try:
1562 return int(port)
1593 return int(port)
1563 except ValueError:
1594 except ValueError:
1564 pass
1595 pass
1565
1596
1566 try:
1597 try:
1567 return socket.getservbyname(port)
1598 return socket.getservbyname(port)
1568 except socket.error:
1599 except socket.error:
1569 raise Abort(_("no port number associated with service '%s'") % port)
1600 raise Abort(_("no port number associated with service '%s'") % port)
1570
1601
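# Illustrative examples (the 'http' lookup goes through the system services
# database, so 80 is the typical answer); not doctests from the original
# source:
#
#   >>> getport(8000), getport('8000')
#   (8000, 8000)
#   >>> getport('http')
#   80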
1571 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1602 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1572 '0': False, 'no': False, 'false': False, 'off': False,
1603 '0': False, 'no': False, 'false': False, 'off': False,
1573 'never': False}
1604 'never': False}
1574
1605
1575 def parsebool(s):
1606 def parsebool(s):
1576 """Parse s into a boolean.
1607 """Parse s into a boolean.
1577
1608
1578 If s is not a valid boolean, returns None.
1609 If s is not a valid boolean, returns None.
1579 """
1610 """
1580 return _booleans.get(s.lower(), None)
1611 return _booleans.get(s.lower(), None)
1581
1612
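# Illustrative example (not a doctest from the original source): matching is
# case-insensitive and unknown spellings map to None.
#
#   >>> parsebool('YES'), parsebool('off'), parsebool('maybe')
#   (True, False, None)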
1582 _hexdig = '0123456789ABCDEFabcdef'
1613 _hexdig = '0123456789ABCDEFabcdef'
1583 _hextochr = dict((a + b, chr(int(a + b, 16)))
1614 _hextochr = dict((a + b, chr(int(a + b, 16)))
1584 for a in _hexdig for b in _hexdig)
1615 for a in _hexdig for b in _hexdig)
1585
1616
1586 def _urlunquote(s):
1617 def _urlunquote(s):
1587 """Decode HTTP/HTML % encoding.
1618 """Decode HTTP/HTML % encoding.
1588
1619
1589 >>> _urlunquote('abc%20def')
1620 >>> _urlunquote('abc%20def')
1590 'abc def'
1621 'abc def'
1591 """
1622 """
1592 res = s.split('%')
1623 res = s.split('%')
1593 # fastpath
1624 # fastpath
1594 if len(res) == 1:
1625 if len(res) == 1:
1595 return s
1626 return s
1596 s = res[0]
1627 s = res[0]
1597 for item in res[1:]:
1628 for item in res[1:]:
1598 try:
1629 try:
1599 s += _hextochr[item[:2]] + item[2:]
1630 s += _hextochr[item[:2]] + item[2:]
1600 except KeyError:
1631 except KeyError:
1601 s += '%' + item
1632 s += '%' + item
1602 except UnicodeDecodeError:
1633 except UnicodeDecodeError:
1603 s += unichr(int(item[:2], 16)) + item[2:]
1634 s += unichr(int(item[:2], 16)) + item[2:]
1604 return s
1635 return s
1605
1636
1606 class url(object):
1637 class url(object):
1607 r"""Reliable URL parser.
1638 r"""Reliable URL parser.
1608
1639
1609 This parses URLs and provides attributes for the following
1640 This parses URLs and provides attributes for the following
1610 components:
1641 components:
1611
1642
1612 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1643 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1613
1644
1614 Missing components are set to None. The only exception is
1645 Missing components are set to None. The only exception is
1615 fragment, which is set to '' if present but empty.
1646 fragment, which is set to '' if present but empty.
1616
1647
1617 If parsefragment is False, fragment is included in query. If
1648 If parsefragment is False, fragment is included in query. If
1618 parsequery is False, query is included in path. If both are
1649 parsequery is False, query is included in path. If both are
1619 False, both fragment and query are included in path.
1650 False, both fragment and query are included in path.
1620
1651
1621 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1652 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1622
1653
1623 Note that for backward compatibility reasons, bundle URLs do not
1654 Note that for backward compatibility reasons, bundle URLs do not
1624 take host names. That means 'bundle://../' has a path of '../'.
1655 take host names. That means 'bundle://../' has a path of '../'.
1625
1656
1626 Examples:
1657 Examples:
1627
1658
1628 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1659 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1629 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1660 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1630 >>> url('ssh://[::1]:2200//home/joe/repo')
1661 >>> url('ssh://[::1]:2200//home/joe/repo')
1631 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1662 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1632 >>> url('file:///home/joe/repo')
1663 >>> url('file:///home/joe/repo')
1633 <url scheme: 'file', path: '/home/joe/repo'>
1664 <url scheme: 'file', path: '/home/joe/repo'>
1634 >>> url('file:///c:/temp/foo/')
1665 >>> url('file:///c:/temp/foo/')
1635 <url scheme: 'file', path: 'c:/temp/foo/'>
1666 <url scheme: 'file', path: 'c:/temp/foo/'>
1636 >>> url('bundle:foo')
1667 >>> url('bundle:foo')
1637 <url scheme: 'bundle', path: 'foo'>
1668 <url scheme: 'bundle', path: 'foo'>
1638 >>> url('bundle://../foo')
1669 >>> url('bundle://../foo')
1639 <url scheme: 'bundle', path: '../foo'>
1670 <url scheme: 'bundle', path: '../foo'>
1640 >>> url(r'c:\foo\bar')
1671 >>> url(r'c:\foo\bar')
1641 <url path: 'c:\\foo\\bar'>
1672 <url path: 'c:\\foo\\bar'>
1642 >>> url(r'\\blah\blah\blah')
1673 >>> url(r'\\blah\blah\blah')
1643 <url path: '\\\\blah\\blah\\blah'>
1674 <url path: '\\\\blah\\blah\\blah'>
1644 >>> url(r'\\blah\blah\blah#baz')
1675 >>> url(r'\\blah\blah\blah#baz')
1645 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1676 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
1646 >>> url(r'file:///C:\users\me')
1677 >>> url(r'file:///C:\users\me')
1647 <url scheme: 'file', path: 'C:\\users\\me'>
1678 <url scheme: 'file', path: 'C:\\users\\me'>
1648
1679
1649 Authentication credentials:
1680 Authentication credentials:
1650
1681
1651 >>> url('ssh://joe:xyz@x/repo')
1682 >>> url('ssh://joe:xyz@x/repo')
1652 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1683 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1653 >>> url('ssh://joe@x/repo')
1684 >>> url('ssh://joe@x/repo')
1654 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1685 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1655
1686
1656 Query strings and fragments:
1687 Query strings and fragments:
1657
1688
1658 >>> url('http://host/a?b#c')
1689 >>> url('http://host/a?b#c')
1659 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1690 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1660 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1691 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1661 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1692 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1662 """
1693 """
1663
1694
1664 _safechars = "!~*'()+"
1695 _safechars = "!~*'()+"
1665 _safepchars = "/!~*'()+:\\"
1696 _safepchars = "/!~*'()+:\\"
1666 _matchscheme = re.compile(r'^[a-zA-Z0-9+.\-]+:').match
1697 _matchscheme = re.compile(r'^[a-zA-Z0-9+.\-]+:').match
1667
1698
1668 def __init__(self, path, parsequery=True, parsefragment=True):
1699 def __init__(self, path, parsequery=True, parsefragment=True):
1669 # We slowly chomp away at path until we have only the path left
1700 # We slowly chomp away at path until we have only the path left
1670 self.scheme = self.user = self.passwd = self.host = None
1701 self.scheme = self.user = self.passwd = self.host = None
1671 self.port = self.path = self.query = self.fragment = None
1702 self.port = self.path = self.query = self.fragment = None
1672 self._localpath = True
1703 self._localpath = True
1673 self._hostport = ''
1704 self._hostport = ''
1674 self._origpath = path
1705 self._origpath = path
1675
1706
1676 if parsefragment and '#' in path:
1707 if parsefragment and '#' in path:
1677 path, self.fragment = path.split('#', 1)
1708 path, self.fragment = path.split('#', 1)
1678 if not path:
1709 if not path:
1679 path = None
1710 path = None
1680
1711
1681 # special case for Windows drive letters and UNC paths
1712 # special case for Windows drive letters and UNC paths
1682 if hasdriveletter(path) or path.startswith(r'\\'):
1713 if hasdriveletter(path) or path.startswith(r'\\'):
1683 self.path = path
1714 self.path = path
1684 return
1715 return
1685
1716
1686 # For compatibility reasons, we can't handle bundle paths as
1717 # For compatibility reasons, we can't handle bundle paths as
1687 # normal URLs
1718 # normal URLs
1688 if path.startswith('bundle:'):
1719 if path.startswith('bundle:'):
1689 self.scheme = 'bundle'
1720 self.scheme = 'bundle'
1690 path = path[7:]
1721 path = path[7:]
1691 if path.startswith('//'):
1722 if path.startswith('//'):
1692 path = path[2:]
1723 path = path[2:]
1693 self.path = path
1724 self.path = path
1694 return
1725 return
1695
1726
1696 if self._matchscheme(path):
1727 if self._matchscheme(path):
1697 parts = path.split(':', 1)
1728 parts = path.split(':', 1)
1698 if parts[0]:
1729 if parts[0]:
1699 self.scheme, path = parts
1730 self.scheme, path = parts
1700 self._localpath = False
1731 self._localpath = False
1701
1732
1702 if not path:
1733 if not path:
1703 path = None
1734 path = None
1704 if self._localpath:
1735 if self._localpath:
1705 self.path = ''
1736 self.path = ''
1706 return
1737 return
1707 else:
1738 else:
1708 if self._localpath:
1739 if self._localpath:
1709 self.path = path
1740 self.path = path
1710 return
1741 return
1711
1742
1712 if parsequery and '?' in path:
1743 if parsequery and '?' in path:
1713 path, self.query = path.split('?', 1)
1744 path, self.query = path.split('?', 1)
1714 if not path:
1745 if not path:
1715 path = None
1746 path = None
1716 if not self.query:
1747 if not self.query:
1717 self.query = None
1748 self.query = None
1718
1749
1719 # // is required to specify a host/authority
1750 # // is required to specify a host/authority
1720 if path and path.startswith('//'):
1751 if path and path.startswith('//'):
1721 parts = path[2:].split('/', 1)
1752 parts = path[2:].split('/', 1)
1722 if len(parts) > 1:
1753 if len(parts) > 1:
1723 self.host, path = parts
1754 self.host, path = parts
1724 else:
1755 else:
1725 self.host = parts[0]
1756 self.host = parts[0]
1726 path = None
1757 path = None
1727 if not self.host:
1758 if not self.host:
1728 self.host = None
1759 self.host = None
1729 # path of file:///d is /d
1760 # path of file:///d is /d
1730 # path of file:///d:/ is d:/, not /d:/
1761 # path of file:///d:/ is d:/, not /d:/
1731 if path and not hasdriveletter(path):
1762 if path and not hasdriveletter(path):
1732 path = '/' + path
1763 path = '/' + path
1733
1764
1734 if self.host and '@' in self.host:
1765 if self.host and '@' in self.host:
1735 self.user, self.host = self.host.rsplit('@', 1)
1766 self.user, self.host = self.host.rsplit('@', 1)
1736 if ':' in self.user:
1767 if ':' in self.user:
1737 self.user, self.passwd = self.user.split(':', 1)
1768 self.user, self.passwd = self.user.split(':', 1)
1738 if not self.host:
1769 if not self.host:
1739 self.host = None
1770 self.host = None
1740
1771
1741 # Don't split on colons in IPv6 addresses without ports
1772 # Don't split on colons in IPv6 addresses without ports
1742 if (self.host and ':' in self.host and
1773 if (self.host and ':' in self.host and
1743 not (self.host.startswith('[') and self.host.endswith(']'))):
1774 not (self.host.startswith('[') and self.host.endswith(']'))):
1744 self._hostport = self.host
1775 self._hostport = self.host
1745 self.host, self.port = self.host.rsplit(':', 1)
1776 self.host, self.port = self.host.rsplit(':', 1)
1746 if not self.host:
1777 if not self.host:
1747 self.host = None
1778 self.host = None
1748
1779
1749 if (self.host and self.scheme == 'file' and
1780 if (self.host and self.scheme == 'file' and
1750 self.host not in ('localhost', '127.0.0.1', '[::1]')):
1781 self.host not in ('localhost', '127.0.0.1', '[::1]')):
1751 raise Abort(_('file:// URLs can only refer to localhost'))
1782 raise Abort(_('file:// URLs can only refer to localhost'))
1752
1783
1753 self.path = path
1784 self.path = path
1754
1785
1755 # leave the query string escaped
1786 # leave the query string escaped
1756 for a in ('user', 'passwd', 'host', 'port',
1787 for a in ('user', 'passwd', 'host', 'port',
1757 'path', 'fragment'):
1788 'path', 'fragment'):
1758 v = getattr(self, a)
1789 v = getattr(self, a)
1759 if v is not None:
1790 if v is not None:
1760 setattr(self, a, _urlunquote(v))
1791 setattr(self, a, _urlunquote(v))
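
    # Illustrative note (not part of the original source): parsing splits a
    # URL string into these attributes; roughly, for an ssh URL one would
    # expect something like:
    #
    # >>> u = url('ssh://joe@example.com:2222/repo?rev=default')
    # >>> u.scheme, u.user, u.host, u.port, u.path, u.query
    # ('ssh', 'joe', 'example.com', '2222', 'repo', 'rev=default')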

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))
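
    # Illustrative sketch (not in the original source): authinfo() strips the
    # credentials out of the URL string and hands them back separately for
    # urllib2's password manager; for a made-up URL one would expect roughly:
    #
    # >>> url('http://joe:xyzzy@example.com/repo').authinfo()
    # ('http://example.com/repo', (None, ('http://example.com/repo', 'example.com'), 'joe', 'xyzzy'))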

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
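
    # Illustrative examples (added, not in the original source), sketching how
    # these predicates behave on a few made-up paths:
    #
    # >>> url('https://example.com/repo').isabs()
    # True
    # >>> url('relative/path').isabs()
    # False
    # >>> url('file:///tmp/repo').localpath()
    # '/tmp/repo'
    # >>> url('https://example.com/repo').islocal()
    # False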

def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()
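
# Illustrative doctest-style examples (added; example paths are made up):
#
# >>> hasscheme('https://example.com/repo')
# True
# >>> hasscheme('relative/path')
# False
# >>> hasdriveletter('c:/tmp')
# True
# >>> hasdriveletter('/tmp')
# False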

def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return str(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)
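
# Hedged usage sketch (added for illustration; the URL and credentials below
# are made up):
#
# >>> hidepassword('http://joe:secret@example.com/repo')
# 'http://joe:***@example.com/repo'
# >>> removeauth('http://joe:secret@example.com/repo')
# 'http://example.com/repo'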

def isatty(fd):
    try:
        return fd.isatty()
    except AttributeError:
        return False

timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
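
# Illustrative note (added): assuming unitcountfn (defined earlier in this
# module) picks the first (multiplier, divisor, format) row whose threshold
# the value reaches, one would expect output roughly like:
#
# >>> timecount(1.5)
# '1.500 s'
# >>> timecount(0.0042)
# '4.200 ms'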

_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
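
# Added note: each wrapped call writes one line to stderr in the form
# '<indent><function name>: <formatted elapsed time>', e.g. roughly
# 'foo: 1.500 ms' for a top-level call, indented two more spaces for each
# enclosing @timed call still in progress.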

_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
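
# Added illustrative note (hedged): suffix matching is case-insensitive, since
# the input is lowercased first, and unparsable input raises error.ParseError:
#
# >>> sizetoint('1.5GB')
# 1610612736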

class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behaviour. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results
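
# Hedged usage sketch (added; the source names are made up): hooks run in
# lexicographic order of their source names and the results are collected
# into a list:
#
# >>> h = hooks()
# >>> h.add('zzz-ext', lambda x: x * 2)
# >>> h.add('aaa-ext', lambda x: x + 1)
# >>> h(3)
# [4, 6]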

def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require a ui object.
    Not to be used in production code but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    entries = [('%s:%s' % (fn, ln), func)
        for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (fnmax, fnln, func))
    f.flush()

# convenient shortcut
dst = debugstacktrace
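
# Added note: calling util.dst('checkpoint') during development writes the
# message followed by the current stack to stderr, roughly like:
#
#   checkpoint at:
#    mercurial/dispatch.py:42 in _runcommand
#    mercurial/util.py:7 in example
#
# (the file names and line numbers above are made up for illustration)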