util: don't mess with builtins to emulate buffer()
Matt Mackall
r15657:d976b1ef default
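
This changeset stops installing a buffer() emulation into __builtin__ (the old fakebuffer shim) and instead exposes the fallback as util.buffer(), which manifest.py and mdiff.py now call explicitly. Condensed into a self-contained sketch (adding only the sys import util.py already has), the new fallback in util.py reads:

    import sys

    try:
        buffer = buffer                      # keep the real builtin where it exists
    except NameError:                        # no buffer() builtin available
        if sys.version_info[0] < 3:
            def buffer(sliceable, offset=0):
                return sliceable[offset:]    # plain (copying) slice as a fallback
        else:
            def buffer(sliceable, offset=0):
                return memoryview(sliceable)[offset:]

Callers then name the helper explicitly, for example text = util.buffer(arraytext) in manifest.add() and return util.buffer(bin, 12) in mdiff.patch(), instead of relying on a patched global buffer().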
manifest.py
@@ -1,204 +1,204 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import mdiff, parsers, error, revlog
9 import mdiff, parsers, error, revlog, util
10 import array, struct
10 import array, struct
11
11
12 class manifestdict(dict):
12 class manifestdict(dict):
13 def __init__(self, mapping=None, flags=None):
13 def __init__(self, mapping=None, flags=None):
14 if mapping is None:
14 if mapping is None:
15 mapping = {}
15 mapping = {}
16 if flags is None:
16 if flags is None:
17 flags = {}
17 flags = {}
18 dict.__init__(self, mapping)
18 dict.__init__(self, mapping)
19 self._flags = flags
19 self._flags = flags
20 def flags(self, f):
20 def flags(self, f):
21 return self._flags.get(f, "")
21 return self._flags.get(f, "")
22 def set(self, f, flags):
22 def set(self, f, flags):
23 self._flags[f] = flags
23 self._flags[f] = flags
24 def copy(self):
24 def copy(self):
25 return manifestdict(self, dict.copy(self._flags))
25 return manifestdict(self, dict.copy(self._flags))
26
26
27 class manifest(revlog.revlog):
27 class manifest(revlog.revlog):
28 def __init__(self, opener):
28 def __init__(self, opener):
29 self._mancache = None
29 self._mancache = None
30 revlog.revlog.__init__(self, opener, "00manifest.i")
30 revlog.revlog.__init__(self, opener, "00manifest.i")
31
31
32 def parse(self, lines):
32 def parse(self, lines):
33 mfdict = manifestdict()
33 mfdict = manifestdict()
34 parsers.parse_manifest(mfdict, mfdict._flags, lines)
34 parsers.parse_manifest(mfdict, mfdict._flags, lines)
35 return mfdict
35 return mfdict
36
36
37 def readdelta(self, node):
37 def readdelta(self, node):
38 r = self.rev(node)
38 r = self.rev(node)
39 return self.parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
39 return self.parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
40
40
41 def readfast(self, node):
41 def readfast(self, node):
42 '''use the faster of readdelta or read'''
42 '''use the faster of readdelta or read'''
43 r = self.rev(node)
43 r = self.rev(node)
44 deltaparent = self.deltaparent(r)
44 deltaparent = self.deltaparent(r)
45 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
45 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
46 return self.readdelta(node)
46 return self.readdelta(node)
47 return self.read(node)
47 return self.read(node)
48
48
49 def read(self, node):
49 def read(self, node):
50 if node == revlog.nullid:
50 if node == revlog.nullid:
51 return manifestdict() # don't upset local cache
51 return manifestdict() # don't upset local cache
52 if self._mancache and self._mancache[0] == node:
52 if self._mancache and self._mancache[0] == node:
53 return self._mancache[1]
53 return self._mancache[1]
54 text = self.revision(node)
54 text = self.revision(node)
55 arraytext = array.array('c', text)
55 arraytext = array.array('c', text)
56 mapping = self.parse(text)
56 mapping = self.parse(text)
57 self._mancache = (node, mapping, arraytext)
57 self._mancache = (node, mapping, arraytext)
58 return mapping
58 return mapping
59
59
60 def _search(self, m, s, lo=0, hi=None):
60 def _search(self, m, s, lo=0, hi=None):
61 '''return a tuple (start, end) that says where to find s within m.
61 '''return a tuple (start, end) that says where to find s within m.
62
62
63 If the string is found m[start:end] are the line containing
63 If the string is found m[start:end] are the line containing
64 that string. If start == end the string was not found and
64 that string. If start == end the string was not found and
65 they indicate the proper sorted insertion point. This was
65 they indicate the proper sorted insertion point. This was
66 taken from bisect_left, and modified to find line start/end as
66 taken from bisect_left, and modified to find line start/end as
67 it goes along.
67 it goes along.
68
68
69 m should be a buffer or a string
69 m should be a buffer or a string
70 s is a string'''
70 s is a string'''
71 def advance(i, c):
71 def advance(i, c):
72 while i < lenm and m[i] != c:
72 while i < lenm and m[i] != c:
73 i += 1
73 i += 1
74 return i
74 return i
75 if not s:
75 if not s:
76 return (lo, lo)
76 return (lo, lo)
77 lenm = len(m)
77 lenm = len(m)
78 if not hi:
78 if not hi:
79 hi = lenm
79 hi = lenm
80 while lo < hi:
80 while lo < hi:
81 mid = (lo + hi) // 2
81 mid = (lo + hi) // 2
82 start = mid
82 start = mid
83 while start > 0 and m[start - 1] != '\n':
83 while start > 0 and m[start - 1] != '\n':
84 start -= 1
84 start -= 1
85 end = advance(start, '\0')
85 end = advance(start, '\0')
86 if m[start:end] < s:
86 if m[start:end] < s:
87 # we know that after the null there are 40 bytes of sha1
87 # we know that after the null there are 40 bytes of sha1
88 # this translates to the bisect lo = mid + 1
88 # this translates to the bisect lo = mid + 1
89 lo = advance(end + 40, '\n') + 1
89 lo = advance(end + 40, '\n') + 1
90 else:
90 else:
91 # this translates to the bisect hi = mid
91 # this translates to the bisect hi = mid
92 hi = start
92 hi = start
93 end = advance(lo, '\0')
93 end = advance(lo, '\0')
94 found = m[lo:end]
94 found = m[lo:end]
95 if s == found:
95 if s == found:
96 # we know that after the null there are 40 bytes of sha1
96 # we know that after the null there are 40 bytes of sha1
97 end = advance(end + 40, '\n')
97 end = advance(end + 40, '\n')
98 return (lo, end + 1)
98 return (lo, end + 1)
99 else:
99 else:
100 return (lo, lo)
100 return (lo, lo)
101
101
102 def find(self, node, f):
102 def find(self, node, f):
103 '''look up entry for a single file efficiently.
103 '''look up entry for a single file efficiently.
104 return (node, flags) pair if found, (None, None) if not.'''
104 return (node, flags) pair if found, (None, None) if not.'''
105 if self._mancache and self._mancache[0] == node:
105 if self._mancache and self._mancache[0] == node:
106 return self._mancache[1].get(f), self._mancache[1].flags(f)
106 return self._mancache[1].get(f), self._mancache[1].flags(f)
107 text = self.revision(node)
107 text = self.revision(node)
108 start, end = self._search(text, f)
108 start, end = self._search(text, f)
109 if start == end:
109 if start == end:
110 return None, None
110 return None, None
111 l = text[start:end]
111 l = text[start:end]
112 f, n = l.split('\0')
112 f, n = l.split('\0')
113 return revlog.bin(n[:40]), n[40:-1]
113 return revlog.bin(n[:40]), n[40:-1]
114
114
115 def add(self, map, transaction, link, p1=None, p2=None,
115 def add(self, map, transaction, link, p1=None, p2=None,
116 changed=None):
116 changed=None):
117 # apply the changes collected during the bisect loop to our addlist
117 # apply the changes collected during the bisect loop to our addlist
118 # return a delta suitable for addrevision
118 # return a delta suitable for addrevision
119 def addlistdelta(addlist, x):
119 def addlistdelta(addlist, x):
120 # start from the bottom up
120 # start from the bottom up
121 # so changes to the offsets don't mess things up.
121 # so changes to the offsets don't mess things up.
122 for start, end, content in reversed(x):
122 for start, end, content in reversed(x):
123 if content:
123 if content:
124 addlist[start:end] = array.array('c', content)
124 addlist[start:end] = array.array('c', content)
125 else:
125 else:
126 del addlist[start:end]
126 del addlist[start:end]
127 return "".join(struct.pack(">lll", start, end, len(content)) + content
127 return "".join(struct.pack(">lll", start, end, len(content)) + content
128 for start, end, content in x)
128 for start, end, content in x)
129
129
130 def checkforbidden(l):
130 def checkforbidden(l):
131 for f in l:
131 for f in l:
132 if '\n' in f or '\r' in f:
132 if '\n' in f or '\r' in f:
133 raise error.RevlogError(
133 raise error.RevlogError(
134 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
134 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
135
135
136 # if we're using the cache, make sure it is valid and
136 # if we're using the cache, make sure it is valid and
137 # parented by the same node we're diffing against
137 # parented by the same node we're diffing against
138 if not (changed and self._mancache and p1 and self._mancache[0] == p1):
138 if not (changed and self._mancache and p1 and self._mancache[0] == p1):
139 files = sorted(map)
139 files = sorted(map)
140 checkforbidden(files)
140 checkforbidden(files)
141
141
142 # if this is changed to support newlines in filenames,
142 # if this is changed to support newlines in filenames,
143 # be sure to check the templates/ dir again (especially *-raw.tmpl)
143 # be sure to check the templates/ dir again (especially *-raw.tmpl)
144 hex, flags = revlog.hex, map.flags
144 hex, flags = revlog.hex, map.flags
145 text = ''.join("%s\0%s%s\n" % (f, hex(map[f]), flags(f))
145 text = ''.join("%s\0%s%s\n" % (f, hex(map[f]), flags(f))
146 for f in files)
146 for f in files)
147 arraytext = array.array('c', text)
147 arraytext = array.array('c', text)
148 cachedelta = None
148 cachedelta = None
149 else:
149 else:
150 added, removed = changed
150 added, removed = changed
151 addlist = self._mancache[2]
151 addlist = self._mancache[2]
152
152
153 checkforbidden(added)
153 checkforbidden(added)
154 # combine the changed lists into one list for sorting
154 # combine the changed lists into one list for sorting
155 work = [(x, False) for x in added]
155 work = [(x, False) for x in added]
156 work.extend((x, True) for x in removed)
156 work.extend((x, True) for x in removed)
157 # this could use heapq.merge() (from python2.6+) or equivalent
157 # this could use heapq.merge() (from python2.6+) or equivalent
158 # since the lists are already sorted
158 # since the lists are already sorted
159 work.sort()
159 work.sort()
160
160
161 delta = []
161 delta = []
162 dstart = None
162 dstart = None
163 dend = None
163 dend = None
164 dline = [""]
164 dline = [""]
165 start = 0
165 start = 0
166 # zero copy representation of addlist as a buffer
166 # zero copy representation of addlist as a buffer
167 addbuf = buffer(addlist)
167 addbuf = util.buffer(addlist)
168
168
169 # start with a readonly loop that finds the offset of
169 # start with a readonly loop that finds the offset of
170 # each line and creates the deltas
170 # each line and creates the deltas
171 for f, todelete in work:
171 for f, todelete in work:
172 # bs will either be the index of the item or the insert point
172 # bs will either be the index of the item or the insert point
173 start, end = self._search(addbuf, f, start)
173 start, end = self._search(addbuf, f, start)
174 if not todelete:
174 if not todelete:
175 l = "%s\0%s%s\n" % (f, revlog.hex(map[f]), map.flags(f))
175 l = "%s\0%s%s\n" % (f, revlog.hex(map[f]), map.flags(f))
176 else:
176 else:
177 if start == end:
177 if start == end:
178 # item we want to delete was not found, error out
178 # item we want to delete was not found, error out
179 raise AssertionError(
179 raise AssertionError(
180 _("failed to remove %s from manifest") % f)
180 _("failed to remove %s from manifest") % f)
181 l = ""
181 l = ""
182 if dstart is not None and dstart <= start and dend >= start:
182 if dstart is not None and dstart <= start and dend >= start:
183 if dend < end:
183 if dend < end:
184 dend = end
184 dend = end
185 if l:
185 if l:
186 dline.append(l)
186 dline.append(l)
187 else:
187 else:
188 if dstart is not None:
188 if dstart is not None:
189 delta.append([dstart, dend, "".join(dline)])
189 delta.append([dstart, dend, "".join(dline)])
190 dstart = start
190 dstart = start
191 dend = end
191 dend = end
192 dline = [l]
192 dline = [l]
193
193
194 if dstart is not None:
194 if dstart is not None:
195 delta.append([dstart, dend, "".join(dline)])
195 delta.append([dstart, dend, "".join(dline)])
196 # apply the delta to the addlist, and get a delta for addrevision
196 # apply the delta to the addlist, and get a delta for addrevision
197 cachedelta = (self.rev(p1), addlistdelta(addlist, delta))
197 cachedelta = (self.rev(p1), addlistdelta(addlist, delta))
198 arraytext = addlist
198 arraytext = addlist
199 text = buffer(arraytext)
199 text = util.buffer(arraytext)
200
200
201 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
201 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
202 self._mancache = (n, map, arraytext)
202 self._mancache = (n, map, arraytext)
203
203
204 return n
204 return n
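
In the manifest.py hunk above, the functional change is confined to the new util import and the two zero-copy views that now go through util.buffer() instead of the builtin. A minimal, hypothetical usage sketch of that pattern (illustrative data; assumes a Mercurial of this vintage is importable as the mercurial package):

    import array
    from mercurial import util             # assumed import path for this sketch

    # one manifest line: "<file>\0<40 hex sha1 chars><flags>\n"
    addlist = array.array('c', "foo\0" + "0" * 40 + "\n")
    addbuf = util.buffer(addlist)           # read-only view, no copy of the array
    assert addbuf[0:3] == "foo"             # _search()-style slicing reads from the view

This is the same pattern add() uses when it bisects addbuf with _search() and later hands the array to addrevision() as text = util.buffer(arraytext).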
mdiff.py
@@ -1,333 +1,333 b''
1 # mdiff.py - diff and patch routines for mercurial
1 # mdiff.py - diff and patch routines for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import bdiff, mpatch, util
9 import bdiff, mpatch, util
10 import re, struct
10 import re, struct
11
11
12 def splitnewlines(text):
12 def splitnewlines(text):
13 '''like str.splitlines, but only split on newlines.'''
13 '''like str.splitlines, but only split on newlines.'''
14 lines = [l + '\n' for l in text.split('\n')]
14 lines = [l + '\n' for l in text.split('\n')]
15 if lines:
15 if lines:
16 if lines[-1] == '\n':
16 if lines[-1] == '\n':
17 lines.pop()
17 lines.pop()
18 else:
18 else:
19 lines[-1] = lines[-1][:-1]
19 lines[-1] = lines[-1][:-1]
20 return lines
20 return lines
21
21
22 class diffopts(object):
22 class diffopts(object):
23 '''context is the number of context lines
23 '''context is the number of context lines
24 text treats all files as text
24 text treats all files as text
25 showfunc enables diff -p output
25 showfunc enables diff -p output
26 git enables the git extended patch format
26 git enables the git extended patch format
27 nodates removes dates from diff headers
27 nodates removes dates from diff headers
28 ignorews ignores all whitespace changes in the diff
28 ignorews ignores all whitespace changes in the diff
29 ignorewsamount ignores changes in the amount of whitespace
29 ignorewsamount ignores changes in the amount of whitespace
30 ignoreblanklines ignores changes whose lines are all blank
30 ignoreblanklines ignores changes whose lines are all blank
31 upgrade generates git diffs to avoid data loss
31 upgrade generates git diffs to avoid data loss
32 '''
32 '''
33
33
34 defaults = {
34 defaults = {
35 'context': 3,
35 'context': 3,
36 'text': False,
36 'text': False,
37 'showfunc': False,
37 'showfunc': False,
38 'git': False,
38 'git': False,
39 'nodates': False,
39 'nodates': False,
40 'ignorews': False,
40 'ignorews': False,
41 'ignorewsamount': False,
41 'ignorewsamount': False,
42 'ignoreblanklines': False,
42 'ignoreblanklines': False,
43 'upgrade': False,
43 'upgrade': False,
44 }
44 }
45
45
46 __slots__ = defaults.keys()
46 __slots__ = defaults.keys()
47
47
48 def __init__(self, **opts):
48 def __init__(self, **opts):
49 for k in self.__slots__:
49 for k in self.__slots__:
50 v = opts.get(k)
50 v = opts.get(k)
51 if v is None:
51 if v is None:
52 v = self.defaults[k]
52 v = self.defaults[k]
53 setattr(self, k, v)
53 setattr(self, k, v)
54
54
55 try:
55 try:
56 self.context = int(self.context)
56 self.context = int(self.context)
57 except ValueError:
57 except ValueError:
58 raise util.Abort(_('diff context lines count must be '
58 raise util.Abort(_('diff context lines count must be '
59 'an integer, not %r') % self.context)
59 'an integer, not %r') % self.context)
60
60
61 def copy(self, **kwargs):
61 def copy(self, **kwargs):
62 opts = dict((k, getattr(self, k)) for k in self.defaults)
62 opts = dict((k, getattr(self, k)) for k in self.defaults)
63 opts.update(kwargs)
63 opts.update(kwargs)
64 return diffopts(**opts)
64 return diffopts(**opts)
65
65
66 defaultopts = diffopts()
66 defaultopts = diffopts()
67
67
68 def wsclean(opts, text, blank=True):
68 def wsclean(opts, text, blank=True):
69 if opts.ignorews:
69 if opts.ignorews:
70 text = bdiff.fixws(text, 1)
70 text = bdiff.fixws(text, 1)
71 elif opts.ignorewsamount:
71 elif opts.ignorewsamount:
72 text = bdiff.fixws(text, 0)
72 text = bdiff.fixws(text, 0)
73 if blank and opts.ignoreblanklines:
73 if blank and opts.ignoreblanklines:
74 text = re.sub('\n+', '\n', text).strip('\n')
74 text = re.sub('\n+', '\n', text).strip('\n')
75 return text
75 return text
76
76
77 def splitblock(base1, lines1, base2, lines2, opts):
77 def splitblock(base1, lines1, base2, lines2, opts):
78 # The input lines matches except for interwoven blank lines. We
78 # The input lines matches except for interwoven blank lines. We
79 # transform it into a sequence of matching blocks and blank blocks.
79 # transform it into a sequence of matching blocks and blank blocks.
80 lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
80 lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
81 lines2 = [(wsclean(opts, l) and 1 or 0) for l in lines2]
81 lines2 = [(wsclean(opts, l) and 1 or 0) for l in lines2]
82 s1, e1 = 0, len(lines1)
82 s1, e1 = 0, len(lines1)
83 s2, e2 = 0, len(lines2)
83 s2, e2 = 0, len(lines2)
84 while s1 < e1 or s2 < e2:
84 while s1 < e1 or s2 < e2:
85 i1, i2, btype = s1, s2, '='
85 i1, i2, btype = s1, s2, '='
86 if (i1 >= e1 or lines1[i1] == 0
86 if (i1 >= e1 or lines1[i1] == 0
87 or i2 >= e2 or lines2[i2] == 0):
87 or i2 >= e2 or lines2[i2] == 0):
88 # Consume the block of blank lines
88 # Consume the block of blank lines
89 btype = '~'
89 btype = '~'
90 while i1 < e1 and lines1[i1] == 0:
90 while i1 < e1 and lines1[i1] == 0:
91 i1 += 1
91 i1 += 1
92 while i2 < e2 and lines2[i2] == 0:
92 while i2 < e2 and lines2[i2] == 0:
93 i2 += 1
93 i2 += 1
94 else:
94 else:
95 # Consume the matching lines
95 # Consume the matching lines
96 while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
96 while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
97 i1 += 1
97 i1 += 1
98 i2 += 1
98 i2 += 1
99 yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
99 yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
100 s1 = i1
100 s1 = i1
101 s2 = i2
101 s2 = i2
102
102
103 def allblocks(text1, text2, opts=None, lines1=None, lines2=None, refine=False):
103 def allblocks(text1, text2, opts=None, lines1=None, lines2=None, refine=False):
104 """Return (block, type) tuples, where block is an mdiff.blocks
104 """Return (block, type) tuples, where block is an mdiff.blocks
105 line entry. type is '=' for blocks matching exactly one another
105 line entry. type is '=' for blocks matching exactly one another
106 (bdiff blocks), '!' for non-matching blocks and '~' for blocks
106 (bdiff blocks), '!' for non-matching blocks and '~' for blocks
107 matching only after having filtered blank lines. If refine is True,
107 matching only after having filtered blank lines. If refine is True,
108 then '~' blocks are refined and are only made of blank lines.
108 then '~' blocks are refined and are only made of blank lines.
109 line1 and line2 are text1 and text2 split with splitnewlines() if
109 line1 and line2 are text1 and text2 split with splitnewlines() if
110 they are already available.
110 they are already available.
111 """
111 """
112 if opts is None:
112 if opts is None:
113 opts = defaultopts
113 opts = defaultopts
114 if opts.ignorews or opts.ignorewsamount:
114 if opts.ignorews or opts.ignorewsamount:
115 text1 = wsclean(opts, text1, False)
115 text1 = wsclean(opts, text1, False)
116 text2 = wsclean(opts, text2, False)
116 text2 = wsclean(opts, text2, False)
117 diff = bdiff.blocks(text1, text2)
117 diff = bdiff.blocks(text1, text2)
118 for i, s1 in enumerate(diff):
118 for i, s1 in enumerate(diff):
119 # The first match is special.
119 # The first match is special.
120 # we've either found a match starting at line 0 or a match later
120 # we've either found a match starting at line 0 or a match later
121 # in the file. If it starts later, old and new below will both be
121 # in the file. If it starts later, old and new below will both be
122 # empty and we'll continue to the next match.
122 # empty and we'll continue to the next match.
123 if i > 0:
123 if i > 0:
124 s = diff[i - 1]
124 s = diff[i - 1]
125 else:
125 else:
126 s = [0, 0, 0, 0]
126 s = [0, 0, 0, 0]
127 s = [s[1], s1[0], s[3], s1[2]]
127 s = [s[1], s1[0], s[3], s1[2]]
128
128
129 # bdiff sometimes gives huge matches past eof, this check eats them,
129 # bdiff sometimes gives huge matches past eof, this check eats them,
130 # and deals with the special first match case described above
130 # and deals with the special first match case described above
131 if s[0] != s[1] or s[2] != s[3]:
131 if s[0] != s[1] or s[2] != s[3]:
132 type = '!'
132 type = '!'
133 if opts.ignoreblanklines:
133 if opts.ignoreblanklines:
134 if lines1 is None:
134 if lines1 is None:
135 lines1 = splitnewlines(text1)
135 lines1 = splitnewlines(text1)
136 if lines2 is None:
136 if lines2 is None:
137 lines2 = splitnewlines(text2)
137 lines2 = splitnewlines(text2)
138 old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
138 old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
139 new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
139 new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
140 if old == new:
140 if old == new:
141 type = '~'
141 type = '~'
142 yield s, type
142 yield s, type
143 yield s1, '='
143 yield s1, '='
144
144
145 def diffline(revs, a, b, opts):
145 def diffline(revs, a, b, opts):
146 parts = ['diff']
146 parts = ['diff']
147 if opts.git:
147 if opts.git:
148 parts.append('--git')
148 parts.append('--git')
149 if revs and not opts.git:
149 if revs and not opts.git:
150 parts.append(' '.join(["-r %s" % rev for rev in revs]))
150 parts.append(' '.join(["-r %s" % rev for rev in revs]))
151 if opts.git:
151 if opts.git:
152 parts.append('a/%s' % a)
152 parts.append('a/%s' % a)
153 parts.append('b/%s' % b)
153 parts.append('b/%s' % b)
154 else:
154 else:
155 parts.append(a)
155 parts.append(a)
156 return ' '.join(parts) + '\n'
156 return ' '.join(parts) + '\n'
157
157
158 def unidiff(a, ad, b, bd, fn1, fn2, r=None, opts=defaultopts):
158 def unidiff(a, ad, b, bd, fn1, fn2, r=None, opts=defaultopts):
159 def datetag(date, addtab=True):
159 def datetag(date, addtab=True):
160 if not opts.git and not opts.nodates:
160 if not opts.git and not opts.nodates:
161 return '\t%s\n' % date
161 return '\t%s\n' % date
162 if addtab and ' ' in fn1:
162 if addtab and ' ' in fn1:
163 return '\t\n'
163 return '\t\n'
164 return '\n'
164 return '\n'
165
165
166 if not a and not b:
166 if not a and not b:
167 return ""
167 return ""
168 epoch = util.datestr((0, 0))
168 epoch = util.datestr((0, 0))
169
169
170 fn1 = util.pconvert(fn1)
170 fn1 = util.pconvert(fn1)
171 fn2 = util.pconvert(fn2)
171 fn2 = util.pconvert(fn2)
172
172
173 if not opts.text and (util.binary(a) or util.binary(b)):
173 if not opts.text and (util.binary(a) or util.binary(b)):
174 if a and b and len(a) == len(b) and a == b:
174 if a and b and len(a) == len(b) and a == b:
175 return ""
175 return ""
176 l = ['Binary file %s has changed\n' % fn1]
176 l = ['Binary file %s has changed\n' % fn1]
177 elif not a:
177 elif not a:
178 b = splitnewlines(b)
178 b = splitnewlines(b)
179 if a is None:
179 if a is None:
180 l1 = '--- /dev/null%s' % datetag(epoch, False)
180 l1 = '--- /dev/null%s' % datetag(epoch, False)
181 else:
181 else:
182 l1 = "--- %s%s" % ("a/" + fn1, datetag(ad))
182 l1 = "--- %s%s" % ("a/" + fn1, datetag(ad))
183 l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd))
183 l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd))
184 l3 = "@@ -0,0 +1,%d @@\n" % len(b)
184 l3 = "@@ -0,0 +1,%d @@\n" % len(b)
185 l = [l1, l2, l3] + ["+" + e for e in b]
185 l = [l1, l2, l3] + ["+" + e for e in b]
186 elif not b:
186 elif not b:
187 a = splitnewlines(a)
187 a = splitnewlines(a)
188 l1 = "--- %s%s" % ("a/" + fn1, datetag(ad))
188 l1 = "--- %s%s" % ("a/" + fn1, datetag(ad))
189 if b is None:
189 if b is None:
190 l2 = '+++ /dev/null%s' % datetag(epoch, False)
190 l2 = '+++ /dev/null%s' % datetag(epoch, False)
191 else:
191 else:
192 l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd))
192 l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd))
193 l3 = "@@ -1,%d +0,0 @@\n" % len(a)
193 l3 = "@@ -1,%d +0,0 @@\n" % len(a)
194 l = [l1, l2, l3] + ["-" + e for e in a]
194 l = [l1, l2, l3] + ["-" + e for e in a]
195 else:
195 else:
196 al = splitnewlines(a)
196 al = splitnewlines(a)
197 bl = splitnewlines(b)
197 bl = splitnewlines(b)
198 l = list(_unidiff(a, b, al, bl, opts=opts))
198 l = list(_unidiff(a, b, al, bl, opts=opts))
199 if not l:
199 if not l:
200 return ""
200 return ""
201
201
202 l.insert(0, "--- a/%s%s" % (fn1, datetag(ad)))
202 l.insert(0, "--- a/%s%s" % (fn1, datetag(ad)))
203 l.insert(1, "+++ b/%s%s" % (fn2, datetag(bd)))
203 l.insert(1, "+++ b/%s%s" % (fn2, datetag(bd)))
204
204
205 for ln in xrange(len(l)):
205 for ln in xrange(len(l)):
206 if l[ln][-1] != '\n':
206 if l[ln][-1] != '\n':
207 l[ln] += "\n\ No newline at end of file\n"
207 l[ln] += "\n\ No newline at end of file\n"
208
208
209 if r:
209 if r:
210 l.insert(0, diffline(r, fn1, fn2, opts))
210 l.insert(0, diffline(r, fn1, fn2, opts))
211
211
212 return "".join(l)
212 return "".join(l)
213
213
214 # creates a headerless unified diff
214 # creates a headerless unified diff
215 # t1 and t2 are the text to be diffed
215 # t1 and t2 are the text to be diffed
216 # l1 and l2 are the text broken up into lines
216 # l1 and l2 are the text broken up into lines
217 def _unidiff(t1, t2, l1, l2, opts=defaultopts):
217 def _unidiff(t1, t2, l1, l2, opts=defaultopts):
218 def contextend(l, len):
218 def contextend(l, len):
219 ret = l + opts.context
219 ret = l + opts.context
220 if ret > len:
220 if ret > len:
221 ret = len
221 ret = len
222 return ret
222 return ret
223
223
224 def contextstart(l):
224 def contextstart(l):
225 ret = l - opts.context
225 ret = l - opts.context
226 if ret < 0:
226 if ret < 0:
227 return 0
227 return 0
228 return ret
228 return ret
229
229
230 lastfunc = [0, '']
230 lastfunc = [0, '']
231 def yieldhunk(hunk):
231 def yieldhunk(hunk):
232 (astart, a2, bstart, b2, delta) = hunk
232 (astart, a2, bstart, b2, delta) = hunk
233 aend = contextend(a2, len(l1))
233 aend = contextend(a2, len(l1))
234 alen = aend - astart
234 alen = aend - astart
235 blen = b2 - bstart + aend - a2
235 blen = b2 - bstart + aend - a2
236
236
237 func = ""
237 func = ""
238 if opts.showfunc:
238 if opts.showfunc:
239 lastpos, func = lastfunc
239 lastpos, func = lastfunc
240 # walk backwards from the start of the context up to the start of
240 # walk backwards from the start of the context up to the start of
241 # the previous hunk context until we find a line starting with an
241 # the previous hunk context until we find a line starting with an
242 # alphanumeric char.
242 # alphanumeric char.
243 for i in xrange(astart - 1, lastpos - 1, -1):
243 for i in xrange(astart - 1, lastpos - 1, -1):
244 if l1[i][0].isalnum():
244 if l1[i][0].isalnum():
245 func = ' ' + l1[i].rstrip()[:40]
245 func = ' ' + l1[i].rstrip()[:40]
246 lastfunc[1] = func
246 lastfunc[1] = func
247 break
247 break
248 # by recording this hunk's starting point as the next place to
248 # by recording this hunk's starting point as the next place to
249 # start looking for function lines, we avoid reading any line in
249 # start looking for function lines, we avoid reading any line in
250 # the file more than once.
250 # the file more than once.
251 lastfunc[0] = astart
251 lastfunc[0] = astart
252
252
253 # zero-length hunk ranges report their start line as one less
253 # zero-length hunk ranges report their start line as one less
254 if alen:
254 if alen:
255 astart += 1
255 astart += 1
256 if blen:
256 if blen:
257 bstart += 1
257 bstart += 1
258
258
259 yield "@@ -%d,%d +%d,%d @@%s\n" % (astart, alen,
259 yield "@@ -%d,%d +%d,%d @@%s\n" % (astart, alen,
260 bstart, blen, func)
260 bstart, blen, func)
261 for x in delta:
261 for x in delta:
262 yield x
262 yield x
263 for x in xrange(a2, aend):
263 for x in xrange(a2, aend):
264 yield ' ' + l1[x]
264 yield ' ' + l1[x]
265
265
266 # bdiff.blocks gives us the matching sequences in the files. The loop
266 # bdiff.blocks gives us the matching sequences in the files. The loop
267 # below finds the spaces between those matching sequences and translates
267 # below finds the spaces between those matching sequences and translates
268 # them into diff output.
268 # them into diff output.
269 #
269 #
270 hunk = None
270 hunk = None
271 for s, stype in allblocks(t1, t2, opts, l1, l2):
271 for s, stype in allblocks(t1, t2, opts, l1, l2):
272 if stype != '!':
272 if stype != '!':
273 continue
273 continue
274 delta = []
274 delta = []
275 a1, a2, b1, b2 = s
275 a1, a2, b1, b2 = s
276 old = l1[a1:a2]
276 old = l1[a1:a2]
277 new = l2[b1:b2]
277 new = l2[b1:b2]
278
278
279 astart = contextstart(a1)
279 astart = contextstart(a1)
280 bstart = contextstart(b1)
280 bstart = contextstart(b1)
281 prev = None
281 prev = None
282 if hunk:
282 if hunk:
283 # join with the previous hunk if it falls inside the context
283 # join with the previous hunk if it falls inside the context
284 if astart < hunk[1] + opts.context + 1:
284 if astart < hunk[1] + opts.context + 1:
285 prev = hunk
285 prev = hunk
286 astart = hunk[1]
286 astart = hunk[1]
287 bstart = hunk[3]
287 bstart = hunk[3]
288 else:
288 else:
289 for x in yieldhunk(hunk):
289 for x in yieldhunk(hunk):
290 yield x
290 yield x
291 if prev:
291 if prev:
292 # we've joined the previous hunk, record the new ending points.
292 # we've joined the previous hunk, record the new ending points.
293 hunk[1] = a2
293 hunk[1] = a2
294 hunk[3] = b2
294 hunk[3] = b2
295 delta = hunk[4]
295 delta = hunk[4]
296 else:
296 else:
297 # create a new hunk
297 # create a new hunk
298 hunk = [astart, a2, bstart, b2, delta]
298 hunk = [astart, a2, bstart, b2, delta]
299
299
300 delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
300 delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
301 delta[len(delta):] = ['-' + x for x in old]
301 delta[len(delta):] = ['-' + x for x in old]
302 delta[len(delta):] = ['+' + x for x in new]
302 delta[len(delta):] = ['+' + x for x in new]
303
303
304 if hunk:
304 if hunk:
305 for x in yieldhunk(hunk):
305 for x in yieldhunk(hunk):
306 yield x
306 yield x
307
307
308 def patchtext(bin):
308 def patchtext(bin):
309 pos = 0
309 pos = 0
310 t = []
310 t = []
311 while pos < len(bin):
311 while pos < len(bin):
312 p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
312 p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
313 pos += 12
313 pos += 12
314 t.append(bin[pos:pos + l])
314 t.append(bin[pos:pos + l])
315 pos += l
315 pos += l
316 return "".join(t)
316 return "".join(t)
317
317
318 def patch(a, bin):
318 def patch(a, bin):
319 if len(a) == 0:
319 if len(a) == 0:
320 # skip over trivial delta header
320 # skip over trivial delta header
321 return buffer(bin, 12)
321 return util.buffer(bin, 12)
322 return mpatch.patches(a, [bin])
322 return mpatch.patches(a, [bin])
323
323
324 # similar to difflib.SequenceMatcher.get_matching_blocks
324 # similar to difflib.SequenceMatcher.get_matching_blocks
325 def get_matching_blocks(a, b):
325 def get_matching_blocks(a, b):
326 return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
326 return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
327
327
328 def trivialdiffheader(length):
328 def trivialdiffheader(length):
329 return struct.pack(">lll", 0, 0, length)
329 return struct.pack(">lll", 0, 0, length)
330
330
331 patches = mpatch.patches
331 patches = mpatch.patches
332 patchedsize = mpatch.patchedsize
332 patchedsize = mpatch.patchedsize
333 textdiff = bdiff.bdiff
333 textdiff = bdiff.bdiff
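
The mdiff.py hunk makes the same substitution in patch(): with an empty base it now returns util.buffer(bin, 12), a view of the delta with its 12-byte header skipped. That header is the same ">lll" (start, end, length) triple that trivialdiffheader() packs and patchtext() unpacks. A small sketch of the round trip (assumes a Mercurial of this vintage is importable as the mercurial package):

    from mercurial import mdiff            # assumed import path for this sketch

    text = "hello\n"
    # a trivial delta replaces the empty range [0, 0) of an empty base with text
    delta = mdiff.trivialdiffheader(len(text)) + text
    assert mdiff.patchtext(delta) == text
    # with an empty base, patch() just skips the 12-byte header via util.buffer(bin, 12)
    assert str(mdiff.patch("", delta)) == text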
util.py
@@ -1,1747 +1,1744 b''
1 # util.py - Mercurial utility functions and platform specfic implementations
1 # util.py - Mercurial utility functions and platform specfic implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specfic implementations.
10 """Mercurial utility functions and platform specfic implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from i18n import _
16 from i18n import _
17 import error, osutil, encoding
17 import error, osutil, encoding
18 import errno, re, shutil, sys, tempfile, traceback
18 import errno, re, shutil, sys, tempfile, traceback
19 import os, time, datetime, calendar, textwrap, signal
19 import os, time, datetime, calendar, textwrap, signal
20 import imp, socket, urllib
20 import imp, socket, urllib
21
21
22 if os.name == 'nt':
22 if os.name == 'nt':
23 import windows as platform
23 import windows as platform
24 else:
24 else:
25 import posix as platform
25 import posix as platform
26
26
27 cachestat = platform.cachestat
27 cachestat = platform.cachestat
28 checkexec = platform.checkexec
28 checkexec = platform.checkexec
29 checklink = platform.checklink
29 checklink = platform.checklink
30 copymode = platform.copymode
30 copymode = platform.copymode
31 executablepath = platform.executablepath
31 executablepath = platform.executablepath
32 expandglobs = platform.expandglobs
32 expandglobs = platform.expandglobs
33 explainexit = platform.explainexit
33 explainexit = platform.explainexit
34 findexe = platform.findexe
34 findexe = platform.findexe
35 gethgcmd = platform.gethgcmd
35 gethgcmd = platform.gethgcmd
36 getuser = platform.getuser
36 getuser = platform.getuser
37 groupmembers = platform.groupmembers
37 groupmembers = platform.groupmembers
38 groupname = platform.groupname
38 groupname = platform.groupname
39 hidewindow = platform.hidewindow
39 hidewindow = platform.hidewindow
40 isexec = platform.isexec
40 isexec = platform.isexec
41 isowner = platform.isowner
41 isowner = platform.isowner
42 localpath = platform.localpath
42 localpath = platform.localpath
43 lookupreg = platform.lookupreg
43 lookupreg = platform.lookupreg
44 makedir = platform.makedir
44 makedir = platform.makedir
45 nlinks = platform.nlinks
45 nlinks = platform.nlinks
46 normpath = platform.normpath
46 normpath = platform.normpath
47 normcase = platform.normcase
47 normcase = platform.normcase
48 nulldev = platform.nulldev
48 nulldev = platform.nulldev
49 openhardlinks = platform.openhardlinks
49 openhardlinks = platform.openhardlinks
50 oslink = platform.oslink
50 oslink = platform.oslink
51 parsepatchoutput = platform.parsepatchoutput
51 parsepatchoutput = platform.parsepatchoutput
52 pconvert = platform.pconvert
52 pconvert = platform.pconvert
53 popen = platform.popen
53 popen = platform.popen
54 posixfile = platform.posixfile
54 posixfile = platform.posixfile
55 quotecommand = platform.quotecommand
55 quotecommand = platform.quotecommand
56 realpath = platform.realpath
56 realpath = platform.realpath
57 rename = platform.rename
57 rename = platform.rename
58 samedevice = platform.samedevice
58 samedevice = platform.samedevice
59 samefile = platform.samefile
59 samefile = platform.samefile
60 samestat = platform.samestat
60 samestat = platform.samestat
61 setbinary = platform.setbinary
61 setbinary = platform.setbinary
62 setflags = platform.setflags
62 setflags = platform.setflags
63 setsignalhandler = platform.setsignalhandler
63 setsignalhandler = platform.setsignalhandler
64 shellquote = platform.shellquote
64 shellquote = platform.shellquote
65 spawndetached = platform.spawndetached
65 spawndetached = platform.spawndetached
66 sshargs = platform.sshargs
66 sshargs = platform.sshargs
67 statfiles = platform.statfiles
67 statfiles = platform.statfiles
68 termwidth = platform.termwidth
68 termwidth = platform.termwidth
69 testpid = platform.testpid
69 testpid = platform.testpid
70 umask = platform.umask
70 umask = platform.umask
71 unlink = platform.unlink
71 unlink = platform.unlink
72 unlinkpath = platform.unlinkpath
72 unlinkpath = platform.unlinkpath
73 username = platform.username
73 username = platform.username
74
74
75 # Python compatibility
75 # Python compatibility
76
76
77 _notset = object()
77 _notset = object()
78
78
79 def safehasattr(thing, attr):
79 def safehasattr(thing, attr):
80 return getattr(thing, attr, _notset) is not _notset
80 return getattr(thing, attr, _notset) is not _notset
81
81
82 def sha1(s=''):
82 def sha1(s=''):
83 '''
83 '''
84 Low-overhead wrapper around Python's SHA support
84 Low-overhead wrapper around Python's SHA support
85
85
86 >>> f = _fastsha1
86 >>> f = _fastsha1
87 >>> a = sha1()
87 >>> a = sha1()
88 >>> a = f()
88 >>> a = f()
89 >>> a.hexdigest()
89 >>> a.hexdigest()
90 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
90 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
91 '''
91 '''
92
92
93 return _fastsha1(s)
93 return _fastsha1(s)
94
94
95 def _fastsha1(s=''):
95 def _fastsha1(s=''):
96 # This function will import sha1 from hashlib or sha (whichever is
96 # This function will import sha1 from hashlib or sha (whichever is
97 # available) and overwrite itself with it on the first call.
97 # available) and overwrite itself with it on the first call.
98 # Subsequent calls will go directly to the imported function.
98 # Subsequent calls will go directly to the imported function.
99 if sys.version_info >= (2, 5):
99 if sys.version_info >= (2, 5):
100 from hashlib import sha1 as _sha1
100 from hashlib import sha1 as _sha1
101 else:
101 else:
102 from sha import sha as _sha1
102 from sha import sha as _sha1
103 global _fastsha1, sha1
103 global _fastsha1, sha1
104 _fastsha1 = sha1 = _sha1
104 _fastsha1 = sha1 = _sha1
105 return _sha1(s)
105 return _sha1(s)
106
106
107 import __builtin__
108
109 if sys.version_info[0] < 3:
110 def fakebuffer(sliceable, offset=0):
111 return sliceable[offset:]
112 else:
113 def fakebuffer(sliceable, offset=0):
114 return memoryview(sliceable)[offset:]
115 try:
107 try:
116 buffer
108 buffer = buffer
117 except NameError:
109 except NameError:
118 __builtin__.buffer = fakebuffer
110 if sys.version_info[0] < 3:
111 def buffer(sliceable, offset=0):
112 return sliceable[offset:]
113 else:
114 def buffer(sliceable, offset=0):
115 return memoryview(sliceable)[offset:]
119
116
120 import subprocess
117 import subprocess
121 closefds = os.name == 'posix'
118 closefds = os.name == 'posix'
122
119
123 def popen2(cmd, env=None, newlines=False):
120 def popen2(cmd, env=None, newlines=False):
124 # Setting bufsize to -1 lets the system decide the buffer size.
121 # Setting bufsize to -1 lets the system decide the buffer size.
125 # The default for bufsize is 0, meaning unbuffered. This leads to
122 # The default for bufsize is 0, meaning unbuffered. This leads to
126 # poor performance on Mac OS X: http://bugs.python.org/issue4194
123 # poor performance on Mac OS X: http://bugs.python.org/issue4194
127 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
124 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
128 close_fds=closefds,
125 close_fds=closefds,
129 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
126 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
130 universal_newlines=newlines,
127 universal_newlines=newlines,
131 env=env)
128 env=env)
132 return p.stdin, p.stdout
129 return p.stdin, p.stdout
133
130
134 def popen3(cmd, env=None, newlines=False):
131 def popen3(cmd, env=None, newlines=False):
135 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
132 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
136 close_fds=closefds,
133 close_fds=closefds,
137 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
134 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
138 stderr=subprocess.PIPE,
135 stderr=subprocess.PIPE,
139 universal_newlines=newlines,
136 universal_newlines=newlines,
140 env=env)
137 env=env)
141 return p.stdin, p.stdout, p.stderr
138 return p.stdin, p.stdout, p.stderr
142
139
143 def version():
140 def version():
144 """Return version information if available."""
141 """Return version information if available."""
145 try:
142 try:
146 import __version__
143 import __version__
147 return __version__.version
144 return __version__.version
148 except ImportError:
145 except ImportError:
149 return 'unknown'
146 return 'unknown'
150
147
151 # used by parsedate
148 # used by parsedate
152 defaultdateformats = (
149 defaultdateformats = (
153 '%Y-%m-%d %H:%M:%S',
150 '%Y-%m-%d %H:%M:%S',
154 '%Y-%m-%d %I:%M:%S%p',
151 '%Y-%m-%d %I:%M:%S%p',
155 '%Y-%m-%d %H:%M',
152 '%Y-%m-%d %H:%M',
156 '%Y-%m-%d %I:%M%p',
153 '%Y-%m-%d %I:%M%p',
157 '%Y-%m-%d',
154 '%Y-%m-%d',
158 '%m-%d',
155 '%m-%d',
159 '%m/%d',
156 '%m/%d',
160 '%m/%d/%y',
157 '%m/%d/%y',
161 '%m/%d/%Y',
158 '%m/%d/%Y',
162 '%a %b %d %H:%M:%S %Y',
159 '%a %b %d %H:%M:%S %Y',
163 '%a %b %d %I:%M:%S%p %Y',
160 '%a %b %d %I:%M:%S%p %Y',
164 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
161 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
165 '%b %d %H:%M:%S %Y',
162 '%b %d %H:%M:%S %Y',
166 '%b %d %I:%M:%S%p %Y',
163 '%b %d %I:%M:%S%p %Y',
167 '%b %d %H:%M:%S',
164 '%b %d %H:%M:%S',
168 '%b %d %I:%M:%S%p',
165 '%b %d %I:%M:%S%p',
169 '%b %d %H:%M',
166 '%b %d %H:%M',
170 '%b %d %I:%M%p',
167 '%b %d %I:%M%p',
171 '%b %d %Y',
168 '%b %d %Y',
172 '%b %d',
169 '%b %d',
173 '%H:%M:%S',
170 '%H:%M:%S',
174 '%I:%M:%S%p',
171 '%I:%M:%S%p',
175 '%H:%M',
172 '%H:%M',
176 '%I:%M%p',
173 '%I:%M%p',
177 )
174 )
178
175
179 extendeddateformats = defaultdateformats + (
176 extendeddateformats = defaultdateformats + (
180 "%Y",
177 "%Y",
181 "%Y-%m",
178 "%Y-%m",
182 "%b",
179 "%b",
183 "%b %Y",
180 "%b %Y",
184 )
181 )
185
182
186 def cachefunc(func):
183 def cachefunc(func):
187 '''cache the result of function calls'''
184 '''cache the result of function calls'''
188 # XXX doesn't handle keywords args
185 # XXX doesn't handle keywords args
189 cache = {}
186 cache = {}
190 if func.func_code.co_argcount == 1:
187 if func.func_code.co_argcount == 1:
191 # we gain a small amount of time because
188 # we gain a small amount of time because
192 # we don't need to pack/unpack the list
189 # we don't need to pack/unpack the list
193 def f(arg):
190 def f(arg):
194 if arg not in cache:
191 if arg not in cache:
195 cache[arg] = func(arg)
192 cache[arg] = func(arg)
196 return cache[arg]
193 return cache[arg]
197 else:
194 else:
198 def f(*args):
195 def f(*args):
199 if args not in cache:
196 if args not in cache:
200 cache[args] = func(*args)
197 cache[args] = func(*args)
201 return cache[args]
198 return cache[args]
202
199
203 return f
200 return f
204
201
205 def lrucachefunc(func):
202 def lrucachefunc(func):
206 '''cache most recent results of function calls'''
203 '''cache most recent results of function calls'''
207 cache = {}
204 cache = {}
208 order = []
205 order = []
209 if func.func_code.co_argcount == 1:
206 if func.func_code.co_argcount == 1:
210 def f(arg):
207 def f(arg):
211 if arg not in cache:
208 if arg not in cache:
212 if len(cache) > 20:
209 if len(cache) > 20:
213 del cache[order.pop(0)]
210 del cache[order.pop(0)]
214 cache[arg] = func(arg)
211 cache[arg] = func(arg)
215 else:
212 else:
216 order.remove(arg)
213 order.remove(arg)
217 order.append(arg)
214 order.append(arg)
218 return cache[arg]
215 return cache[arg]
219 else:
216 else:
220 def f(*args):
217 def f(*args):
221 if args not in cache:
218 if args not in cache:
222 if len(cache) > 20:
219 if len(cache) > 20:
223 del cache[order.pop(0)]
220 del cache[order.pop(0)]
224 cache[args] = func(*args)
221 cache[args] = func(*args)
225 else:
222 else:
226 order.remove(args)
223 order.remove(args)
227 order.append(args)
224 order.append(args)
228 return cache[args]
225 return cache[args]
229
226
230 return f
227 return f
231
228
232 class propertycache(object):
229 class propertycache(object):
233 def __init__(self, func):
230 def __init__(self, func):
234 self.func = func
231 self.func = func
235 self.name = func.__name__
232 self.name = func.__name__
236 def __get__(self, obj, type=None):
233 def __get__(self, obj, type=None):
237 result = self.func(obj)
234 result = self.func(obj)
238 setattr(obj, self.name, result)
235 setattr(obj, self.name, result)
239 return result
236 return result
240
237
241 def pipefilter(s, cmd):
238 def pipefilter(s, cmd):
242 '''filter string S through command CMD, returning its output'''
239 '''filter string S through command CMD, returning its output'''
243 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
240 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
244 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
241 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
245 pout, perr = p.communicate(s)
242 pout, perr = p.communicate(s)
246 return pout
243 return pout
247
244
248 def tempfilter(s, cmd):
245 def tempfilter(s, cmd):
249 '''filter string S through a pair of temporary files with CMD.
246 '''filter string S through a pair of temporary files with CMD.
250 CMD is used as a template to create the real command to be run,
247 CMD is used as a template to create the real command to be run,
251 with the strings INFILE and OUTFILE replaced by the real names of
248 with the strings INFILE and OUTFILE replaced by the real names of
252 the temporary files generated.'''
249 the temporary files generated.'''
253 inname, outname = None, None
250 inname, outname = None, None
254 try:
251 try:
255 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
252 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
256 fp = os.fdopen(infd, 'wb')
253 fp = os.fdopen(infd, 'wb')
257 fp.write(s)
254 fp.write(s)
258 fp.close()
255 fp.close()
259 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
256 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
260 os.close(outfd)
257 os.close(outfd)
261 cmd = cmd.replace('INFILE', inname)
258 cmd = cmd.replace('INFILE', inname)
262 cmd = cmd.replace('OUTFILE', outname)
259 cmd = cmd.replace('OUTFILE', outname)
263 code = os.system(cmd)
260 code = os.system(cmd)
264 if sys.platform == 'OpenVMS' and code & 1:
261 if sys.platform == 'OpenVMS' and code & 1:
265 code = 0
262 code = 0
266 if code:
263 if code:
267 raise Abort(_("command '%s' failed: %s") %
264 raise Abort(_("command '%s' failed: %s") %
268 (cmd, explainexit(code)))
265 (cmd, explainexit(code)))
269 fp = open(outname, 'rb')
266 fp = open(outname, 'rb')
270 r = fp.read()
267 r = fp.read()
271 fp.close()
268 fp.close()
272 return r
269 return r
273 finally:
270 finally:
274 try:
271 try:
275 if inname:
272 if inname:
276 os.unlink(inname)
273 os.unlink(inname)
277 except OSError:
274 except OSError:
278 pass
275 pass
279 try:
276 try:
280 if outname:
277 if outname:
281 os.unlink(outname)
278 os.unlink(outname)
282 except OSError:
279 except OSError:
283 pass
280 pass
284
281
285 filtertable = {
282 filtertable = {
286 'tempfile:': tempfilter,
283 'tempfile:': tempfilter,
287 'pipe:': pipefilter,
284 'pipe:': pipefilter,
288 }
285 }
289
286
290 def filter(s, cmd):
287 def filter(s, cmd):
291 "filter a string through a command that transforms its input to its output"
288 "filter a string through a command that transforms its input to its output"
292 for name, fn in filtertable.iteritems():
289 for name, fn in filtertable.iteritems():
293 if cmd.startswith(name):
290 if cmd.startswith(name):
294 return fn(s, cmd[len(name):].lstrip())
291 return fn(s, cmd[len(name):].lstrip())
295 return pipefilter(s, cmd)
292 return pipefilter(s, cmd)
296
293
297 def binary(s):
294 def binary(s):
298 """return true if a string is binary data"""
295 """return true if a string is binary data"""
299 return bool(s and '\0' in s)
296 return bool(s and '\0' in s)
300
297
301 def increasingchunks(source, min=1024, max=65536):
298 def increasingchunks(source, min=1024, max=65536):
302 '''return no less than min bytes per chunk while data remains,
299 '''return no less than min bytes per chunk while data remains,
303 doubling min after each chunk until it reaches max'''
300 doubling min after each chunk until it reaches max'''
304 def log2(x):
301 def log2(x):
305 if not x:
302 if not x:
306 return 0
303 return 0
307 i = 0
304 i = 0
308 while x:
305 while x:
309 x >>= 1
306 x >>= 1
310 i += 1
307 i += 1
311 return i - 1
308 return i - 1
312
309
313 buf = []
310 buf = []
314 blen = 0
311 blen = 0
315 for chunk in source:
312 for chunk in source:
316 buf.append(chunk)
313 buf.append(chunk)
317 blen += len(chunk)
314 blen += len(chunk)
318 if blen >= min:
315 if blen >= min:
319 if min < max:
316 if min < max:
320 min = min << 1
317 min = min << 1
321 nmin = 1 << log2(blen)
318 nmin = 1 << log2(blen)
322 if nmin > min:
319 if nmin > min:
323 min = nmin
320 min = nmin
324 if min > max:
321 if min > max:
325 min = max
322 min = max
326 yield ''.join(buf)
323 yield ''.join(buf)
327 blen = 0
324 blen = 0
328 buf = []
325 buf = []
329 if buf:
326 if buf:
330 yield ''.join(buf)
327 yield ''.join(buf)
331
328
332 Abort = error.Abort
329 Abort = error.Abort
333
330
334 def always(fn):
331 def always(fn):
335 return True
332 return True
336
333
337 def never(fn):
334 def never(fn):
338 return False
335 return False
339
336
340 def pathto(root, n1, n2):
337 def pathto(root, n1, n2):
341 '''return the relative path from one place to another.
338 '''return the relative path from one place to another.
342 root should use os.sep to separate directories
339 root should use os.sep to separate directories
343 n1 should use os.sep to separate directories
340 n1 should use os.sep to separate directories
344 n2 should use "/" to separate directories
341 n2 should use "/" to separate directories
345 returns an os.sep-separated path.
342 returns an os.sep-separated path.
346
343
347 If n1 is a relative path, it's assumed it's
344 If n1 is a relative path, it's assumed it's
348 relative to root.
345 relative to root.
349 n2 should always be relative to root.
346 n2 should always be relative to root.
350 '''
347 '''
351 if not n1:
348 if not n1:
352 return localpath(n2)
349 return localpath(n2)
353 if os.path.isabs(n1):
350 if os.path.isabs(n1):
354 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
351 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
355 return os.path.join(root, localpath(n2))
352 return os.path.join(root, localpath(n2))
356 n2 = '/'.join((pconvert(root), n2))
353 n2 = '/'.join((pconvert(root), n2))
357 a, b = splitpath(n1), n2.split('/')
354 a, b = splitpath(n1), n2.split('/')
358 a.reverse()
355 a.reverse()
359 b.reverse()
356 b.reverse()
360 while a and b and a[-1] == b[-1]:
357 while a and b and a[-1] == b[-1]:
361 a.pop()
358 a.pop()
362 b.pop()
359 b.pop()
363 b.reverse()
360 b.reverse()
364 return os.sep.join((['..'] * len(a)) + b) or '.'
361 return os.sep.join((['..'] * len(a)) + b) or '.'
365
362
366 _hgexecutable = None
363 _hgexecutable = None
367
364
368 def mainfrozen():
365 def mainfrozen():
369 """return True if we are a frozen executable.
366 """return True if we are a frozen executable.
370
367
371 The code supports py2exe (most common, Windows only) and tools/freeze
368 The code supports py2exe (most common, Windows only) and tools/freeze
372 (portable, not much used).
369 (portable, not much used).
373 """
370 """
374 return (safehasattr(sys, "frozen") or # new py2exe
371 return (safehasattr(sys, "frozen") or # new py2exe
375 safehasattr(sys, "importers") or # old py2exe
372 safehasattr(sys, "importers") or # old py2exe
376 imp.is_frozen("__main__")) # tools/freeze
373 imp.is_frozen("__main__")) # tools/freeze
377
374
378 def hgexecutable():
375 def hgexecutable():
379 """return location of the 'hg' executable.
376 """return location of the 'hg' executable.
380
377
381 Defaults to $HG or 'hg' in the search path.
378 Defaults to $HG or 'hg' in the search path.
382 """
379 """
383 if _hgexecutable is None:
380 if _hgexecutable is None:
384 hg = os.environ.get('HG')
381 hg = os.environ.get('HG')
385 mainmod = sys.modules['__main__']
382 mainmod = sys.modules['__main__']
386 if hg:
383 if hg:
387 _sethgexecutable(hg)
384 _sethgexecutable(hg)
388 elif mainfrozen():
385 elif mainfrozen():
389 _sethgexecutable(sys.executable)
386 _sethgexecutable(sys.executable)
390 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
387 elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
391 _sethgexecutable(mainmod.__file__)
388 _sethgexecutable(mainmod.__file__)
392 else:
389 else:
393 exe = findexe('hg') or os.path.basename(sys.argv[0])
390 exe = findexe('hg') or os.path.basename(sys.argv[0])
394 _sethgexecutable(exe)
391 _sethgexecutable(exe)
395 return _hgexecutable
392 return _hgexecutable
396
393
397 def _sethgexecutable(path):
394 def _sethgexecutable(path):
398 """set location of the 'hg' executable"""
395 """set location of the 'hg' executable"""
399 global _hgexecutable
396 global _hgexecutable
400 _hgexecutable = path
397 _hgexecutable = path
401
398
402 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
399 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
403 '''enhanced shell command execution.
400 '''enhanced shell command execution.
404 run with environment maybe modified, maybe in different dir.
401 run with environment maybe modified, maybe in different dir.
405
402
406 if command fails and onerr is None, return status. if ui object,
403 if command fails and onerr is None, return status. if ui object,
407 print error message and return status, else raise onerr object as
404 print error message and return status, else raise onerr object as
408 exception.
405 exception.
409
406
410 if out is specified, it is assumed to be a file-like object that has a
407 if out is specified, it is assumed to be a file-like object that has a
411 write() method. stdout and stderr will be redirected to out.'''
408 write() method. stdout and stderr will be redirected to out.'''
412 try:
409 try:
413 sys.stdout.flush()
410 sys.stdout.flush()
414 except Exception:
411 except Exception:
415 pass
412 pass
416 def py2shell(val):
413 def py2shell(val):
417 'convert python object into string that is useful to shell'
414 'convert python object into string that is useful to shell'
418 if val is None or val is False:
415 if val is None or val is False:
419 return '0'
416 return '0'
420 if val is True:
417 if val is True:
421 return '1'
418 return '1'
422 return str(val)
419 return str(val)
423 origcmd = cmd
420 origcmd = cmd
424 cmd = quotecommand(cmd)
421 cmd = quotecommand(cmd)
425 env = dict(os.environ)
422 env = dict(os.environ)
426 env.update((k, py2shell(v)) for k, v in environ.iteritems())
423 env.update((k, py2shell(v)) for k, v in environ.iteritems())
427 env['HG'] = hgexecutable()
424 env['HG'] = hgexecutable()
428 if out is None or out == sys.__stdout__:
425 if out is None or out == sys.__stdout__:
429 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
426 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
430 env=env, cwd=cwd)
427 env=env, cwd=cwd)
431 else:
428 else:
432 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
429 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
433 env=env, cwd=cwd, stdout=subprocess.PIPE,
430 env=env, cwd=cwd, stdout=subprocess.PIPE,
434 stderr=subprocess.STDOUT)
431 stderr=subprocess.STDOUT)
435 for line in proc.stdout:
432 for line in proc.stdout:
436 out.write(line)
433 out.write(line)
437 proc.wait()
434 proc.wait()
438 rc = proc.returncode
435 rc = proc.returncode
439 if sys.platform == 'OpenVMS' and rc & 1:
436 if sys.platform == 'OpenVMS' and rc & 1:
440 rc = 0
437 rc = 0
441 if rc and onerr:
438 if rc and onerr:
442 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
439 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
443 explainexit(rc)[0])
440 explainexit(rc)[0])
444 if errprefix:
441 if errprefix:
445 errmsg = '%s: %s' % (errprefix, errmsg)
442 errmsg = '%s: %s' % (errprefix, errmsg)
446 try:
443 try:
447 onerr.warn(errmsg + '\n')
444 onerr.warn(errmsg + '\n')
448 except AttributeError:
445 except AttributeError:
449 raise onerr(errmsg)
446 raise onerr(errmsg)
450 return rc
447 return rc
451
448
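# Illustrative sketch of system() in doctest style, assuming this module is
# importable as mercurial.util and a POSIX /bin/sh; the command strings and
# the GREETING variable are made-up examples.  system() exports the extra
# environment (plus HG) to the child and returns the exit status:
#
#   >>> from mercurial import util
#   >>> util.system('test "$GREETING" = hello', environ={'GREETING': 'hello'})
#   0
#   >>> util.system('exit 3')
#   3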
452 def checksignature(func):
449 def checksignature(func):
453 '''wrap a function with code to check for calling errors'''
450 '''wrap a function with code to check for calling errors'''
454 def check(*args, **kwargs):
451 def check(*args, **kwargs):
455 try:
452 try:
456 return func(*args, **kwargs)
453 return func(*args, **kwargs)
457 except TypeError:
454 except TypeError:
458 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
455 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
459 raise error.SignatureError
456 raise error.SignatureError
460 raise
457 raise
461
458
462 return check
459 return check
463
460
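# Illustrative sketch of checksignature() in doctest style; the wrapped
# function below is a made-up example.  Calling the wrapper with the wrong
# number of arguments raises error.SignatureError instead of a bare
# TypeError, while TypeErrors raised deeper inside the call still propagate:
#
#   >>> from mercurial import util, error
#   >>> def greet(name):
#   ...     return 'hello ' + name
#   >>> wrapped = util.checksignature(greet)
#   >>> wrapped('world')
#   'hello world'
#   >>> try:
#   ...     wrapped()
#   ... except error.SignatureError:
#   ...     print 'bad call'
#   bad call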
464 def copyfile(src, dest):
461 def copyfile(src, dest):
465 "copy a file, preserving mode and atime/mtime"
462 "copy a file, preserving mode and atime/mtime"
466 if os.path.islink(src):
463 if os.path.islink(src):
467 try:
464 try:
468 os.unlink(dest)
465 os.unlink(dest)
469 except OSError:
466 except OSError:
470 pass
467 pass
471 os.symlink(os.readlink(src), dest)
468 os.symlink(os.readlink(src), dest)
472 else:
469 else:
473 try:
470 try:
474 shutil.copyfile(src, dest)
471 shutil.copyfile(src, dest)
475 shutil.copymode(src, dest)
472 shutil.copymode(src, dest)
476 except shutil.Error, inst:
473 except shutil.Error, inst:
477 raise Abort(str(inst))
474 raise Abort(str(inst))
478
475
479 def copyfiles(src, dst, hardlink=None):
476 def copyfiles(src, dst, hardlink=None):
480 """Copy a directory tree using hardlinks if possible"""
477 """Copy a directory tree using hardlinks if possible"""
481
478
482 if hardlink is None:
479 if hardlink is None:
483 hardlink = (os.stat(src).st_dev ==
480 hardlink = (os.stat(src).st_dev ==
484 os.stat(os.path.dirname(dst)).st_dev)
481 os.stat(os.path.dirname(dst)).st_dev)
485
482
486 num = 0
483 num = 0
487 if os.path.isdir(src):
484 if os.path.isdir(src):
488 os.mkdir(dst)
485 os.mkdir(dst)
489 for name, kind in osutil.listdir(src):
486 for name, kind in osutil.listdir(src):
490 srcname = os.path.join(src, name)
487 srcname = os.path.join(src, name)
491 dstname = os.path.join(dst, name)
488 dstname = os.path.join(dst, name)
492 hardlink, n = copyfiles(srcname, dstname, hardlink)
489 hardlink, n = copyfiles(srcname, dstname, hardlink)
493 num += n
490 num += n
494 else:
491 else:
495 if hardlink:
492 if hardlink:
496 try:
493 try:
497 oslink(src, dst)
494 oslink(src, dst)
498 except (IOError, OSError):
495 except (IOError, OSError):
499 hardlink = False
496 hardlink = False
500 shutil.copy(src, dst)
497 shutil.copy(src, dst)
501 else:
498 else:
502 shutil.copy(src, dst)
499 shutil.copy(src, dst)
503 num += 1
500 num += 1
504
501
505 return hardlink, num
502 return hardlink, num
506
503
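# Illustrative sketch of copyfiles(); the paths below are made-up examples.
# The destination directory must not exist yet; the call mirrors the tree,
# preferring hardlinks when source and destination live on the same device
# (or when hardlink=True is passed), and returns whether hardlinks were
# actually used together with the number of files handled:
#
#   >>> from mercurial import util
#   >>> hardlinked, count = util.copyfiles('/tmp/src-repo', '/tmp/dst-repo')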
507 _winreservednames = '''con prn aux nul
504 _winreservednames = '''con prn aux nul
508 com1 com2 com3 com4 com5 com6 com7 com8 com9
505 com1 com2 com3 com4 com5 com6 com7 com8 com9
509 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
506 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
510 _winreservedchars = ':*?"<>|'
507 _winreservedchars = ':*?"<>|'
511 def checkwinfilename(path):
508 def checkwinfilename(path):
512 '''Check that the base-relative path is a valid filename on Windows.
509 '''Check that the base-relative path is a valid filename on Windows.
513 Returns None if the path is ok, or a UI string describing the problem.
510 Returns None if the path is ok, or a UI string describing the problem.
514
511
515 >>> checkwinfilename("just/a/normal/path")
512 >>> checkwinfilename("just/a/normal/path")
516 >>> checkwinfilename("foo/bar/con.xml")
513 >>> checkwinfilename("foo/bar/con.xml")
517 "filename contains 'con', which is reserved on Windows"
514 "filename contains 'con', which is reserved on Windows"
518 >>> checkwinfilename("foo/con.xml/bar")
515 >>> checkwinfilename("foo/con.xml/bar")
519 "filename contains 'con', which is reserved on Windows"
516 "filename contains 'con', which is reserved on Windows"
520 >>> checkwinfilename("foo/bar/xml.con")
517 >>> checkwinfilename("foo/bar/xml.con")
521 >>> checkwinfilename("foo/bar/AUX/bla.txt")
518 >>> checkwinfilename("foo/bar/AUX/bla.txt")
522 "filename contains 'AUX', which is reserved on Windows"
519 "filename contains 'AUX', which is reserved on Windows"
523 >>> checkwinfilename("foo/bar/bla:.txt")
520 >>> checkwinfilename("foo/bar/bla:.txt")
524 "filename contains ':', which is reserved on Windows"
521 "filename contains ':', which is reserved on Windows"
525 >>> checkwinfilename("foo/bar/b\07la.txt")
522 >>> checkwinfilename("foo/bar/b\07la.txt")
526 "filename contains '\\\\x07', which is invalid on Windows"
523 "filename contains '\\\\x07', which is invalid on Windows"
527 >>> checkwinfilename("foo/bar/bla ")
524 >>> checkwinfilename("foo/bar/bla ")
528 "filename ends with ' ', which is not allowed on Windows"
525 "filename ends with ' ', which is not allowed on Windows"
529 >>> checkwinfilename("../bar")
526 >>> checkwinfilename("../bar")
530 '''
527 '''
531 for n in path.replace('\\', '/').split('/'):
528 for n in path.replace('\\', '/').split('/'):
532 if not n:
529 if not n:
533 continue
530 continue
534 for c in n:
531 for c in n:
535 if c in _winreservedchars:
532 if c in _winreservedchars:
536 return _("filename contains '%s', which is reserved "
533 return _("filename contains '%s', which is reserved "
537 "on Windows") % c
534 "on Windows") % c
538 if ord(c) <= 31:
535 if ord(c) <= 31:
539 return _("filename contains %r, which is invalid "
536 return _("filename contains %r, which is invalid "
540 "on Windows") % c
537 "on Windows") % c
541 base = n.split('.')[0]
538 base = n.split('.')[0]
542 if base and base.lower() in _winreservednames:
539 if base and base.lower() in _winreservednames:
543 return _("filename contains '%s', which is reserved "
540 return _("filename contains '%s', which is reserved "
544 "on Windows") % base
541 "on Windows") % base
545 t = n[-1]
542 t = n[-1]
546 if t in '. ' and n not in '..':
543 if t in '. ' and n not in '..':
547 return _("filename ends with '%s', which is not allowed "
544 return _("filename ends with '%s', which is not allowed "
548 "on Windows") % t
545 "on Windows") % t
549
546
550 if os.name == 'nt':
547 if os.name == 'nt':
551 checkosfilename = checkwinfilename
548 checkosfilename = checkwinfilename
552 else:
549 else:
553 checkosfilename = platform.checkosfilename
550 checkosfilename = platform.checkosfilename
554
551
555 def makelock(info, pathname):
552 def makelock(info, pathname):
556 try:
553 try:
557 return os.symlink(info, pathname)
554 return os.symlink(info, pathname)
558 except OSError, why:
555 except OSError, why:
559 if why.errno == errno.EEXIST:
556 if why.errno == errno.EEXIST:
560 raise
557 raise
561 except AttributeError: # no symlink in os
558 except AttributeError: # no symlink in os
562 pass
559 pass
563
560
564 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
561 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
565 os.write(ld, info)
562 os.write(ld, info)
566 os.close(ld)
563 os.close(ld)
567
564
568 def readlock(pathname):
565 def readlock(pathname):
569 try:
566 try:
570 return os.readlink(pathname)
567 return os.readlink(pathname)
571 except OSError, why:
568 except OSError, why:
572 if why.errno not in (errno.EINVAL, errno.ENOSYS):
569 if why.errno not in (errno.EINVAL, errno.ENOSYS):
573 raise
570 raise
574 except AttributeError: # no symlink in os
571 except AttributeError: # no symlink in os
575 pass
572 pass
576 fp = posixfile(pathname)
573 fp = posixfile(pathname)
577 r = fp.read()
574 r = fp.read()
578 fp.close()
575 fp.close()
579 return r
576 return r
580
577
581 def fstat(fp):
578 def fstat(fp):
582 '''stat file object that may not have fileno method.'''
579 '''stat file object that may not have fileno method.'''
583 try:
580 try:
584 return os.fstat(fp.fileno())
581 return os.fstat(fp.fileno())
585 except AttributeError:
582 except AttributeError:
586 return os.stat(fp.name)
583 return os.stat(fp.name)
587
584
588 # File system features
585 # File system features
589
586
590 def checkcase(path):
587 def checkcase(path):
591 """
588 """
592 Check whether the given path is on a case-sensitive filesystem
589 Check whether the given path is on a case-sensitive filesystem
593
590
594 Requires a path (like /foo/.hg) ending with a foldable final
591 Requires a path (like /foo/.hg) ending with a foldable final
595 directory component.
592 directory component.
596 """
593 """
597 s1 = os.stat(path)
594 s1 = os.stat(path)
598 d, b = os.path.split(path)
595 d, b = os.path.split(path)
599 p2 = os.path.join(d, b.upper())
596 p2 = os.path.join(d, b.upper())
600 if path == p2:
597 if path == p2:
601 p2 = os.path.join(d, b.lower())
598 p2 = os.path.join(d, b.lower())
602 try:
599 try:
603 s2 = os.stat(p2)
600 s2 = os.stat(p2)
604 if s2 == s1:
601 if s2 == s1:
605 return False
602 return False
606 return True
603 return True
607 except OSError:
604 except OSError:
608 return True
605 return True
609
606
610 _fspathcache = {}
607 _fspathcache = {}
611 def fspath(name, root):
608 def fspath(name, root):
612 '''Get name in the case stored in the filesystem
609 '''Get name in the case stored in the filesystem
613
610
614 The name is either relative to root, or it is an absolute path starting
611 The name is either relative to root, or it is an absolute path starting
615 with root. Note that this function is unnecessary, and should not be
612 with root. Note that this function is unnecessary, and should not be
616 called, for case-sensitive filesystems (simply because it's expensive).
613 called, for case-sensitive filesystems (simply because it's expensive).
617 '''
614 '''
618 # If name is absolute, make it relative
615 # If name is absolute, make it relative
619 if name.lower().startswith(root.lower()):
616 if name.lower().startswith(root.lower()):
620 l = len(root)
617 l = len(root)
621 if name[l] == os.sep or name[l] == os.altsep:
618 if name[l] == os.sep or name[l] == os.altsep:
622 l = l + 1
619 l = l + 1
623 name = name[l:]
620 name = name[l:]
624
621
625 if not os.path.lexists(os.path.join(root, name)):
622 if not os.path.lexists(os.path.join(root, name)):
626 return None
623 return None
627
624
628 seps = os.sep
625 seps = os.sep
629 if os.altsep:
626 if os.altsep:
630 seps = seps + os.altsep
627 seps = seps + os.altsep
631 # Protect backslashes. This gets silly very quickly.
628 # Protect backslashes. This gets silly very quickly.
632 seps = seps.replace('\\','\\\\')
629 seps = seps.replace('\\','\\\\')
633 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
630 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
634 dir = os.path.normcase(os.path.normpath(root))
631 dir = os.path.normcase(os.path.normpath(root))
635 result = []
632 result = []
636 for part, sep in pattern.findall(name):
633 for part, sep in pattern.findall(name):
637 if sep:
634 if sep:
638 result.append(sep)
635 result.append(sep)
639 continue
636 continue
640
637
641 if dir not in _fspathcache:
638 if dir not in _fspathcache:
642 _fspathcache[dir] = os.listdir(dir)
639 _fspathcache[dir] = os.listdir(dir)
643 contents = _fspathcache[dir]
640 contents = _fspathcache[dir]
644
641
645 lpart = part.lower()
642 lpart = part.lower()
646 lenp = len(part)
643 lenp = len(part)
647 for n in contents:
644 for n in contents:
648 if lenp == len(n) and n.lower() == lpart:
645 if lenp == len(n) and n.lower() == lpart:
649 result.append(n)
646 result.append(n)
650 break
647 break
651 else:
648 else:
652 # Cannot happen, as the file exists!
649 # Cannot happen, as the file exists!
653 result.append(part)
650 result.append(part)
654 dir = os.path.join(dir, lpart)
651 dir = os.path.join(dir, lpart)
655
652
656 return ''.join(result)
653 return ''.join(result)
657
654
658 def checknlink(testfile):
655 def checknlink(testfile):
659 '''check whether hardlink count reporting works properly'''
656 '''check whether hardlink count reporting works properly'''
660
657
661 # testfile may be open, so we need a separate file for checking to
658 # testfile may be open, so we need a separate file for checking to
662 # work around issue2543 (or testfile may get lost on Samba shares)
659 # work around issue2543 (or testfile may get lost on Samba shares)
663 f1 = testfile + ".hgtmp1"
660 f1 = testfile + ".hgtmp1"
664 if os.path.lexists(f1):
661 if os.path.lexists(f1):
665 return False
662 return False
666 try:
663 try:
667 posixfile(f1, 'w').close()
664 posixfile(f1, 'w').close()
668 except IOError:
665 except IOError:
669 return False
666 return False
670
667
671 f2 = testfile + ".hgtmp2"
668 f2 = testfile + ".hgtmp2"
672 fd = None
669 fd = None
673 try:
670 try:
674 try:
671 try:
675 oslink(f1, f2)
672 oslink(f1, f2)
676 except OSError:
673 except OSError:
677 return False
674 return False
678
675
679 # nlinks() may behave differently for files on Windows shares if
676 # nlinks() may behave differently for files on Windows shares if
680 # the file is open.
677 # the file is open.
681 fd = posixfile(f2)
678 fd = posixfile(f2)
682 return nlinks(f2) > 1
679 return nlinks(f2) > 1
683 finally:
680 finally:
684 if fd is not None:
681 if fd is not None:
685 fd.close()
682 fd.close()
686 for f in (f1, f2):
683 for f in (f1, f2):
687 try:
684 try:
688 os.unlink(f)
685 os.unlink(f)
689 except OSError:
686 except OSError:
690 pass
687 pass
691
688
692 return False
689 return False
693
690
694 def endswithsep(path):
691 def endswithsep(path):
695 '''Check path ends with os.sep or os.altsep.'''
692 '''Check path ends with os.sep or os.altsep.'''
696 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
693 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
697
694
698 def splitpath(path):
695 def splitpath(path):
699 '''Split path by os.sep.
696 '''Split path by os.sep.
700 Note that this function does not use os.altsep because this is
697 Note that this function does not use os.altsep because this is
701 an alternative of simple "xxx.split(os.sep)".
698 an alternative of simple "xxx.split(os.sep)".
702 It is recommended to use os.path.normpath() before using this
699 It is recommended to use os.path.normpath() before using this
703 function if need.'''
700 function if need.'''
704 return path.split(os.sep)
701 return path.split(os.sep)
705
702
706 def gui():
703 def gui():
707 '''Are we running in a GUI?'''
704 '''Are we running in a GUI?'''
708 if sys.platform == 'darwin':
705 if sys.platform == 'darwin':
709 if 'SSH_CONNECTION' in os.environ:
706 if 'SSH_CONNECTION' in os.environ:
710 # handle SSH access to a box where the user is logged in
707 # handle SSH access to a box where the user is logged in
711 return False
708 return False
712 elif getattr(osutil, 'isgui', None):
709 elif getattr(osutil, 'isgui', None):
713 # check if a CoreGraphics session is available
710 # check if a CoreGraphics session is available
714 return osutil.isgui()
711 return osutil.isgui()
715 else:
712 else:
716 # pure build; use a safe default
713 # pure build; use a safe default
717 return True
714 return True
718 else:
715 else:
719 return os.name == "nt" or os.environ.get("DISPLAY")
716 return os.name == "nt" or os.environ.get("DISPLAY")
720
717
721 def mktempcopy(name, emptyok=False, createmode=None):
718 def mktempcopy(name, emptyok=False, createmode=None):
722 """Create a temporary file with the same contents from name
719 """Create a temporary file with the same contents from name
723
720
724 The permission bits are copied from the original file.
721 The permission bits are copied from the original file.
725
722
726 If the temporary file is going to be truncated immediately, you
723 If the temporary file is going to be truncated immediately, you
727 can use emptyok=True as an optimization.
724 can use emptyok=True as an optimization.
728
725
729 Returns the name of the temporary file.
726 Returns the name of the temporary file.
730 """
727 """
731 d, fn = os.path.split(name)
728 d, fn = os.path.split(name)
732 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
729 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
733 os.close(fd)
730 os.close(fd)
734 # Temporary files are created with mode 0600, which is usually not
731 # Temporary files are created with mode 0600, which is usually not
735 # what we want. If the original file already exists, just copy
732 # what we want. If the original file already exists, just copy
736 # its mode. Otherwise, manually obey umask.
733 # its mode. Otherwise, manually obey umask.
737 copymode(name, temp, createmode)
734 copymode(name, temp, createmode)
738 if emptyok:
735 if emptyok:
739 return temp
736 return temp
740 try:
737 try:
741 try:
738 try:
742 ifp = posixfile(name, "rb")
739 ifp = posixfile(name, "rb")
743 except IOError, inst:
740 except IOError, inst:
744 if inst.errno == errno.ENOENT:
741 if inst.errno == errno.ENOENT:
745 return temp
742 return temp
746 if not getattr(inst, 'filename', None):
743 if not getattr(inst, 'filename', None):
747 inst.filename = name
744 inst.filename = name
748 raise
745 raise
749 ofp = posixfile(temp, "wb")
746 ofp = posixfile(temp, "wb")
750 for chunk in filechunkiter(ifp):
747 for chunk in filechunkiter(ifp):
751 ofp.write(chunk)
748 ofp.write(chunk)
752 ifp.close()
749 ifp.close()
753 ofp.close()
750 ofp.close()
754 except:
751 except:
755 try: os.unlink(temp)
752 try: os.unlink(temp)
756 except: pass
753 except: pass
757 raise
754 raise
758 return temp
755 return temp
759
756
760 class atomictempfile(object):
757 class atomictempfile(object):
761 '''writeable file object that atomically updates a file
758 '''writeable file object that atomically updates a file
762
759
763 All writes will go to a temporary copy of the original file. Call
760 All writes will go to a temporary copy of the original file. Call
764 close() when you are done writing, and atomictempfile will rename
761 close() when you are done writing, and atomictempfile will rename
765 the temporary copy to the original name, making the changes
762 the temporary copy to the original name, making the changes
766 visible. If the object is destroyed without being closed, all your
763 visible. If the object is destroyed without being closed, all your
767 writes are discarded.
764 writes are discarded.
768 '''
765 '''
769 def __init__(self, name, mode='w+b', createmode=None):
766 def __init__(self, name, mode='w+b', createmode=None):
770 self.__name = name # permanent name
767 self.__name = name # permanent name
771 self._tempname = mktempcopy(name, emptyok=('w' in mode),
768 self._tempname = mktempcopy(name, emptyok=('w' in mode),
772 createmode=createmode)
769 createmode=createmode)
773 self._fp = posixfile(self._tempname, mode)
770 self._fp = posixfile(self._tempname, mode)
774
771
775 # delegated methods
772 # delegated methods
776 self.write = self._fp.write
773 self.write = self._fp.write
777 self.fileno = self._fp.fileno
774 self.fileno = self._fp.fileno
778
775
779 def close(self):
776 def close(self):
780 if not self._fp.closed:
777 if not self._fp.closed:
781 self._fp.close()
778 self._fp.close()
782 rename(self._tempname, localpath(self.__name))
779 rename(self._tempname, localpath(self.__name))
783
780
784 def discard(self):
781 def discard(self):
785 if not self._fp.closed:
782 if not self._fp.closed:
786 try:
783 try:
787 os.unlink(self._tempname)
784 os.unlink(self._tempname)
788 except OSError:
785 except OSError:
789 pass
786 pass
790 self._fp.close()
787 self._fp.close()
791
788
792 def __del__(self):
789 def __del__(self):
793 if safehasattr(self, '_fp'): # constructor actually did something
790 if safehasattr(self, '_fp'): # constructor actually did something
794 self.discard()
791 self.discard()
795
792
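# Illustrative sketch of atomictempfile in doctest style; the path is a
# made-up example whose directory is assumed to exist.  Readers of
# 'some/file' never see a partial write: data goes to a temporary sibling
# first and only the rename() performed by close() makes it visible, while
# discard() (or destruction without close) throws the temporary away:
#
#   >>> from mercurial import util
#   >>> f = util.atomictempfile('some/file')
#   >>> f.write('all or nothing\n')
#   >>> f.close()     # atomically replaces 'some/file'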
796 def makedirs(name, mode=None):
793 def makedirs(name, mode=None):
797 """recursive directory creation with parent mode inheritance"""
794 """recursive directory creation with parent mode inheritance"""
798 try:
795 try:
799 os.mkdir(name)
796 os.mkdir(name)
800 except OSError, err:
797 except OSError, err:
801 if err.errno == errno.EEXIST:
798 if err.errno == errno.EEXIST:
802 return
799 return
803 if err.errno != errno.ENOENT or not name:
800 if err.errno != errno.ENOENT or not name:
804 raise
801 raise
805 parent = os.path.dirname(os.path.abspath(name))
802 parent = os.path.dirname(os.path.abspath(name))
806 if parent == name:
803 if parent == name:
807 raise
804 raise
808 makedirs(parent, mode)
805 makedirs(parent, mode)
809 os.mkdir(name)
806 os.mkdir(name)
810 if mode is not None:
807 if mode is not None:
811 os.chmod(name, mode)
808 os.chmod(name, mode)
812
809
813 def readfile(path):
810 def readfile(path):
814 fp = open(path, 'rb')
811 fp = open(path, 'rb')
815 try:
812 try:
816 return fp.read()
813 return fp.read()
817 finally:
814 finally:
818 fp.close()
815 fp.close()
819
816
820 def writefile(path, text):
817 def writefile(path, text):
821 fp = open(path, 'wb')
818 fp = open(path, 'wb')
822 try:
819 try:
823 fp.write(text)
820 fp.write(text)
824 finally:
821 finally:
825 fp.close()
822 fp.close()
826
823
827 def appendfile(path, text):
824 def appendfile(path, text):
828 fp = open(path, 'ab')
825 fp = open(path, 'ab')
829 try:
826 try:
830 fp.write(text)
827 fp.write(text)
831 finally:
828 finally:
832 fp.close()
829 fp.close()
833
830
834 class chunkbuffer(object):
831 class chunkbuffer(object):
835 """Allow arbitrary sized chunks of data to be efficiently read from an
832 """Allow arbitrary sized chunks of data to be efficiently read from an
836 iterator over chunks of arbitrary size."""
833 iterator over chunks of arbitrary size."""
837
834
838 def __init__(self, in_iter):
835 def __init__(self, in_iter):
839 """in_iter is the iterator that's iterating over the input chunks.
836 """in_iter is the iterator that's iterating over the input chunks.
840 Chunks larger than 2**20 bytes are re-sliced into 2**18 byte pieces."""
837 Chunks larger than 2**20 bytes are re-sliced into 2**18 byte pieces."""
841 def splitbig(chunks):
838 def splitbig(chunks):
842 for chunk in chunks:
839 for chunk in chunks:
843 if len(chunk) > 2**20:
840 if len(chunk) > 2**20:
844 pos = 0
841 pos = 0
845 while pos < len(chunk):
842 while pos < len(chunk):
846 end = pos + 2 ** 18
843 end = pos + 2 ** 18
847 yield chunk[pos:end]
844 yield chunk[pos:end]
848 pos = end
845 pos = end
849 else:
846 else:
850 yield chunk
847 yield chunk
851 self.iter = splitbig(in_iter)
848 self.iter = splitbig(in_iter)
852 self._queue = []
849 self._queue = []
853
850
854 def read(self, l):
851 def read(self, l):
855 """Read L bytes of data from the iterator of chunks of data.
852 """Read L bytes of data from the iterator of chunks of data.
856 Returns less than L bytes if the iterator runs dry."""
853 Returns less than L bytes if the iterator runs dry."""
857 left = l
854 left = l
858 buf = ''
855 buf = ''
859 queue = self._queue
856 queue = self._queue
860 while left > 0:
857 while left > 0:
861 # refill the queue
858 # refill the queue
862 if not queue:
859 if not queue:
863 target = 2**18
860 target = 2**18
864 for chunk in self.iter:
861 for chunk in self.iter:
865 queue.append(chunk)
862 queue.append(chunk)
866 target -= len(chunk)
863 target -= len(chunk)
867 if target <= 0:
864 if target <= 0:
868 break
865 break
869 if not queue:
866 if not queue:
870 break
867 break
871
868
872 chunk = queue.pop(0)
869 chunk = queue.pop(0)
873 left -= len(chunk)
870 left -= len(chunk)
874 if left < 0:
871 if left < 0:
875 queue.insert(0, chunk[left:])
872 queue.insert(0, chunk[left:])
876 buf += chunk[:left]
873 buf += chunk[:left]
877 else:
874 else:
878 buf += chunk
875 buf += chunk
879
876
880 return buf
877 return buf
881
878
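# Illustrative sketch of chunkbuffer.read() in doctest style, using a
# made-up iterator of unevenly sized chunks; read(l) regroups them and
# only returns fewer than l bytes once the iterator is exhausted:
#
#   >>> from mercurial import util
#   >>> buf = util.chunkbuffer(iter(['abc', 'defg']))
#   >>> buf.read(5)
#   'abcde'
#   >>> buf.read(5)
#   'fg'
#   >>> buf.read(5)
#   ''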
882 def filechunkiter(f, size=65536, limit=None):
879 def filechunkiter(f, size=65536, limit=None):
883 """Create a generator that produces the data in the file, size
880 """Create a generator that produces the data in the file, size
884 (default 65536) bytes at a time, up to optional limit (default is
881 (default 65536) bytes at a time, up to optional limit (default is
885 to read all data). Chunks may be less than size bytes if the
882 to read all data). Chunks may be less than size bytes if the
886 chunk is the last chunk in the file, or the file is a socket or
883 chunk is the last chunk in the file, or the file is a socket or
887 some other type of file that sometimes reads less data than is
884 some other type of file that sometimes reads less data than is
888 requested."""
885 requested."""
889 assert size >= 0
886 assert size >= 0
890 assert limit is None or limit >= 0
887 assert limit is None or limit >= 0
891 while True:
888 while True:
892 if limit is None:
889 if limit is None:
893 nbytes = size
890 nbytes = size
894 else:
891 else:
895 nbytes = min(limit, size)
892 nbytes = min(limit, size)
896 s = nbytes and f.read(nbytes)
893 s = nbytes and f.read(nbytes)
897 if not s:
894 if not s:
898 break
895 break
899 if limit:
896 if limit:
900 limit -= len(s)
897 limit -= len(s)
901 yield s
898 yield s
902
899
903 def makedate():
900 def makedate():
904 ct = time.time()
901 ct = time.time()
905 if ct < 0:
902 if ct < 0:
906 hint = _("check your clock")
903 hint = _("check your clock")
907 raise Abort(_("negative timestamp: %d") % ct, hint=hint)
904 raise Abort(_("negative timestamp: %d") % ct, hint=hint)
908 delta = (datetime.datetime.utcfromtimestamp(ct) -
905 delta = (datetime.datetime.utcfromtimestamp(ct) -
909 datetime.datetime.fromtimestamp(ct))
906 datetime.datetime.fromtimestamp(ct))
910 tz = delta.days * 86400 + delta.seconds
907 tz = delta.days * 86400 + delta.seconds
911 return ct, tz
908 return ct, tz
912
909
913 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
910 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
914 """represent a (unixtime, offset) tuple as a localized time.
911 """represent a (unixtime, offset) tuple as a localized time.
915 unixtime is seconds since the epoch, and offset is the time zone's
912 unixtime is seconds since the epoch, and offset is the time zone's
916 number of seconds away from UTC. the time zone is appended only
913 number of seconds away from UTC. the time zone is appended only
917 where "%1" (signed hours) and "%2" (minutes) appear in the format."""
914 where "%1" (signed hours) and "%2" (minutes) appear in the format."""
918 t, tz = date or makedate()
915 t, tz = date or makedate()
919 if t < 0:
916 if t < 0:
920 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
917 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
921 tz = 0
918 tz = 0
922 if "%1" in format or "%2" in format:
919 if "%1" in format or "%2" in format:
923 sign = (tz > 0) and "-" or "+"
920 sign = (tz > 0) and "-" or "+"
924 minutes = abs(tz) // 60
921 minutes = abs(tz) // 60
925 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
922 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
926 format = format.replace("%2", "%02d" % (minutes % 60))
923 format = format.replace("%2", "%02d" % (minutes % 60))
927 try:
924 try:
928 t = time.gmtime(float(t) - tz)
925 t = time.gmtime(float(t) - tz)
929 except ValueError:
926 except ValueError:
930 # time was out of range
927 # time was out of range
931 t = time.gmtime(sys.maxint)
928 t = time.gmtime(sys.maxint)
932 s = time.strftime(format, t)
929 s = time.strftime(format, t)
933 return s
930 return s
934
931
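# Illustrative sketch of datestr() in doctest style; epoch-based tuples
# keep the output predictable apart from the C-locale day/month names
# assumed here.  "%1" and "%2" in the default format expand to the signed
# hours and the minutes of the offset:
#
#   >>> from mercurial import util
#   >>> util.datestr((0, 0))
#   'Thu Jan 01 00:00:00 1970 +0000'
#   >>> util.datestr((0, -3600), format='%Y-%m-%d %1%2')
#   '1970-01-01 +0100'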
935 def shortdate(date=None):
932 def shortdate(date=None):
936 """turn (timestamp, tzoff) tuple into an ISO 8601 date."""
933 """turn (timestamp, tzoff) tuple into an ISO 8601 date."""
937 return datestr(date, format='%Y-%m-%d')
934 return datestr(date, format='%Y-%m-%d')
938
935
939 def strdate(string, format, defaults=[]):
936 def strdate(string, format, defaults=[]):
940 """parse a localized time string and return a (unixtime, offset) tuple.
937 """parse a localized time string and return a (unixtime, offset) tuple.
941 if the string cannot be parsed, ValueError is raised."""
938 if the string cannot be parsed, ValueError is raised."""
942 def timezone(string):
939 def timezone(string):
943 tz = string.split()[-1]
940 tz = string.split()[-1]
944 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
941 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
945 sign = (tz[0] == "+") and 1 or -1
942 sign = (tz[0] == "+") and 1 or -1
946 hours = int(tz[1:3])
943 hours = int(tz[1:3])
947 minutes = int(tz[3:5])
944 minutes = int(tz[3:5])
948 return -sign * (hours * 60 + minutes) * 60
945 return -sign * (hours * 60 + minutes) * 60
949 if tz == "GMT" or tz == "UTC":
946 if tz == "GMT" or tz == "UTC":
950 return 0
947 return 0
951 return None
948 return None
952
949
953 # NOTE: unixtime = localunixtime + offset
950 # NOTE: unixtime = localunixtime + offset
954 offset, date = timezone(string), string
951 offset, date = timezone(string), string
955 if offset is not None:
952 if offset is not None:
956 date = " ".join(string.split()[:-1])
953 date = " ".join(string.split()[:-1])
957
954
958 # add missing elements from defaults
955 # add missing elements from defaults
959 usenow = False # default to using biased defaults
956 usenow = False # default to using biased defaults
960 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
957 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
961 found = [True for p in part if ("%"+p) in format]
958 found = [True for p in part if ("%"+p) in format]
962 if not found:
959 if not found:
963 date += "@" + defaults[part][usenow]
960 date += "@" + defaults[part][usenow]
964 format += "@%" + part[0]
961 format += "@%" + part[0]
965 else:
962 else:
966 # We've found a specific time element, less specific time
963 # We've found a specific time element, less specific time
967 # elements are relative to today
964 # elements are relative to today
968 usenow = True
965 usenow = True
969
966
970 timetuple = time.strptime(date, format)
967 timetuple = time.strptime(date, format)
971 localunixtime = int(calendar.timegm(timetuple))
968 localunixtime = int(calendar.timegm(timetuple))
972 if offset is None:
969 if offset is None:
973 # local timezone
970 # local timezone
974 unixtime = int(time.mktime(timetuple))
971 unixtime = int(time.mktime(timetuple))
975 offset = unixtime - localunixtime
972 offset = unixtime - localunixtime
976 else:
973 else:
977 unixtime = localunixtime + offset
974 unixtime = localunixtime + offset
978 return unixtime, offset
975 return unixtime, offset
979
976
980 def parsedate(date, formats=None, bias={}):
977 def parsedate(date, formats=None, bias={}):
981 """parse a localized date/time and return a (unixtime, offset) tuple.
978 """parse a localized date/time and return a (unixtime, offset) tuple.
982
979
983 The date may be a "unixtime offset" string or in one of the specified
980 The date may be a "unixtime offset" string or in one of the specified
984 formats. If the date already is a (unixtime, offset) tuple, it is returned.
981 formats. If the date already is a (unixtime, offset) tuple, it is returned.
985 """
982 """
986 if not date:
983 if not date:
987 return 0, 0
984 return 0, 0
988 if isinstance(date, tuple) and len(date) == 2:
985 if isinstance(date, tuple) and len(date) == 2:
989 return date
986 return date
990 if not formats:
987 if not formats:
991 formats = defaultdateformats
988 formats = defaultdateformats
992 date = date.strip()
989 date = date.strip()
993 try:
990 try:
994 when, offset = map(int, date.split(' '))
991 when, offset = map(int, date.split(' '))
995 except ValueError:
992 except ValueError:
996 # fill out defaults
993 # fill out defaults
997 now = makedate()
994 now = makedate()
998 defaults = {}
995 defaults = {}
999 for part in ("d", "mb", "yY", "HI", "M", "S"):
996 for part in ("d", "mb", "yY", "HI", "M", "S"):
1000 # this piece is for rounding the specific end of unknowns
997 # this piece is for rounding the specific end of unknowns
1001 b = bias.get(part)
998 b = bias.get(part)
1002 if b is None:
999 if b is None:
1003 if part[0] in "HMS":
1000 if part[0] in "HMS":
1004 b = "00"
1001 b = "00"
1005 else:
1002 else:
1006 b = "0"
1003 b = "0"
1007
1004
1008 # this piece is for matching the generic end to today's date
1005 # this piece is for matching the generic end to today's date
1009 n = datestr(now, "%" + part[0])
1006 n = datestr(now, "%" + part[0])
1010
1007
1011 defaults[part] = (b, n)
1008 defaults[part] = (b, n)
1012
1009
1013 for format in formats:
1010 for format in formats:
1014 try:
1011 try:
1015 when, offset = strdate(date, format, defaults)
1012 when, offset = strdate(date, format, defaults)
1016 except (ValueError, OverflowError):
1013 except (ValueError, OverflowError):
1017 pass
1014 pass
1018 else:
1015 else:
1019 break
1016 break
1020 else:
1017 else:
1021 raise Abort(_('invalid date: %r') % date)
1018 raise Abort(_('invalid date: %r') % date)
1022 # validate explicit (probably user-specified) date and
1019 # validate explicit (probably user-specified) date and
1023 # time zone offset. values must fit in signed 32 bits for
1020 # time zone offset. values must fit in signed 32 bits for
1024 # current 32-bit linux runtimes. timezones go from UTC-12
1021 # current 32-bit linux runtimes. timezones go from UTC-12
1025 # to UTC+14
1022 # to UTC+14
1026 if abs(when) > 0x7fffffff:
1023 if abs(when) > 0x7fffffff:
1027 raise Abort(_('date exceeds 32 bits: %d') % when)
1024 raise Abort(_('date exceeds 32 bits: %d') % when)
1028 if when < 0:
1025 if when < 0:
1029 raise Abort(_('negative date value: %d') % when)
1026 raise Abort(_('negative date value: %d') % when)
1030 if offset < -50400 or offset > 43200:
1027 if offset < -50400 or offset > 43200:
1031 raise Abort(_('impossible time zone offset: %d') % offset)
1028 raise Abort(_('impossible time zone offset: %d') % offset)
1032 return when, offset
1029 return when, offset
1033
1030
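# Illustrative sketch of parsedate() in doctest style.  The "unixtime
# offset" string form and an already-parsed tuple round-trip unchanged;
# anything else is matched against defaultdateformats and then validated:
#
#   >>> from mercurial import util
#   >>> util.parsedate('1234567890 0')
#   (1234567890, 0)
#   >>> util.parsedate((1234567890, 0))
#   (1234567890, 0)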
1034 def matchdate(date):
1031 def matchdate(date):
1035 """Return a function that matches a given date match specifier
1032 """Return a function that matches a given date match specifier
1036
1033
1037 Formats include:
1034 Formats include:
1038
1035
1039 '{date}' match a given date to the accuracy provided
1036 '{date}' match a given date to the accuracy provided
1040
1037
1041 '<{date}' on or before a given date
1038 '<{date}' on or before a given date
1042
1039
1043 '>{date}' on or after a given date
1040 '>{date}' on or after a given date
1044
1041
1045 >>> p1 = parsedate("10:29:59")
1042 >>> p1 = parsedate("10:29:59")
1046 >>> p2 = parsedate("10:30:00")
1043 >>> p2 = parsedate("10:30:00")
1047 >>> p3 = parsedate("10:30:59")
1044 >>> p3 = parsedate("10:30:59")
1048 >>> p4 = parsedate("10:31:00")
1045 >>> p4 = parsedate("10:31:00")
1049 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1046 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1050 >>> f = matchdate("10:30")
1047 >>> f = matchdate("10:30")
1051 >>> f(p1[0])
1048 >>> f(p1[0])
1052 False
1049 False
1053 >>> f(p2[0])
1050 >>> f(p2[0])
1054 True
1051 True
1055 >>> f(p3[0])
1052 >>> f(p3[0])
1056 True
1053 True
1057 >>> f(p4[0])
1054 >>> f(p4[0])
1058 False
1055 False
1059 >>> f(p5[0])
1056 >>> f(p5[0])
1060 False
1057 False
1061 """
1058 """
1062
1059
1063 def lower(date):
1060 def lower(date):
1064 d = dict(mb="1", d="1")
1061 d = dict(mb="1", d="1")
1065 return parsedate(date, extendeddateformats, d)[0]
1062 return parsedate(date, extendeddateformats, d)[0]
1066
1063
1067 def upper(date):
1064 def upper(date):
1068 d = dict(mb="12", HI="23", M="59", S="59")
1065 d = dict(mb="12", HI="23", M="59", S="59")
1069 for days in ("31", "30", "29"):
1066 for days in ("31", "30", "29"):
1070 try:
1067 try:
1071 d["d"] = days
1068 d["d"] = days
1072 return parsedate(date, extendeddateformats, d)[0]
1069 return parsedate(date, extendeddateformats, d)[0]
1073 except:
1070 except:
1074 pass
1071 pass
1075 d["d"] = "28"
1072 d["d"] = "28"
1076 return parsedate(date, extendeddateformats, d)[0]
1073 return parsedate(date, extendeddateformats, d)[0]
1077
1074
1078 date = date.strip()
1075 date = date.strip()
1079
1076
1080 if not date:
1077 if not date:
1081 raise Abort(_("dates cannot consist entirely of whitespace"))
1078 raise Abort(_("dates cannot consist entirely of whitespace"))
1082 elif date[0] == "<":
1079 elif date[0] == "<":
1083 if not date[1:]:
1080 if not date[1:]:
1084 raise Abort(_("invalid day spec, use '<DATE'"))
1081 raise Abort(_("invalid day spec, use '<DATE'"))
1085 when = upper(date[1:])
1082 when = upper(date[1:])
1086 return lambda x: x <= when
1083 return lambda x: x <= when
1087 elif date[0] == ">":
1084 elif date[0] == ">":
1088 if not date[1:]:
1085 if not date[1:]:
1089 raise Abort(_("invalid day spec, use '>DATE'"))
1086 raise Abort(_("invalid day spec, use '>DATE'"))
1090 when = lower(date[1:])
1087 when = lower(date[1:])
1091 return lambda x: x >= when
1088 return lambda x: x >= when
1092 elif date[0] == "-":
1089 elif date[0] == "-":
1093 try:
1090 try:
1094 days = int(date[1:])
1091 days = int(date[1:])
1095 except ValueError:
1092 except ValueError:
1096 raise Abort(_("invalid day spec: %s") % date[1:])
1093 raise Abort(_("invalid day spec: %s") % date[1:])
1097 if days < 0:
1094 if days < 0:
1098 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1095 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1099 % date[1:])
1096 % date[1:])
1100 when = makedate()[0] - days * 3600 * 24
1097 when = makedate()[0] - days * 3600 * 24
1101 return lambda x: x >= when
1098 return lambda x: x >= when
1102 elif " to " in date:
1099 elif " to " in date:
1103 a, b = date.split(" to ")
1100 a, b = date.split(" to ")
1104 start, stop = lower(a), upper(b)
1101 start, stop = lower(a), upper(b)
1105 return lambda x: x >= start and x <= stop
1102 return lambda x: x >= start and x <= stop
1106 else:
1103 else:
1107 start, stop = lower(date), upper(date)
1104 start, stop = lower(date), upper(date)
1108 return lambda x: x >= start and x <= stop
1105 return lambda x: x >= start and x <= stop
1109
1106
1110 def shortuser(user):
1107 def shortuser(user):
1111 """Return a short representation of a user name or email address."""
1108 """Return a short representation of a user name or email address."""
1112 f = user.find('@')
1109 f = user.find('@')
1113 if f >= 0:
1110 if f >= 0:
1114 user = user[:f]
1111 user = user[:f]
1115 f = user.find('<')
1112 f = user.find('<')
1116 if f >= 0:
1113 if f >= 0:
1117 user = user[f + 1:]
1114 user = user[f + 1:]
1118 f = user.find(' ')
1115 f = user.find(' ')
1119 if f >= 0:
1116 if f >= 0:
1120 user = user[:f]
1117 user = user[:f]
1121 f = user.find('.')
1118 f = user.find('.')
1122 if f >= 0:
1119 if f >= 0:
1123 user = user[:f]
1120 user = user[:f]
1124 return user
1121 return user
1125
1122
1126 def email(author):
1123 def email(author):
1127 '''get email of author.'''
1124 '''get email of author.'''
1128 r = author.find('>')
1125 r = author.find('>')
1129 if r == -1:
1126 if r == -1:
1130 r = None
1127 r = None
1131 return author[author.find('<') + 1:r]
1128 return author[author.find('<') + 1:r]
1132
1129
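# Illustrative sketch of email() and shortuser() in doctest style, using a
# made-up author string of the usual "Name <address>" form:
#
#   >>> from mercurial import util
#   >>> util.email('Foo Bar <foo.bar@example.com>')
#   'foo.bar@example.com'
#   >>> util.shortuser('Foo Bar <foo.bar@example.com>')
#   'foo'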
1133 def _ellipsis(text, maxlength):
1130 def _ellipsis(text, maxlength):
1134 if len(text) <= maxlength:
1131 if len(text) <= maxlength:
1135 return text, False
1132 return text, False
1136 else:
1133 else:
1137 return "%s..." % (text[:maxlength - 3]), True
1134 return "%s..." % (text[:maxlength - 3]), True
1138
1135
1139 def ellipsis(text, maxlength=400):
1136 def ellipsis(text, maxlength=400):
1140 """Trim string to at most maxlength (default: 400) characters."""
1137 """Trim string to at most maxlength (default: 400) characters."""
1141 try:
1138 try:
1142 # use unicode not to split at intermediate multi-byte sequence
1139 # use unicode not to split at intermediate multi-byte sequence
1143 utext, truncated = _ellipsis(text.decode(encoding.encoding),
1140 utext, truncated = _ellipsis(text.decode(encoding.encoding),
1144 maxlength)
1141 maxlength)
1145 if not truncated:
1142 if not truncated:
1146 return text
1143 return text
1147 return utext.encode(encoding.encoding)
1144 return utext.encode(encoding.encoding)
1148 except (UnicodeDecodeError, UnicodeEncodeError):
1145 except (UnicodeDecodeError, UnicodeEncodeError):
1149 return _ellipsis(text, maxlength)[0]
1146 return _ellipsis(text, maxlength)[0]
1150
1147
1151 def bytecount(nbytes):
1148 def bytecount(nbytes):
1152 '''return byte count formatted as readable string, with units'''
1149 '''return byte count formatted as readable string, with units'''
1153
1150
1154 units = (
1151 units = (
1155 (100, 1 << 30, _('%.0f GB')),
1152 (100, 1 << 30, _('%.0f GB')),
1156 (10, 1 << 30, _('%.1f GB')),
1153 (10, 1 << 30, _('%.1f GB')),
1157 (1, 1 << 30, _('%.2f GB')),
1154 (1, 1 << 30, _('%.2f GB')),
1158 (100, 1 << 20, _('%.0f MB')),
1155 (100, 1 << 20, _('%.0f MB')),
1159 (10, 1 << 20, _('%.1f MB')),
1156 (10, 1 << 20, _('%.1f MB')),
1160 (1, 1 << 20, _('%.2f MB')),
1157 (1, 1 << 20, _('%.2f MB')),
1161 (100, 1 << 10, _('%.0f KB')),
1158 (100, 1 << 10, _('%.0f KB')),
1162 (10, 1 << 10, _('%.1f KB')),
1159 (10, 1 << 10, _('%.1f KB')),
1163 (1, 1 << 10, _('%.2f KB')),
1160 (1, 1 << 10, _('%.2f KB')),
1164 (1, 1, _('%.0f bytes')),
1161 (1, 1, _('%.0f bytes')),
1165 )
1162 )
1166
1163
1167 for multiplier, divisor, format in units:
1164 for multiplier, divisor, format in units:
1168 if nbytes >= divisor * multiplier:
1165 if nbytes >= divisor * multiplier:
1169 return format % (nbytes / float(divisor))
1166 return format % (nbytes / float(divisor))
1170 return units[-1][2] % nbytes
1167 return units[-1][2] % nbytes
1171
1168
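# Illustrative sketch of bytecount() in doctest style: the first matching
# (multiplier, divisor) row in the units table above decides both the unit
# and the number of decimals, so larger quantities get fewer decimals:
#
#   >>> from mercurial import util
#   >>> util.bytecount(100)
#   '100 bytes'
#   >>> util.bytecount(2048)
#   '2.00 KB'
#   >>> util.bytecount(1 << 20)
#   '1.00 MB'
#   >>> util.bytecount(200 * (1 << 20))
#   '200 MB'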
1172 def uirepr(s):
1169 def uirepr(s):
1173 # Avoid double backslash in Windows path repr()
1170 # Avoid double backslash in Windows path repr()
1174 return repr(s).replace('\\\\', '\\')
1171 return repr(s).replace('\\\\', '\\')
1175
1172
1176 # delay import of textwrap
1173 # delay import of textwrap
1177 def MBTextWrapper(**kwargs):
1174 def MBTextWrapper(**kwargs):
1178 class tw(textwrap.TextWrapper):
1175 class tw(textwrap.TextWrapper):
1179 """
1176 """
1180 Extend TextWrapper for width-awareness.
1177 Extend TextWrapper for width-awareness.
1181
1178
1182 Neither the number of 'bytes' in any encoding nor the number of
1179 Neither the number of 'bytes' in any encoding nor the number of
1183 'characters' is appropriate for calculating the terminal columns a string occupies.
1180 'characters' is appropriate for calculating the terminal columns a string occupies.
1184
1181
1185 The original TextWrapper implementation uses the built-in 'len()' directly,
1182 The original TextWrapper implementation uses the built-in 'len()' directly,
1186 so overriding is needed to use the width information of each character.
1183 so overriding is needed to use the width information of each character.
1187
1184
1188 In addition, characters classified as 'ambiguous' width are
1185 In addition, characters classified as 'ambiguous' width are
1189 treated as wide in East Asian locales, but as narrow elsewhere.
1186 treated as wide in East Asian locales, but as narrow elsewhere.
1190
1187
1191 This requires a user decision to determine the width of such characters.
1188 This requires a user decision to determine the width of such characters.
1192 """
1189 """
1193 def __init__(self, **kwargs):
1190 def __init__(self, **kwargs):
1194 textwrap.TextWrapper.__init__(self, **kwargs)
1191 textwrap.TextWrapper.__init__(self, **kwargs)
1195
1192
1196 # for compatibility between 2.4 and 2.6
1193 # for compatibility between 2.4 and 2.6
1197 if getattr(self, 'drop_whitespace', None) is None:
1194 if getattr(self, 'drop_whitespace', None) is None:
1198 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1195 self.drop_whitespace = kwargs.get('drop_whitespace', True)
1199
1196
1200 def _cutdown(self, ucstr, space_left):
1197 def _cutdown(self, ucstr, space_left):
1201 l = 0
1198 l = 0
1202 colwidth = encoding.ucolwidth
1199 colwidth = encoding.ucolwidth
1203 for i in xrange(len(ucstr)):
1200 for i in xrange(len(ucstr)):
1204 l += colwidth(ucstr[i])
1201 l += colwidth(ucstr[i])
1205 if space_left < l:
1202 if space_left < l:
1206 return (ucstr[:i], ucstr[i:])
1203 return (ucstr[:i], ucstr[i:])
1207 return ucstr, ''
1204 return ucstr, ''
1208
1205
1209 # overriding of base class
1206 # overriding of base class
1210 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1207 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1211 space_left = max(width - cur_len, 1)
1208 space_left = max(width - cur_len, 1)
1212
1209
1213 if self.break_long_words:
1210 if self.break_long_words:
1214 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1211 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1215 cur_line.append(cut)
1212 cur_line.append(cut)
1216 reversed_chunks[-1] = res
1213 reversed_chunks[-1] = res
1217 elif not cur_line:
1214 elif not cur_line:
1218 cur_line.append(reversed_chunks.pop())
1215 cur_line.append(reversed_chunks.pop())
1219
1216
1220 # this overriding code is imported from TextWrapper of python 2.6
1217 # this overriding code is imported from TextWrapper of python 2.6
1221 # to calculate columns of string by 'encoding.ucolwidth()'
1218 # to calculate columns of string by 'encoding.ucolwidth()'
1222 def _wrap_chunks(self, chunks):
1219 def _wrap_chunks(self, chunks):
1223 colwidth = encoding.ucolwidth
1220 colwidth = encoding.ucolwidth
1224
1221
1225 lines = []
1222 lines = []
1226 if self.width <= 0:
1223 if self.width <= 0:
1227 raise ValueError("invalid width %r (must be > 0)" % self.width)
1224 raise ValueError("invalid width %r (must be > 0)" % self.width)
1228
1225
1229 # Arrange in reverse order so items can be efficiently popped
1226 # Arrange in reverse order so items can be efficiently popped
1230 # from a stack of chucks.
1227 # from a stack of chucks.
1231 chunks.reverse()
1228 chunks.reverse()
1232
1229
1233 while chunks:
1230 while chunks:
1234
1231
1235 # Start the list of chunks that will make up the current line.
1232 # Start the list of chunks that will make up the current line.
1236 # cur_len is just the length of all the chunks in cur_line.
1233 # cur_len is just the length of all the chunks in cur_line.
1237 cur_line = []
1234 cur_line = []
1238 cur_len = 0
1235 cur_len = 0
1239
1236
1240 # Figure out which static string will prefix this line.
1237 # Figure out which static string will prefix this line.
1241 if lines:
1238 if lines:
1242 indent = self.subsequent_indent
1239 indent = self.subsequent_indent
1243 else:
1240 else:
1244 indent = self.initial_indent
1241 indent = self.initial_indent
1245
1242
1246 # Maximum width for this line.
1243 # Maximum width for this line.
1247 width = self.width - len(indent)
1244 width = self.width - len(indent)
1248
1245
1249 # First chunk on line is whitespace -- drop it, unless this
1246 # First chunk on line is whitespace -- drop it, unless this
1250 # is the very beginning of the text (ie. no lines started yet).
1247 # is the very beginning of the text (ie. no lines started yet).
1251 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1248 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
1252 del chunks[-1]
1249 del chunks[-1]
1253
1250
1254 while chunks:
1251 while chunks:
1255 l = colwidth(chunks[-1])
1252 l = colwidth(chunks[-1])
1256
1253
1257 # Can at least squeeze this chunk onto the current line.
1254 # Can at least squeeze this chunk onto the current line.
1258 if cur_len + l <= width:
1255 if cur_len + l <= width:
1259 cur_line.append(chunks.pop())
1256 cur_line.append(chunks.pop())
1260 cur_len += l
1257 cur_len += l
1261
1258
1262 # Nope, this line is full.
1259 # Nope, this line is full.
1263 else:
1260 else:
1264 break
1261 break
1265
1262
1266 # The current line is full, and the next chunk is too big to
1263 # The current line is full, and the next chunk is too big to
1267 # fit on *any* line (not just this one).
1264 # fit on *any* line (not just this one).
1268 if chunks and colwidth(chunks[-1]) > width:
1265 if chunks and colwidth(chunks[-1]) > width:
1269 self._handle_long_word(chunks, cur_line, cur_len, width)
1266 self._handle_long_word(chunks, cur_line, cur_len, width)
1270
1267
1271 # If the last chunk on this line is all whitespace, drop it.
1268 # If the last chunk on this line is all whitespace, drop it.
1272 if (self.drop_whitespace and
1269 if (self.drop_whitespace and
1273 cur_line and cur_line[-1].strip() == ''):
1270 cur_line and cur_line[-1].strip() == ''):
1274 del cur_line[-1]
1271 del cur_line[-1]
1275
1272
1276 # Convert current line back to a string and store it in list
1273 # Convert current line back to a string and store it in list
1277 # of all lines (return value).
1274 # of all lines (return value).
1278 if cur_line:
1275 if cur_line:
1279 lines.append(indent + ''.join(cur_line))
1276 lines.append(indent + ''.join(cur_line))
1280
1277
1281 return lines
1278 return lines
1282
1279
1283 global MBTextWrapper
1280 global MBTextWrapper
1284 MBTextWrapper = tw
1281 MBTextWrapper = tw
1285 return tw(**kwargs)
1282 return tw(**kwargs)
1286
1283
1287 def wrap(line, width, initindent='', hangindent=''):
1284 def wrap(line, width, initindent='', hangindent=''):
1288 maxindent = max(len(hangindent), len(initindent))
1285 maxindent = max(len(hangindent), len(initindent))
1289 if width <= maxindent:
1286 if width <= maxindent:
1290 # adjust for weird terminal size
1287 # adjust for weird terminal size
1291 width = max(78, maxindent + 1)
1288 width = max(78, maxindent + 1)
1292 line = line.decode(encoding.encoding, encoding.encodingmode)
1289 line = line.decode(encoding.encoding, encoding.encodingmode)
1293 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1290 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1294 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1291 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1295 wrapper = MBTextWrapper(width=width,
1292 wrapper = MBTextWrapper(width=width,
1296 initial_indent=initindent,
1293 initial_indent=initindent,
1297 subsequent_indent=hangindent)
1294 subsequent_indent=hangindent)
1298 return wrapper.fill(line).encode(encoding.encoding)
1295 return wrapper.fill(line).encode(encoding.encoding)
1299
1296
1300 def iterlines(iterator):
1297 def iterlines(iterator):
1301 for chunk in iterator:
1298 for chunk in iterator:
1302 for line in chunk.splitlines():
1299 for line in chunk.splitlines():
1303 yield line
1300 yield line
1304
1301
1305 def expandpath(path):
1302 def expandpath(path):
1306 return os.path.expanduser(os.path.expandvars(path))
1303 return os.path.expanduser(os.path.expandvars(path))
1307
1304
1308 def hgcmd():
1305 def hgcmd():
1309 """Return the command used to execute current hg
1306 """Return the command used to execute current hg
1310
1307
1311 This is different from hgexecutable() because on Windows we want
1308 This is different from hgexecutable() because on Windows we want
1312 to avoid things opening new shell windows like batch files, so we
1309 to avoid things opening new shell windows like batch files, so we
1313 get either the python call or current executable.
1310 get either the python call or current executable.
1314 """
1311 """
1315 if mainfrozen():
1312 if mainfrozen():
1316 return [sys.executable]
1313 return [sys.executable]
1317 return gethgcmd()
1314 return gethgcmd()
1318
1315
1319 def rundetached(args, condfn):
1316 def rundetached(args, condfn):
1320 """Execute the argument list in a detached process.
1317 """Execute the argument list in a detached process.
1321
1318
1322 condfn is a callable which is called repeatedly and should return
1319 condfn is a callable which is called repeatedly and should return
1323 True once the child process is known to have started successfully.
1320 True once the child process is known to have started successfully.
1324 At this point, the child process PID is returned. If the child
1321 At this point, the child process PID is returned. If the child
1325 process fails to start or finishes before condfn() evaluates to
1322 process fails to start or finishes before condfn() evaluates to
1326 True, return -1.
1323 True, return -1.
1327 """
1324 """
1328 # Windows case is easier because the child process is either
1325 # Windows case is easier because the child process is either
1329 # successfully starting and validating the condition or exiting
1326 # successfully starting and validating the condition or exiting
1330 # on failure. We just poll on its PID. On Unix, if the child
1327 # on failure. We just poll on its PID. On Unix, if the child
1331 # process fails to start, it will be left in a zombie state until
1328 # process fails to start, it will be left in a zombie state until
1332 # the parent waits on it, which we cannot do since we expect a long
1329 # the parent waits on it, which we cannot do since we expect a long
1333 # running process on success. Instead we listen for SIGCHLD telling
1330 # running process on success. Instead we listen for SIGCHLD telling
1334 # us our child process terminated.
1331 # us our child process terminated.
1335 terminated = set()
1332 terminated = set()
1336 def handler(signum, frame):
1333 def handler(signum, frame):
1337 terminated.add(os.wait())
1334 terminated.add(os.wait())
1338 prevhandler = None
1335 prevhandler = None
1339 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1336 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1340 if SIGCHLD is not None:
1337 if SIGCHLD is not None:
1341 prevhandler = signal.signal(SIGCHLD, handler)
1338 prevhandler = signal.signal(SIGCHLD, handler)
1342 try:
1339 try:
1343 pid = spawndetached(args)
1340 pid = spawndetached(args)
1344 while not condfn():
1341 while not condfn():
1345 if ((pid in terminated or not testpid(pid))
1342 if ((pid in terminated or not testpid(pid))
1346 and not condfn()):
1343 and not condfn()):
1347 return -1
1344 return -1
1348 time.sleep(0.1)
1345 time.sleep(0.1)
1349 return pid
1346 return pid
1350 finally:
1347 finally:
1351 if prevhandler is not None:
1348 if prevhandler is not None:
1352 signal.signal(signal.SIGCHLD, prevhandler)
1349 signal.signal(signal.SIGCHLD, prevhandler)
1353
1350
1354 try:
1351 try:
1355 any, all = any, all
1352 any, all = any, all
1356 except NameError:
1353 except NameError:
1357 def any(iterable):
1354 def any(iterable):
1358 for i in iterable:
1355 for i in iterable:
1359 if i:
1356 if i:
1360 return True
1357 return True
1361 return False
1358 return False
1362
1359
1363 def all(iterable):
1360 def all(iterable):
1364 for i in iterable:
1361 for i in iterable:
1365 if not i:
1362 if not i:
1366 return False
1363 return False
1367 return True
1364 return True
1368
1365
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = re.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)

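# Illustrative sketch (not in the original module): interpolation with a
# hypothetical mapping, using '%' as the prefix.
#
#   >>> interpolate('%', {'foo': 'bar'}, 'say %foo')
#   'say bar'
#
# With escape_prefix=True a doubled prefix collapses to a single literal
# one, so interpolate(r'\$', {'foo': 'bar'}, 'say $foo for $$1',
# escape_prefix=True) would be expected to yield 'say bar for $1'.
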
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)

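# Illustrative sketch (not in the original module): integers and numeric
# strings pass straight through, while service names are resolved via the
# system services database, so the lookup result depends on the platform.
#
#   >>> getport(8080)
#   8080
#   >>> getport('8080')
#   8080
#   >>> getport('http')      # typically 80 where /etc/services defines it
#   80
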
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower(), None)

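# Illustrative sketch (not in the original module): parsing is
# case-insensitive, and unknown values map to None rather than raising.
#
#   >>> parsebool('Yes'), parsebool('off'), parsebool('maybe')
#   (True, False, None)
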
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)

def _urlunquote(s):
    """unquote('abc%20def') -> 'abc def'."""
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            s += '%' + item
        except UnicodeDecodeError:
            s += unichr(int(item[:2], 16)) + item[2:]
    return s

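# Illustrative sketch (not in the original module): escapes that do not form
# a valid hex pair are left untouched instead of raising.
#
#   >>> _urlunquote('abc%20def')
#   'abc def'
#   >>> _urlunquote('100%zz')
#   '100%zz'
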
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:"
    _matchscheme = re.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                    path = path
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

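    # Illustrative sketch (not in the original module): for a URL carrying
    # credentials, authinfo() strips them from the URL it hands back and
    # packs them into the tuple expected by urllib2's password manager,
    # roughly:
    #
    #   url('http://joe:xyz@example.com/repo').authinfo()
    #   -> ('http://example.com/repo',
    #       (None, ('http://example.com/repo', 'example.com'), 'joe', 'xyz'))
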
    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()

def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

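# Illustrative sketch (not in the original module): urllocalpath() turns a
# local file:// URL back into a filesystem path and passes plain paths
# through untouched.
#
#   >>> urllocalpath('file:///tmp/repo')
#   '/tmp/repo'
#   >>> urllocalpath('/tmp/repo')
#   '/tmp/repo'
#   >>> hasdriveletter('c:/temp')
#   True
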
def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return str(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)

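# Illustrative sketch (not in the original module): both helpers round-trip
# through the url class, so the rest of the URL is preserved.
#
#   >>> hidepassword('http://joe:xyz@example.com/repo')
#   'http://joe:***@example.com/repo'
#   >>> removeauth('http://joe:xyz@example.com/repo')
#   'http://example.com/repo'
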
def isatty(fd):
    try:
        return fd.isatty()
    except AttributeError:
        return False