##// END OF EJS Templates
i18n: fix "% inside _()" problems...
FUJIWARA Katsunori -
r20869:9658a799 stable
parent child Browse files
Show More
@@ -1,553 +1,553 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # check-code - a style and portability checker for Mercurial
3 # check-code - a style and portability checker for Mercurial
4 #
4 #
5 # Copyright 2010 Matt Mackall <mpm@selenic.com>
5 # Copyright 2010 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """style and portability checker for Mercurial
10 """style and portability checker for Mercurial
11
11
12 when a rule triggers wrong, do one of the following (prefer one from top):
12 when a rule triggers wrong, do one of the following (prefer one from top):
13 * do the work-around the rule suggests
13 * do the work-around the rule suggests
14 * doublecheck that it is a false match
14 * doublecheck that it is a false match
15 * improve the rule pattern
15 * improve the rule pattern
16 * add an ignore pattern to the rule (3rd arg) which matches your good line
16 * add an ignore pattern to the rule (3rd arg) which matches your good line
17 (you can append a short comment and match this, like: #re-raises, # no-py24)
17 (you can append a short comment and match this, like: #re-raises, # no-py24)
18 * change the pattern to a warning and list the exception in test-check-code-hg
18 * change the pattern to a warning and list the exception in test-check-code-hg
19 * ONLY use no--check-code for skipping entire files from external sources
19 * ONLY use no--check-code for skipping entire files from external sources
20 """
20 """
21
21
22 import re, glob, os, sys
22 import re, glob, os, sys
23 import keyword
23 import keyword
24 import optparse
24 import optparse
25 try:
25 try:
26 import re2
26 import re2
27 except ImportError:
27 except ImportError:
28 re2 = None
28 re2 = None
29
29
def compilere(pat, multiline=False):
    """Compile pat, preferring the re2 engine when it is available.

    multiline=True prepends the '(?m)' inline flag to the pattern.
    """
    if multiline:
        pat = '(?m)' + pat
    if not re2:
        return re.compile(pat)
    try:
        return re2.compile(pat)
    except re2.error:
        # re2 rejects some constructs the stdlib accepts; fall back
        return re.compile(pat)
39
39
def repquote(m):
    """Blank out a quoted string while preserving its length and layout.

    Each character of the matched 'text' group is mapped through a
    256-entry translation table: spaces and newlines are kept, letters
    become 'x', digits become 'n', '.' becomes 'p'.  Other characters go
    through tochr[fromc.find(c)]; since find() returns -1 on a miss,
    they end up as tochr[-1] ('q').  The surrounding quote characters
    are re-attached unchanged.
    """
    fromc = '.:'
    tochr = 'pq'
    def encodechr(i):
        # guard for values outside the byte range (table below only
        # feeds 0..255, so this is defensive)
        if i > 255:
            return 'u'
        c = chr(i)
        if c in ' \n':
            return c
        if c.isalpha():
            return 'x'
        if c.isdigit():
            return 'n'
        try:
            return tochr[fromc.find(c)]
        except (ValueError, IndexError):
            return 'o'
    t = m.group('text')
    # Python 2 str.translate takes a 256-character mapping string
    tt = ''.join(encodechr(i) for i in xrange(256))
    t = t.translate(tt)
    return m.group('quote') + t + m.group('quote')
61
61
def reppython(m):
    """Blank a matched python comment or string literal.

    A '#' comment has every non-trailing-whitespace character replaced
    by '#'; anything else is handed off to repquote().
    """
    comment = m.group('comment')
    if not comment:
        return repquote(m)
    keep = len(comment.rstrip())
    return "#" * keep + comment[keep:]
68
68
def repcomment(m):
    """Replace a shell comment's text with '#' characters, keeping indent."""
    indent, body = m.group(1), m.group(2)
    return indent + "#" * len(body)
71
71
def repccomment(m):
    """Blank a C comment body: non-space chars (and the single space
    right after each newline) become 'x'; '*/' is re-appended."""
    body = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
    return m.group(1) + body + "*/"
75
75
def repcallspaces(m):
    """Strip leading whitespace from continuation lines of a call."""
    args = re.sub(r"\n\s+", "\n", m.group(2))
    return m.group(1) + args
79
79
def repinclude(m):
    """Replace an #include target with the placeholder '<foo>'."""
    prefix = m.group(1)
    return prefix + "<foo>"
82
82
def rephere(m):
    """Blank a here-document body: every non-space char becomes 'x'."""
    masked = re.sub(r"\S", "x", m.group(2))
    return m.group(1) + masked
86
86
87
87
# Checks applied to shell test scripts.  Two sub-lists: [0] holds hard
# errors, [1] holds warnings (only reported with --warnings).  Each
# entry is (pattern, message[, ignore-pattern]).
testpats = [
  [
    (r'pushd|popd', "don't use 'pushd' or 'popd', use 'cd'"),
    (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"),
    (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
    (r'(?<!hg )grep.*-a', "don't use 'grep -a', use in-line python"),
    (r'sed.*-i', "don't use 'sed -i', use a temporary file"),
    (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"),
    (r'echo -n', "don't use 'echo -n', use printf"),
    (r'(^| )wc[^|]*$\n(?!.*\(re\))', "filter wc output"),
    (r'head -c', "don't use 'head -c', use 'dd'"),
    (r'tail -n', "don't use the '-n' option to tail, just use '-<num>'"),
    (r'sha1sum', "don't use sha1sum, use $TESTDIR/md5sum.py"),
    (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
    (r'printf.*[^\\]\\([1-9]|0\d)', "don't use 'printf \NNN', use Python"),
    (r'printf.*[^\\]\\x', "don't use printf \\x, use Python"),
    (r'\$\(.*\)', "don't use $(expr), use `expr`"),
    (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
    (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
     "use egrep for extended grep syntax"),
    (r'/bin/', "don't use explicit paths for tools"),
    (r'[^\n]\Z', "no trailing newline"),
    (r'export.*=', "don't export and assign at once"),
    (r'^source\b', "don't use 'source', use '.'"),
    (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
    (r'ls +[^|\n-]+ +-', "options to 'ls' must come before filenames"),
    (r'[^>\n]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
    (r'^stop\(\)', "don't use 'stop' as a shell function name"),
    (r'(\[|\btest\b).*-e ', "don't use 'test -e', use 'test -f'"),
    (r'^alias\b.*=', "don't use alias, use a function"),
    (r'if\s*!', "don't use '!' to negate exit status"),
    (r'/dev/u?random', "don't use entropy, use /dev/zero"),
    (r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"),
    (r'^( *)\t', "don't use tabs to indent"),
    (r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)',
     "put a backslash-escaped newline after sed 'i' command"),
  ],
  # warnings
  [
    (r'^function', "don't use 'function', use old style"),
    (r'^diff.*-\w*N', "don't use 'diff -N'"),
    (r'\$PWD|\${PWD}', "don't use $PWD, use `pwd`"),
    (r'^([^"\'\n]|("[^"\n]*")|(\'[^\'\n]*\'))*\^', "^ must be quoted"),
    (r'kill (`|\$\()', "don't use kill, use killdaemons.py")
  ]
]
134
134
# Filters run over shell test scripts before matching testpats: blank
# out comments and here-document bodies so they can't trigger checks.
testfilters = [
    (r"( *)(#([^\n]*\S)?)", repcomment),
    (r"<<(\S+)((.|\n)*?\n\1)", rephere),
]
139
139
# Shared message for output lines that need a (glob) to also match
# backslash-separated Windows paths.
winglobmsg = "use (glob) to match Windows paths too"
# Prefix matching a command line (' $ ') in a unified .t test.
uprefix = r"^ \$ "
# Checks for unified tests (.t files): [0] errors, [1] warnings.
utestpats = [
  [
    (r'^(\S.*|| [$>] .*)[ \t]\n', "trailing whitespace on non-output"),
    (uprefix + r'.*\|\s*sed[^|>\n]*\n',
     "use regex test output patterns instead of sed"),
    (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
    (uprefix + r'.*(?<!\[)\$\?', "explicit exit code checks unnecessary"),
    (uprefix + r'.*\|\| echo.*(fail|error)',
     "explicit exit code checks unnecessary"),
    (uprefix + r'set -e', "don't use set -e"),
    (uprefix + r'(\s|fi\b|done\b)', "use > for continued lines"),
    (r'^ saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg),
    (r'^ changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$',
     winglobmsg),
    (r'^ pulling from \$TESTTMP/.*[^)]$', winglobmsg,
     '\$TESTTMP/unix-repo$'), # in test-issue1802.t which skipped on windows
    (r'^ reverting .*/.*[^)]$', winglobmsg),
    (r'^ cloning subrepo \S+/.*[^)]$', winglobmsg),
    (r'^ pushing to \$TESTTMP/.*[^)]$', winglobmsg),
    (r'^ pushing subrepo \S+/\S+ to.*[^)]$', winglobmsg),
    (r'^ moving \S+/.*[^)]$', winglobmsg),
    (r'^ no changes made to subrepo since.*/.*[^)]$', winglobmsg),
    (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg),
  ],
  # warnings
  [
    (r'^ [^*?/\n]* \(glob\)$',
     "glob match with no glob character (?*/)"),
  ]
]
172
172
# Fold the shell-script checks into the unified-test checks: each shell
# pattern (error list 0 and warning list 1) is wrapped so it only fires
# on ' $ '/' > ' command lines of .t tests.
for i in [0, 1]:
    for p, m in testpats[i]:
        if p.startswith(r'^'):
            # anchored patterns re-anchor right after the prompt
            p = r"^ [$>] (%s)" % p[1:]
        else:
            p = r"^ [$>] .*(%s)" % p
        utestpats[i].append((p, m))
180
180
# Filters for unified tests: blank here-documents (whose terminator is
# on a ' > ' continuation line) and comments before matching utestpats.
utestfilters = [
    (r"<<(\S+)((.|\n)*?\n > \1)", rephere),
    (r"( *)(#([^\n]*\S)?)", repcomment),
]
185
185
# Checks for python source files: [0] errors, [1] warnings.  Each entry
# is (pattern, message[, ignore-pattern]).  Patterns run against text
# already blanked by pyfilters, so quoted strings appear as x/n/o runs.
pypats = [
  [
    (r'^\s*def\s*\w+\s*\(.*,\s*\(',
     "tuple parameter unpacking not available in Python 3+"),
    (r'lambda\s*\(.*,.*\)',
     "tuple parameter unpacking not available in Python 3+"),
    (r'import (.+,[^.]+\.[^.]+|[^.]+\.[^.]+,)',
     '2to3 can\'t always rewrite "import qux, foo.bar", '
     'use "import foo.bar" on its own line instead.'),
    (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
    (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
    (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
    (r'\s<>\s', '<> operator is not available in Python 3+, use !='),
    (r'^\s*\t', "don't use tabs"),
    (r'\S;\s*\n', "semicolon"),
    # concatenated string literals inside _() are allowed; a '%' right
    # after the closing quote(s) is what's flagged
    (r'[^_]_\((?:"[^"]+"[ \t\n+]*)+%', "don't use % inside _()"),
    (r"[^_]_\((?:'[^']+'[ \t\n+]*)+%", "don't use % inside _()"),
    (r'(\w|\)),\w', "missing whitespace after ,"),
    (r'(\w|\))[+/*\-<>]\w', "missing whitespace in expression"),
    (r'^\s+(\w|\.)+=\w[^,()\n]*$', "missing whitespace in assignment"),
    (r'(\s+)try:\n((?:\n|\1\s.*\n)+?)\1except.*?:\n'
     r'((?:\n|\1\s.*\n)+?)\1finally:', 'no try/except/finally in Python 2.4'),
    (r'(?<!def)(\s+|^|\()next\(.+\)',
     'no next(foo) in Python 2.4 and 2.5, use foo.next() instead'),
    (r'(\s+)try:\n((?:\n|\1\s.*\n)*?)\1\s*yield\b.*?'
     r'((?:\n|\1\s.*\n)+?)\1finally:',
     'no yield inside try/finally in Python 2.4'),
    (r'.{81}', "line too long"),
    (r' x+[xo][\'"]\n\s+[\'"]x', 'string join across lines with no space'),
    (r'[^\n]\Z', "no trailing newline"),
    (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
#    (r'^\s+[^_ \n][^_. \n]+_[^_\n]+\s*=',
#     "don't use underbars in identifiers"),
    (r'^\s+(self\.)?[A-za-z][a-z0-9]+[A-Z]\w* = ',
     "don't use camelcase in identifiers"),
    (r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+',
     "linebreak after :"),
    (r'class\s[^( \n]+:', "old-style class, use class foo(object)"),
    (r'class\s[^( \n]+\(\):',
     "class foo() not available in Python 2.4, use class foo(object)"),
    (r'\b(%s)\(' % '|'.join(keyword.kwlist),
     "Python keyword is not a function"),
    (r',]', "unneeded trailing ',' in list"),
#    (r'class\s[A-Z][^\(]*\((?!Exception)',
#     "don't capitalize non-exception classes"),
#    (r'in range\(', "use xrange"),
#    (r'^\s*print\s+', "avoid using print in core and extensions"),
    (r'[\x80-\xff]', "non-ASCII character literal"),
    (r'("\')\.format\(', "str.format() not available in Python 2.4"),
    (r'^\s*with\s+', "with not available in Python 2.4"),
    (r'\.isdisjoint\(', "set.isdisjoint not available in Python 2.4"),
    (r'^\s*except.* as .*:', "except as not available in Python 2.4"),
    (r'^\s*os\.path\.relpath', "relpath not available in Python 2.4"),
    (r'(?<!def)\s+(any|all|format)\(',
     "any/all/format not available in Python 2.4", 'no-py24'),
    (r'(?<!def)\s+(callable)\(',
     "callable not available in Python 3, use getattr(f, '__call__', None)"),
    (r'if\s.*\selse', "if ... else form not available in Python 2.4"),
    (r'^\s*(%s)\s\s' % '|'.join(keyword.kwlist),
     "gratuitous whitespace after Python keyword"),
    (r'([\(\[][ \t]\S)|(\S[ \t][\)\]])', "gratuitous whitespace in () or []"),
#    (r'\s\s=', "gratuitous whitespace before ="),
    (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
     "missing whitespace around operator"),
    (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\s',
     "missing whitespace around operator"),
    (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=|%=)\S',
     "missing whitespace around operator"),
    (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]',
     "wrong whitespace around ="),
    (r'\([^()]*( =[^=]|[^<>!=]= )',
     "no whitespace around = for named parameters"),
    (r'raise Exception', "don't raise generic exceptions"),
    (r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
     "don't use old-style two-argument raise, use Exception(message)"),
    (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
    (r' [=!]=\s+(True|False|None)',
     "comparison with singleton, use 'is' or 'is not' instead"),
    (r'^\s*(while|if) [01]:',
     "use True/False for constant Boolean expression"),
    (r'(?:(?<!def)\s+|\()hasattr',
     'hasattr(foo, bar) is broken, use util.safehasattr(foo, bar) instead'),
    (r'opener\([^)]*\).read\(',
     "use opener.read() instead"),
    (r'BaseException', 'not in Python 2.4, use Exception'),
    (r'os\.path\.relpath', 'os.path.relpath is not in Python 2.5'),
    (r'opener\([^)]*\).write\(',
     "use opener.write() instead"),
    (r'[\s\(](open|file)\([^)]*\)\.read\(',
     "use util.readfile() instead"),
    (r'[\s\(](open|file)\([^)]*\)\.write\(',
     "use util.writefile() instead"),
    (r'^[\s\(]*(open(er)?|file)\([^)]*\)',
     "always assign an opened file to a variable, and close it afterwards"),
    (r'[\s\(](open|file)\([^)]*\)\.',
     "always assign an opened file to a variable, and close it afterwards"),
    (r'(?i)descendent', "the proper spelling is descendAnt"),
    (r'\.debug\(\_', "don't mark debug messages for translation"),
    (r'\.strip\(\)\.split\(\)', "no need to strip before splitting"),
    (r'^\s*except\s*:', "naked except clause", r'#.*re-raises'),
    (r':\n( )*( ){1,3}[^ ]', "must indent 4 spaces"),
    (r'ui\.(status|progress|write|note|warn)\([\'\"]x',
     "missing _() in ui message (use () to hide false-positives)"),
    (r'release\(.*wlock, .*lock\)', "wrong lock release order"),
  ],
  # warnings
  [
    (r'(^| )pp +xxxxqq[ \n][^\n]', "add two newlines after '.. note::'"),
  ]
]
296
296
# Filter run over python sources before matching pypats: blank out
# comments and string literals (reppython keeps line/column layout so
# positions in reported matches stay meaningful).
pyfilters = [
    (r"""(?msx)(?P<comment>\#.*?$)|
         ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
          (?P<text>(([^\\]|\\.)*?))
          (?P=quote))""", reppython),
]
303
303
# .txt files need no pre-filtering.
txtfilters = []

# Checks for .txt documentation files: [0] errors, [1] warnings (none).
txtpats = [
  [
    ('\s$', 'trailing whitespace'),
  ],
  []
]
312
312
# Checks for C sources: [0] errors, [1] warnings (none).
cpats = [
  [
    (r'//', "don't use //-style comments"),
    (r'^ ', "don't use spaces to indent"),
    (r'\S\t', "don't use tabs except for indent"),
    (r'(\S[ \t]+|^[ \t]+)\n', "trailing whitespace"),
    (r'.{81}', "line too long"),
    (r'(while|if|do|for)\(', "use space after while/if/do/for"),
    (r'return\(', "return is not a function"),
    (r' ;', "no space before ;"),
    (r'[)][{]', "space between ) and {"),
    (r'\w+\* \w+', "use int *foo, not int* foo"),
    (r'\W\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
    (r'\w+ (\+\+|--)', "use foo++, not foo ++"),
    (r'\w,\w', "missing whitespace after ,"),
    (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
    (r'^#\s+\w', "use #foo, not # foo"),
    (r'[^\n]\Z', "no trailing newline"),
    (r'^\s*#import\b', "use only #include in standard C code"),
  ],
  # warnings
  []
]
336
336
# Filters run over C sources before matching cpats: blank comments,
# string literals, include targets, and call-argument continuations.
cfilters = [
    (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
    (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
    (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
    (r'(\()([^)]+\))', repcallspaces),
]
343
343
# Layering check applied only to mercurial/util.py (see `checks` below).
inutilpats = [
  [
    (r'\bui\.', "don't use ui in util"),
  ],
  # warnings
  []
]
351
351
# Layering check applied only to mercurial/revlog.py (see `checks` below).
inrevlogpats = [
  [
    (r'\brepo\.', "don't use repo in revlog"),
  ],
  # warnings
  []
]
359
359
# Registry of file classes: (name, filename regex, filters, patterns).
# checkfile() runs every entry whose regex matches the file path.
checks = [
    ('python', r'.*\.(py|cgi)$', pyfilters, pypats),
    ('test script', r'(.*/)?test-[^.~]*$', testfilters, testpats),
    ('c', r'.*\.[ch]$', cfilters, cpats),
    ('unified test', r'.*\.t$', utestfilters, utestpats),
    ('layering violation repo in revlog', r'mercurial/revlog\.py', pyfilters,
     inrevlogpats),
    ('layering violation ui in util', r'mercurial/util\.py', pyfilters,
     inutilpats),
    ('txt', r'.*\.txt$', txtfilters, txtpats),
]
371
371
def _preparepats():
    """Compile all check patterns and filters in `checks`, in place.

    Each error/warning pattern is rewritten for multi-line matching
    before compilation: '\s' is narrowed to '[ \t]' (so it cannot eat
    newlines) and '[^...]' classes are made to exclude '\n'.
    """
    for c in checks:
        failandwarn = c[-1]
        for pats in failandwarn:
            for i, pseq in enumerate(pats):
                # fix-up regexes for multi-line searches
                p = pseq[0]
                # \s doesn't match \n
                p = re.sub(r'(?<!\\)\\s', r'[ \\t]', p)
                # [^...] doesn't match newline
                p = re.sub(r'(?<!\\)\[\^', r'[^\\n', p)

                # keep any trailing (message[, ignore]) elements as-is
                pats[i] = (re.compile(p, re.MULTILINE),) + pseq[1:]
        filters = c[2]
        for i, flt in enumerate(filters):
            filters[i] = re.compile(flt[0]), flt[1]
_preparepats()
389
389
class norepeatlogger(object):
    """Error reporter that prints each faulty line's header only once.

    Consecutive errors for the same (file, line number, line) triple
    share a single "file:line" header and "> line" printout; only the
    per-error message is repeated.
    """
    def __init__(self):
        # last (fname, lineno, line) reported, used to suppress duplicates
        self._lastseen = None

    def log(self, fname, lineno, line, msg, blame):
        """print error related a to given line of a given file.

        The faulty line will also be printed but only once in the case
        of multiple errors.

        :fname: filename
        :lineno: line number
        :line: actual content of the line
        :msg: error message
        :blame: annotation info to show in the header, if truthy
        """
        msgid = fname, lineno, line
        if msgid != self._lastseen:
            if blame:
                print "%s:%d (%s):" % (fname, lineno, blame)
            else:
                print "%s:%d:" % (fname, lineno)
            print " > %s" % line
            self._lastseen = msgid
        print " " + msg
414
414
# shared default logger instance (its .log is the default logfunc of
# checkfile), so duplicate-line suppression spans calls
_defaultlogger = norepeatlogger()
416
416
def getblame(f):
    """Return a list of (line, user, rev) tuples for file f, obtained
    by running 'hg annotate -un' on it."""
    annotated = []
    for raw in os.popen('hg annotate -un %s' % f):
        meta, text = raw.split(':', 1)
        user, rev = meta.split()
        # drop the first char and trailing newline around the line text
        # (assumes 'user rev: line\n' output shape — TODO confirm)
        annotated.append((text[1:-1], user, rev))
    return annotated
424
424
425 def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
425 def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
426 blame=False, debug=False, lineno=True):
426 blame=False, debug=False, lineno=True):
427 """checks style and portability of a given file
427 """checks style and portability of a given file
428
428
429 :f: filepath
429 :f: filepath
430 :logfunc: function used to report error
430 :logfunc: function used to report error
431 logfunc(filename, linenumber, linecontent, errormessage)
431 logfunc(filename, linenumber, linecontent, errormessage)
432 :maxerr: number of error to display before aborting.
432 :maxerr: number of error to display before aborting.
433 Set to false (default) to report all errors
433 Set to false (default) to report all errors
434
434
435 return True if no error is found, False otherwise.
435 return True if no error is found, False otherwise.
436 """
436 """
437 blamecache = None
437 blamecache = None
438 result = True
438 result = True
439 for name, match, filters, pats in checks:
439 for name, match, filters, pats in checks:
440 if debug:
440 if debug:
441 print name, f
441 print name, f
442 fc = 0
442 fc = 0
443 if not re.match(match, f):
443 if not re.match(match, f):
444 if debug:
444 if debug:
445 print "Skipping %s for %s it doesn't match %s" % (
445 print "Skipping %s for %s it doesn't match %s" % (
446 name, match, f)
446 name, match, f)
447 continue
447 continue
448 try:
448 try:
449 fp = open(f)
449 fp = open(f)
450 except IOError, e:
450 except IOError, e:
451 print "Skipping %s, %s" % (f, str(e).split(':', 1)[0])
451 print "Skipping %s, %s" % (f, str(e).split(':', 1)[0])
452 continue
452 continue
453 pre = post = fp.read()
453 pre = post = fp.read()
454 fp.close()
454 fp.close()
455 if "no-" "check-code" in pre:
455 if "no-" "check-code" in pre:
456 print "Skipping %s it has no-" "check-code" % f
456 print "Skipping %s it has no-" "check-code" % f
457 return "Skip" # skip checking this file
457 return "Skip" # skip checking this file
458 for p, r in filters:
458 for p, r in filters:
459 post = re.sub(p, r, post)
459 post = re.sub(p, r, post)
460 nerrs = len(pats[0]) # nerr elements are errors
460 nerrs = len(pats[0]) # nerr elements are errors
461 if warnings:
461 if warnings:
462 pats = pats[0] + pats[1]
462 pats = pats[0] + pats[1]
463 else:
463 else:
464 pats = pats[0]
464 pats = pats[0]
465 # print post # uncomment to show filtered version
465 # print post # uncomment to show filtered version
466
466
467 if debug:
467 if debug:
468 print "Checking %s for %s" % (name, f)
468 print "Checking %s for %s" % (name, f)
469
469
470 prelines = None
470 prelines = None
471 errors = []
471 errors = []
472 for i, pat in enumerate(pats):
472 for i, pat in enumerate(pats):
473 if len(pat) == 3:
473 if len(pat) == 3:
474 p, msg, ignore = pat
474 p, msg, ignore = pat
475 else:
475 else:
476 p, msg = pat
476 p, msg = pat
477 ignore = None
477 ignore = None
478 if i >= nerrs:
478 if i >= nerrs:
479 msg = "warning: " + msg
479 msg = "warning: " + msg
480
480
481 pos = 0
481 pos = 0
482 n = 0
482 n = 0
483 for m in p.finditer(post):
483 for m in p.finditer(post):
484 if prelines is None:
484 if prelines is None:
485 prelines = pre.splitlines()
485 prelines = pre.splitlines()
486 postlines = post.splitlines(True)
486 postlines = post.splitlines(True)
487
487
488 start = m.start()
488 start = m.start()
489 while n < len(postlines):
489 while n < len(postlines):
490 step = len(postlines[n])
490 step = len(postlines[n])
491 if pos + step > start:
491 if pos + step > start:
492 break
492 break
493 pos += step
493 pos += step
494 n += 1
494 n += 1
495 l = prelines[n]
495 l = prelines[n]
496
496
497 if ignore and re.search(ignore, l, re.MULTILINE):
497 if ignore and re.search(ignore, l, re.MULTILINE):
498 if debug:
498 if debug:
499 print "Skipping %s for %s:%s (ignore pattern)" % (
499 print "Skipping %s for %s:%s (ignore pattern)" % (
500 name, f, n)
500 name, f, n)
501 continue
501 continue
502 bd = ""
502 bd = ""
503 if blame:
503 if blame:
504 bd = 'working directory'
504 bd = 'working directory'
505 if not blamecache:
505 if not blamecache:
506 blamecache = getblame(f)
506 blamecache = getblame(f)
507 if n < len(blamecache):
507 if n < len(blamecache):
508 bl, bu, br = blamecache[n]
508 bl, bu, br = blamecache[n]
509 if bl == l:
509 if bl == l:
510 bd = '%s@%s' % (bu, br)
510 bd = '%s@%s' % (bu, br)
511
511
512 errors.append((f, lineno and n + 1, l, msg, bd))
512 errors.append((f, lineno and n + 1, l, msg, bd))
513 result = False
513 result = False
514
514
515 errors.sort()
515 errors.sort()
516 for e in errors:
516 for e in errors:
517 logfunc(*e)
517 logfunc(*e)
518 fc += 1
518 fc += 1
519 if maxerr and fc >= maxerr:
519 if maxerr and fc >= maxerr:
520 print " (too many errors, giving up)"
520 print " (too many errors, giving up)"
521 break
521 break
522
522
523 return result
523 return result
524
524
525 if __name__ == "__main__":
525 if __name__ == "__main__":
526 parser = optparse.OptionParser("%prog [options] [files]")
526 parser = optparse.OptionParser("%prog [options] [files]")
527 parser.add_option("-w", "--warnings", action="store_true",
527 parser.add_option("-w", "--warnings", action="store_true",
528 help="include warning-level checks")
528 help="include warning-level checks")
529 parser.add_option("-p", "--per-file", type="int",
529 parser.add_option("-p", "--per-file", type="int",
530 help="max warnings per file")
530 help="max warnings per file")
531 parser.add_option("-b", "--blame", action="store_true",
531 parser.add_option("-b", "--blame", action="store_true",
532 help="use annotate to generate blame info")
532 help="use annotate to generate blame info")
533 parser.add_option("", "--debug", action="store_true",
533 parser.add_option("", "--debug", action="store_true",
534 help="show debug information")
534 help="show debug information")
535 parser.add_option("", "--nolineno", action="store_false",
535 parser.add_option("", "--nolineno", action="store_false",
536 dest='lineno', help="don't show line numbers")
536 dest='lineno', help="don't show line numbers")
537
537
538 parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False,
538 parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False,
539 lineno=True)
539 lineno=True)
540 (options, args) = parser.parse_args()
540 (options, args) = parser.parse_args()
541
541
542 if len(args) == 0:
542 if len(args) == 0:
543 check = glob.glob("*")
543 check = glob.glob("*")
544 else:
544 else:
545 check = args
545 check = args
546
546
547 ret = 0
547 ret = 0
548 for f in check:
548 for f in check:
549 if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
549 if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
550 blame=options.blame, debug=options.debug,
550 blame=options.blame, debug=options.debug,
551 lineno=options.lineno):
551 lineno=options.lineno):
552 ret = 1
552 ret = 1
553 sys.exit(ret)
553 sys.exit(ret)
@@ -1,350 +1,350 b''
1 """automatically manage newlines in repository files
1 """automatically manage newlines in repository files
2
2
3 This extension allows you to manage the type of line endings (CRLF or
3 This extension allows you to manage the type of line endings (CRLF or
4 LF) that are used in the repository and in the local working
4 LF) that are used in the repository and in the local working
5 directory. That way you can get CRLF line endings on Windows and LF on
5 directory. That way you can get CRLF line endings on Windows and LF on
6 Unix/Mac, thereby letting everybody use their OS native line endings.
6 Unix/Mac, thereby letting everybody use their OS native line endings.
7
7
8 The extension reads its configuration from a versioned ``.hgeol``
8 The extension reads its configuration from a versioned ``.hgeol``
9 configuration file found in the root of the working copy. The
9 configuration file found in the root of the working copy. The
10 ``.hgeol`` file use the same syntax as all other Mercurial
10 ``.hgeol`` file use the same syntax as all other Mercurial
11 configuration files. It uses two sections, ``[patterns]`` and
11 configuration files. It uses two sections, ``[patterns]`` and
12 ``[repository]``.
12 ``[repository]``.
13
13
14 The ``[patterns]`` section specifies how line endings should be
14 The ``[patterns]`` section specifies how line endings should be
15 converted between the working copy and the repository. The format is
15 converted between the working copy and the repository. The format is
16 specified by a file pattern. The first match is used, so put more
16 specified by a file pattern. The first match is used, so put more
17 specific patterns first. The available line endings are ``LF``,
17 specific patterns first. The available line endings are ``LF``,
18 ``CRLF``, and ``BIN``.
18 ``CRLF``, and ``BIN``.
19
19
20 Files with the declared format of ``CRLF`` or ``LF`` are always
20 Files with the declared format of ``CRLF`` or ``LF`` are always
21 checked out and stored in the repository in that format and files
21 checked out and stored in the repository in that format and files
22 declared to be binary (``BIN``) are left unchanged. Additionally,
22 declared to be binary (``BIN``) are left unchanged. Additionally,
23 ``native`` is an alias for checking out in the platform's default line
23 ``native`` is an alias for checking out in the platform's default line
24 ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
24 ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
25 Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
25 Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
26 default behaviour; it is only needed if you need to override a later,
26 default behaviour; it is only needed if you need to override a later,
27 more general pattern.
27 more general pattern.
28
28
29 The optional ``[repository]`` section specifies the line endings to
29 The optional ``[repository]`` section specifies the line endings to
30 use for files stored in the repository. It has a single setting,
30 use for files stored in the repository. It has a single setting,
31 ``native``, which determines the storage line endings for files
31 ``native``, which determines the storage line endings for files
32 declared as ``native`` in the ``[patterns]`` section. It can be set to
32 declared as ``native`` in the ``[patterns]`` section. It can be set to
33 ``LF`` or ``CRLF``. The default is ``LF``. For example, this means
33 ``LF`` or ``CRLF``. The default is ``LF``. For example, this means
34 that on Windows, files configured as ``native`` (``CRLF`` by default)
34 that on Windows, files configured as ``native`` (``CRLF`` by default)
35 will be converted to ``LF`` when stored in the repository. Files
35 will be converted to ``LF`` when stored in the repository. Files
36 declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
36 declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
37 are always stored as-is in the repository.
37 are always stored as-is in the repository.
38
38
39 Example versioned ``.hgeol`` file::
39 Example versioned ``.hgeol`` file::
40
40
41 [patterns]
41 [patterns]
42 **.py = native
42 **.py = native
43 **.vcproj = CRLF
43 **.vcproj = CRLF
44 **.txt = native
44 **.txt = native
45 Makefile = LF
45 Makefile = LF
46 **.jpg = BIN
46 **.jpg = BIN
47
47
48 [repository]
48 [repository]
49 native = LF
49 native = LF
50
50
51 .. note::
51 .. note::
52
52
53 The rules will first apply when files are touched in the working
53 The rules will first apply when files are touched in the working
54 copy, e.g. by updating to null and back to tip to touch all files.
54 copy, e.g. by updating to null and back to tip to touch all files.
55
55
56 The extension uses an optional ``[eol]`` section read from both the
56 The extension uses an optional ``[eol]`` section read from both the
57 normal Mercurial configuration files and the ``.hgeol`` file, with the
57 normal Mercurial configuration files and the ``.hgeol`` file, with the
58 latter overriding the former. You can use that section to control the
58 latter overriding the former. You can use that section to control the
59 overall behavior. There are three settings:
59 overall behavior. There are three settings:
60
60
61 - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
61 - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
62 ``CRLF`` to override the default interpretation of ``native`` for
62 ``CRLF`` to override the default interpretation of ``native`` for
63 checkout. This can be used with :hg:`archive` on Unix, say, to
63 checkout. This can be used with :hg:`archive` on Unix, say, to
64 generate an archive where files have line endings for Windows.
64 generate an archive where files have line endings for Windows.
65
65
66 - ``eol.only-consistent`` (default True) can be set to False to make
66 - ``eol.only-consistent`` (default True) can be set to False to make
67 the extension convert files with inconsistent EOLs. Inconsistent
67 the extension convert files with inconsistent EOLs. Inconsistent
68 means that there is both ``CRLF`` and ``LF`` present in the file.
68 means that there is both ``CRLF`` and ``LF`` present in the file.
69 Such files are normally not touched under the assumption that they
69 Such files are normally not touched under the assumption that they
70 have mixed EOLs on purpose.
70 have mixed EOLs on purpose.
71
71
72 - ``eol.fix-trailing-newline`` (default False) can be set to True to
72 - ``eol.fix-trailing-newline`` (default False) can be set to True to
73 ensure that converted files end with a EOL character (either ``\\n``
73 ensure that converted files end with a EOL character (either ``\\n``
74 or ``\\r\\n`` as per the configured patterns).
74 or ``\\r\\n`` as per the configured patterns).
75
75
76 The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
76 The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
77 like the deprecated win32text extension does. This means that you can
77 like the deprecated win32text extension does. This means that you can
78 disable win32text and enable eol and your filters will still work. You
78 disable win32text and enable eol and your filters will still work. You
79 only need to these filters until you have prepared a ``.hgeol`` file.
79 only need to these filters until you have prepared a ``.hgeol`` file.
80
80
81 The ``win32text.forbid*`` hooks provided by the win32text extension
81 The ``win32text.forbid*`` hooks provided by the win32text extension
82 have been unified into a single hook named ``eol.checkheadshook``. The
82 have been unified into a single hook named ``eol.checkheadshook``. The
83 hook will lookup the expected line endings from the ``.hgeol`` file,
83 hook will lookup the expected line endings from the ``.hgeol`` file,
84 which means you must migrate to a ``.hgeol`` file first before using
84 which means you must migrate to a ``.hgeol`` file first before using
85 the hook. ``eol.checkheadshook`` only checks heads, intermediate
85 the hook. ``eol.checkheadshook`` only checks heads, intermediate
86 invalid revisions will be pushed. To forbid them completely, use the
86 invalid revisions will be pushed. To forbid them completely, use the
87 ``eol.checkallhook`` hook. These hooks are best used as
87 ``eol.checkallhook`` hook. These hooks are best used as
88 ``pretxnchangegroup`` hooks.
88 ``pretxnchangegroup`` hooks.
89
89
90 See :hg:`help patterns` for more information about the glob patterns
90 See :hg:`help patterns` for more information about the glob patterns
91 used.
91 used.
92 """
92 """
93
93
94 from mercurial.i18n import _
94 from mercurial.i18n import _
95 from mercurial import util, config, extensions, match, error
95 from mercurial import util, config, extensions, match, error
96 import re, os
96 import re, os
97
97
98 testedwith = 'internal'
98 testedwith = 'internal'
99
99
100 # Matches a lone LF, i.e., one that is not part of CRLF.
100 # Matches a lone LF, i.e., one that is not part of CRLF.
101 singlelf = re.compile('(^|[^\r])\n')
101 singlelf = re.compile('(^|[^\r])\n')
102 # Matches a single EOL which can either be a CRLF where repeated CR
102 # Matches a single EOL which can either be a CRLF where repeated CR
103 # are removed or a LF. We do not care about old Macintosh files, so a
103 # are removed or a LF. We do not care about old Macintosh files, so a
104 # stray CR is an error.
104 # stray CR is an error.
105 eolre = re.compile('\r*\n')
105 eolre = re.compile('\r*\n')
106
106
107
107
108 def inconsistenteol(data):
108 def inconsistenteol(data):
109 return '\r\n' in data and singlelf.search(data)
109 return '\r\n' in data and singlelf.search(data)
110
110
111 def tolf(s, params, ui, **kwargs):
111 def tolf(s, params, ui, **kwargs):
112 """Filter to convert to LF EOLs."""
112 """Filter to convert to LF EOLs."""
113 if util.binary(s):
113 if util.binary(s):
114 return s
114 return s
115 if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
115 if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
116 return s
116 return s
117 if (ui.configbool('eol', 'fix-trailing-newline', False)
117 if (ui.configbool('eol', 'fix-trailing-newline', False)
118 and s and s[-1] != '\n'):
118 and s and s[-1] != '\n'):
119 s = s + '\n'
119 s = s + '\n'
120 return eolre.sub('\n', s)
120 return eolre.sub('\n', s)
121
121
122 def tocrlf(s, params, ui, **kwargs):
122 def tocrlf(s, params, ui, **kwargs):
123 """Filter to convert to CRLF EOLs."""
123 """Filter to convert to CRLF EOLs."""
124 if util.binary(s):
124 if util.binary(s):
125 return s
125 return s
126 if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
126 if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
127 return s
127 return s
128 if (ui.configbool('eol', 'fix-trailing-newline', False)
128 if (ui.configbool('eol', 'fix-trailing-newline', False)
129 and s and s[-1] != '\n'):
129 and s and s[-1] != '\n'):
130 s = s + '\n'
130 s = s + '\n'
131 return eolre.sub('\r\n', s)
131 return eolre.sub('\r\n', s)
132
132
133 def isbinary(s, params):
133 def isbinary(s, params):
134 """Filter to do nothing with the file."""
134 """Filter to do nothing with the file."""
135 return s
135 return s
136
136
137 filters = {
137 filters = {
138 'to-lf': tolf,
138 'to-lf': tolf,
139 'to-crlf': tocrlf,
139 'to-crlf': tocrlf,
140 'is-binary': isbinary,
140 'is-binary': isbinary,
141 # The following provide backwards compatibility with win32text
141 # The following provide backwards compatibility with win32text
142 'cleverencode:': tolf,
142 'cleverencode:': tolf,
143 'cleverdecode:': tocrlf
143 'cleverdecode:': tocrlf
144 }
144 }
145
145
146 class eolfile(object):
146 class eolfile(object):
147 def __init__(self, ui, root, data):
147 def __init__(self, ui, root, data):
148 self._decode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
148 self._decode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
149 self._encode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
149 self._encode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
150
150
151 self.cfg = config.config()
151 self.cfg = config.config()
152 # Our files should not be touched. The pattern must be
152 # Our files should not be touched. The pattern must be
153 # inserted first override a '** = native' pattern.
153 # inserted first override a '** = native' pattern.
154 self.cfg.set('patterns', '.hg*', 'BIN')
154 self.cfg.set('patterns', '.hg*', 'BIN')
155 # We can then parse the user's patterns.
155 # We can then parse the user's patterns.
156 self.cfg.parse('.hgeol', data)
156 self.cfg.parse('.hgeol', data)
157
157
158 isrepolf = self.cfg.get('repository', 'native') != 'CRLF'
158 isrepolf = self.cfg.get('repository', 'native') != 'CRLF'
159 self._encode['NATIVE'] = isrepolf and 'to-lf' or 'to-crlf'
159 self._encode['NATIVE'] = isrepolf and 'to-lf' or 'to-crlf'
160 iswdlf = ui.config('eol', 'native', os.linesep) in ('LF', '\n')
160 iswdlf = ui.config('eol', 'native', os.linesep) in ('LF', '\n')
161 self._decode['NATIVE'] = iswdlf and 'to-lf' or 'to-crlf'
161 self._decode['NATIVE'] = iswdlf and 'to-lf' or 'to-crlf'
162
162
163 include = []
163 include = []
164 exclude = []
164 exclude = []
165 for pattern, style in self.cfg.items('patterns'):
165 for pattern, style in self.cfg.items('patterns'):
166 key = style.upper()
166 key = style.upper()
167 if key == 'BIN':
167 if key == 'BIN':
168 exclude.append(pattern)
168 exclude.append(pattern)
169 else:
169 else:
170 include.append(pattern)
170 include.append(pattern)
171 # This will match the files for which we need to care
171 # This will match the files for which we need to care
172 # about inconsistent newlines.
172 # about inconsistent newlines.
173 self.match = match.match(root, '', [], include, exclude)
173 self.match = match.match(root, '', [], include, exclude)
174
174
175 def copytoui(self, ui):
175 def copytoui(self, ui):
176 for pattern, style in self.cfg.items('patterns'):
176 for pattern, style in self.cfg.items('patterns'):
177 key = style.upper()
177 key = style.upper()
178 try:
178 try:
179 ui.setconfig('decode', pattern, self._decode[key])
179 ui.setconfig('decode', pattern, self._decode[key])
180 ui.setconfig('encode', pattern, self._encode[key])
180 ui.setconfig('encode', pattern, self._encode[key])
181 except KeyError:
181 except KeyError:
182 ui.warn(_("ignoring unknown EOL style '%s' from %s\n")
182 ui.warn(_("ignoring unknown EOL style '%s' from %s\n")
183 % (style, self.cfg.source('patterns', pattern)))
183 % (style, self.cfg.source('patterns', pattern)))
184 # eol.only-consistent can be specified in ~/.hgrc or .hgeol
184 # eol.only-consistent can be specified in ~/.hgrc or .hgeol
185 for k, v in self.cfg.items('eol'):
185 for k, v in self.cfg.items('eol'):
186 ui.setconfig('eol', k, v)
186 ui.setconfig('eol', k, v)
187
187
188 def checkrev(self, repo, ctx, files):
188 def checkrev(self, repo, ctx, files):
189 failed = []
189 failed = []
190 for f in (files or ctx.files()):
190 for f in (files or ctx.files()):
191 if f not in ctx:
191 if f not in ctx:
192 continue
192 continue
193 for pattern, style in self.cfg.items('patterns'):
193 for pattern, style in self.cfg.items('patterns'):
194 if not match.match(repo.root, '', [pattern])(f):
194 if not match.match(repo.root, '', [pattern])(f):
195 continue
195 continue
196 target = self._encode[style.upper()]
196 target = self._encode[style.upper()]
197 data = ctx[f].data()
197 data = ctx[f].data()
198 if (target == "to-lf" and "\r\n" in data
198 if (target == "to-lf" and "\r\n" in data
199 or target == "to-crlf" and singlelf.search(data)):
199 or target == "to-crlf" and singlelf.search(data)):
200 failed.append((str(ctx), target, f))
200 failed.append((str(ctx), target, f))
201 break
201 break
202 return failed
202 return failed
203
203
204 def parseeol(ui, repo, nodes):
204 def parseeol(ui, repo, nodes):
205 try:
205 try:
206 for node in nodes:
206 for node in nodes:
207 try:
207 try:
208 if node is None:
208 if node is None:
209 # Cannot use workingctx.data() since it would load
209 # Cannot use workingctx.data() since it would load
210 # and cache the filters before we configure them.
210 # and cache the filters before we configure them.
211 data = repo.wfile('.hgeol').read()
211 data = repo.wfile('.hgeol').read()
212 else:
212 else:
213 data = repo[node]['.hgeol'].data()
213 data = repo[node]['.hgeol'].data()
214 return eolfile(ui, repo.root, data)
214 return eolfile(ui, repo.root, data)
215 except (IOError, LookupError):
215 except (IOError, LookupError):
216 pass
216 pass
217 except error.ParseError, inst:
217 except error.ParseError, inst:
218 ui.warn(_("warning: ignoring .hgeol file due to parse error "
218 ui.warn(_("warning: ignoring .hgeol file due to parse error "
219 "at %s: %s\n") % (inst.args[1], inst.args[0]))
219 "at %s: %s\n") % (inst.args[1], inst.args[0]))
220 return None
220 return None
221
221
222 def _checkhook(ui, repo, node, headsonly):
222 def _checkhook(ui, repo, node, headsonly):
223 # Get revisions to check and touched files at the same time
223 # Get revisions to check and touched files at the same time
224 files = set()
224 files = set()
225 revs = set()
225 revs = set()
226 for rev in xrange(repo[node].rev(), len(repo)):
226 for rev in xrange(repo[node].rev(), len(repo)):
227 revs.add(rev)
227 revs.add(rev)
228 if headsonly:
228 if headsonly:
229 ctx = repo[rev]
229 ctx = repo[rev]
230 files.update(ctx.files())
230 files.update(ctx.files())
231 for pctx in ctx.parents():
231 for pctx in ctx.parents():
232 revs.discard(pctx.rev())
232 revs.discard(pctx.rev())
233 failed = []
233 failed = []
234 for rev in revs:
234 for rev in revs:
235 ctx = repo[rev]
235 ctx = repo[rev]
236 eol = parseeol(ui, repo, [ctx.node()])
236 eol = parseeol(ui, repo, [ctx.node()])
237 if eol:
237 if eol:
238 failed.extend(eol.checkrev(repo, ctx, files))
238 failed.extend(eol.checkrev(repo, ctx, files))
239
239
240 if failed:
240 if failed:
241 eols = {'to-lf': 'CRLF', 'to-crlf': 'LF'}
241 eols = {'to-lf': 'CRLF', 'to-crlf': 'LF'}
242 msgs = []
242 msgs = []
243 for node, target, f in failed:
243 for node, target, f in failed:
244 msgs.append(_(" %s in %s should not have %s line endings") %
244 msgs.append(_(" %s in %s should not have %s line endings") %
245 (f, node, eols[target]))
245 (f, node, eols[target]))
246 raise util.Abort(_("end-of-line check failed:\n") + "\n".join(msgs))
246 raise util.Abort(_("end-of-line check failed:\n") + "\n".join(msgs))
247
247
248 def checkallhook(ui, repo, node, hooktype, **kwargs):
248 def checkallhook(ui, repo, node, hooktype, **kwargs):
249 """verify that files have expected EOLs"""
249 """verify that files have expected EOLs"""
250 _checkhook(ui, repo, node, False)
250 _checkhook(ui, repo, node, False)
251
251
252 def checkheadshook(ui, repo, node, hooktype, **kwargs):
252 def checkheadshook(ui, repo, node, hooktype, **kwargs):
253 """verify that files have expected EOLs"""
253 """verify that files have expected EOLs"""
254 _checkhook(ui, repo, node, True)
254 _checkhook(ui, repo, node, True)
255
255
256 # "checkheadshook" used to be called "hook"
256 # "checkheadshook" used to be called "hook"
257 hook = checkheadshook
257 hook = checkheadshook
258
258
259 def preupdate(ui, repo, hooktype, parent1, parent2):
259 def preupdate(ui, repo, hooktype, parent1, parent2):
260 repo.loadeol([parent1])
260 repo.loadeol([parent1])
261 return False
261 return False
262
262
263 def uisetup(ui):
263 def uisetup(ui):
264 ui.setconfig('hooks', 'preupdate.eol', preupdate)
264 ui.setconfig('hooks', 'preupdate.eol', preupdate)
265
265
266 def extsetup(ui):
266 def extsetup(ui):
267 try:
267 try:
268 extensions.find('win32text')
268 extensions.find('win32text')
269 ui.warn(_("the eol extension is incompatible with the "
269 ui.warn(_("the eol extension is incompatible with the "
270 "win32text extension\n"))
270 "win32text extension\n"))
271 except KeyError:
271 except KeyError:
272 pass
272 pass
273
273
274
274
275 def reposetup(ui, repo):
275 def reposetup(ui, repo):
276 uisetup(repo.ui)
276 uisetup(repo.ui)
277
277
278 if not repo.local():
278 if not repo.local():
279 return
279 return
280 for name, fn in filters.iteritems():
280 for name, fn in filters.iteritems():
281 repo.adddatafilter(name, fn)
281 repo.adddatafilter(name, fn)
282
282
283 ui.setconfig('patch', 'eol', 'auto')
283 ui.setconfig('patch', 'eol', 'auto')
284
284
285 class eolrepo(repo.__class__):
285 class eolrepo(repo.__class__):
286
286
287 def loadeol(self, nodes):
287 def loadeol(self, nodes):
288 eol = parseeol(self.ui, self, nodes)
288 eol = parseeol(self.ui, self, nodes)
289 if eol is None:
289 if eol is None:
290 return None
290 return None
291 eol.copytoui(self.ui)
291 eol.copytoui(self.ui)
292 return eol.match
292 return eol.match
293
293
294 def _hgcleardirstate(self):
294 def _hgcleardirstate(self):
295 self._eolfile = self.loadeol([None, 'tip'])
295 self._eolfile = self.loadeol([None, 'tip'])
296 if not self._eolfile:
296 if not self._eolfile:
297 self._eolfile = util.never
297 self._eolfile = util.never
298 return
298 return
299
299
300 try:
300 try:
301 cachemtime = os.path.getmtime(self.join("eol.cache"))
301 cachemtime = os.path.getmtime(self.join("eol.cache"))
302 except OSError:
302 except OSError:
303 cachemtime = 0
303 cachemtime = 0
304
304
305 try:
305 try:
306 eolmtime = os.path.getmtime(self.wjoin(".hgeol"))
306 eolmtime = os.path.getmtime(self.wjoin(".hgeol"))
307 except OSError:
307 except OSError:
308 eolmtime = 0
308 eolmtime = 0
309
309
310 if eolmtime > cachemtime:
310 if eolmtime > cachemtime:
311 self.ui.debug("eol: detected change in .hgeol\n")
311 self.ui.debug("eol: detected change in .hgeol\n")
312 wlock = None
312 wlock = None
313 try:
313 try:
314 wlock = self.wlock()
314 wlock = self.wlock()
315 for f in self.dirstate:
315 for f in self.dirstate:
316 if self.dirstate[f] == 'n':
316 if self.dirstate[f] == 'n':
317 # all normal files need to be looked at
317 # all normal files need to be looked at
318 # again since the new .hgeol file might no
318 # again since the new .hgeol file might no
319 # longer match a file it matched before
319 # longer match a file it matched before
320 self.dirstate.normallookup(f)
320 self.dirstate.normallookup(f)
321 # Create or touch the cache to update mtime
321 # Create or touch the cache to update mtime
322 self.opener("eol.cache", "w").close()
322 self.opener("eol.cache", "w").close()
323 wlock.release()
323 wlock.release()
324 except error.LockUnavailable:
324 except error.LockUnavailable:
325 # If we cannot lock the repository and clear the
325 # If we cannot lock the repository and clear the
326 # dirstate, then a commit might not see all files
326 # dirstate, then a commit might not see all files
327 # as modified. But if we cannot lock the
327 # as modified. But if we cannot lock the
328 # repository, then we can also not make a commit,
328 # repository, then we can also not make a commit,
329 # so ignore the error.
329 # so ignore the error.
330 pass
330 pass
331
331
332 def commitctx(self, ctx, error=False):
332 def commitctx(self, ctx, error=False):
333 for f in sorted(ctx.added() + ctx.modified()):
333 for f in sorted(ctx.added() + ctx.modified()):
334 if not self._eolfile(f):
334 if not self._eolfile(f):
335 continue
335 continue
336 try:
336 try:
337 data = ctx[f].data()
337 data = ctx[f].data()
338 except IOError:
338 except IOError:
339 continue
339 continue
340 if util.binary(data):
340 if util.binary(data):
341 # We should not abort here, since the user should
341 # We should not abort here, since the user should
342 # be able to say "** = native" to automatically
342 # be able to say "** = native" to automatically
343 # have all non-binary files taken care of.
343 # have all non-binary files taken care of.
344 continue
344 continue
345 if inconsistenteol(data):
345 if inconsistenteol(data):
346 raise util.Abort(_("inconsistent newline style "
346 raise util.Abort(_("inconsistent newline style "
347 "in %s\n" % f))
347 "in %s\n") % f)
348 return super(eolrepo, self).commitctx(ctx, error)
348 return super(eolrepo, self).commitctx(ctx, error)
349 repo.__class__ = eolrepo
349 repo.__class__ = eolrepo
350 repo._hgcleardirstate()
350 repo._hgcleardirstate()
@@ -1,1931 +1,1931 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email, os, errno, re, posixpath
9 import cStringIO, email, os, errno, re, posixpath
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11 # On python2.4 you have to import these by name or they fail to
11 # On python2.4 you have to import these by name or they fail to
12 # load. This was not a problem on Python 2.7.
12 # load. This was not a problem on Python 2.7.
13 import email.Generator
13 import email.Generator
14 import email.Parser
14 import email.Parser
15
15
16 from i18n import _
16 from i18n import _
17 from node import hex, short
17 from node import hex, short
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19
19
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
21
21
22 class PatchError(Exception):
22 class PatchError(Exception):
23 pass
23 pass
24
24
25
25
26 # public functions
26 # public functions
27
27
28 def split(stream):
28 def split(stream):
29 '''return an iterator of individual patches from a stream'''
29 '''return an iterator of individual patches from a stream'''
30 def isheader(line, inheader):
30 def isheader(line, inheader):
31 if inheader and line[0] in (' ', '\t'):
31 if inheader and line[0] in (' ', '\t'):
32 # continuation
32 # continuation
33 return True
33 return True
34 if line[0] in (' ', '-', '+'):
34 if line[0] in (' ', '-', '+'):
35 # diff line - don't check for header pattern in there
35 # diff line - don't check for header pattern in there
36 return False
36 return False
37 l = line.split(': ', 1)
37 l = line.split(': ', 1)
38 return len(l) == 2 and ' ' not in l[0]
38 return len(l) == 2 and ' ' not in l[0]
39
39
40 def chunk(lines):
40 def chunk(lines):
41 return cStringIO.StringIO(''.join(lines))
41 return cStringIO.StringIO(''.join(lines))
42
42
43 def hgsplit(stream, cur):
43 def hgsplit(stream, cur):
44 inheader = True
44 inheader = True
45
45
46 for line in stream:
46 for line in stream:
47 if not line.strip():
47 if not line.strip():
48 inheader = False
48 inheader = False
49 if not inheader and line.startswith('# HG changeset patch'):
49 if not inheader and line.startswith('# HG changeset patch'):
50 yield chunk(cur)
50 yield chunk(cur)
51 cur = []
51 cur = []
52 inheader = True
52 inheader = True
53
53
54 cur.append(line)
54 cur.append(line)
55
55
56 if cur:
56 if cur:
57 yield chunk(cur)
57 yield chunk(cur)
58
58
59 def mboxsplit(stream, cur):
59 def mboxsplit(stream, cur):
60 for line in stream:
60 for line in stream:
61 if line.startswith('From '):
61 if line.startswith('From '):
62 for c in split(chunk(cur[1:])):
62 for c in split(chunk(cur[1:])):
63 yield c
63 yield c
64 cur = []
64 cur = []
65
65
66 cur.append(line)
66 cur.append(line)
67
67
68 if cur:
68 if cur:
69 for c in split(chunk(cur[1:])):
69 for c in split(chunk(cur[1:])):
70 yield c
70 yield c
71
71
72 def mimesplit(stream, cur):
72 def mimesplit(stream, cur):
73 def msgfp(m):
73 def msgfp(m):
74 fp = cStringIO.StringIO()
74 fp = cStringIO.StringIO()
75 g = email.Generator.Generator(fp, mangle_from_=False)
75 g = email.Generator.Generator(fp, mangle_from_=False)
76 g.flatten(m)
76 g.flatten(m)
77 fp.seek(0)
77 fp.seek(0)
78 return fp
78 return fp
79
79
80 for line in stream:
80 for line in stream:
81 cur.append(line)
81 cur.append(line)
82 c = chunk(cur)
82 c = chunk(cur)
83
83
84 m = email.Parser.Parser().parse(c)
84 m = email.Parser.Parser().parse(c)
85 if not m.is_multipart():
85 if not m.is_multipart():
86 yield msgfp(m)
86 yield msgfp(m)
87 else:
87 else:
88 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
88 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
89 for part in m.walk():
89 for part in m.walk():
90 ct = part.get_content_type()
90 ct = part.get_content_type()
91 if ct not in ok_types:
91 if ct not in ok_types:
92 continue
92 continue
93 yield msgfp(part)
93 yield msgfp(part)
94
94
95 def headersplit(stream, cur):
95 def headersplit(stream, cur):
96 inheader = False
96 inheader = False
97
97
98 for line in stream:
98 for line in stream:
99 if not inheader and isheader(line, inheader):
99 if not inheader and isheader(line, inheader):
100 yield chunk(cur)
100 yield chunk(cur)
101 cur = []
101 cur = []
102 inheader = True
102 inheader = True
103 if inheader and not isheader(line, inheader):
103 if inheader and not isheader(line, inheader):
104 inheader = False
104 inheader = False
105
105
106 cur.append(line)
106 cur.append(line)
107
107
108 if cur:
108 if cur:
109 yield chunk(cur)
109 yield chunk(cur)
110
110
111 def remainder(cur):
111 def remainder(cur):
112 yield chunk(cur)
112 yield chunk(cur)
113
113
114 class fiter(object):
114 class fiter(object):
115 def __init__(self, fp):
115 def __init__(self, fp):
116 self.fp = fp
116 self.fp = fp
117
117
118 def __iter__(self):
118 def __iter__(self):
119 return self
119 return self
120
120
121 def next(self):
121 def next(self):
122 l = self.fp.readline()
122 l = self.fp.readline()
123 if not l:
123 if not l:
124 raise StopIteration
124 raise StopIteration
125 return l
125 return l
126
126
127 inheader = False
127 inheader = False
128 cur = []
128 cur = []
129
129
130 mimeheaders = ['content-type']
130 mimeheaders = ['content-type']
131
131
132 if not util.safehasattr(stream, 'next'):
132 if not util.safehasattr(stream, 'next'):
133 # http responses, for example, have readline but not next
133 # http responses, for example, have readline but not next
134 stream = fiter(stream)
134 stream = fiter(stream)
135
135
136 for line in stream:
136 for line in stream:
137 cur.append(line)
137 cur.append(line)
138 if line.startswith('# HG changeset patch'):
138 if line.startswith('# HG changeset patch'):
139 return hgsplit(stream, cur)
139 return hgsplit(stream, cur)
140 elif line.startswith('From '):
140 elif line.startswith('From '):
141 return mboxsplit(stream, cur)
141 return mboxsplit(stream, cur)
142 elif isheader(line, inheader):
142 elif isheader(line, inheader):
143 inheader = True
143 inheader = True
144 if line.split(':', 1)[0].lower() in mimeheaders:
144 if line.split(':', 1)[0].lower() in mimeheaders:
145 # let email parser handle this
145 # let email parser handle this
146 return mimesplit(stream, cur)
146 return mimesplit(stream, cur)
147 elif line.startswith('--- ') and inheader:
147 elif line.startswith('--- ') and inheader:
148 # No evil headers seen by diff start, split by hand
148 # No evil headers seen by diff start, split by hand
149 return headersplit(stream, cur)
149 return headersplit(stream, cur)
150 # Not enough info, keep reading
150 # Not enough info, keep reading
151
151
152 # if we are here, we have a very plain patch
152 # if we are here, we have a very plain patch
153 return remainder(cur)
153 return remainder(cur)
154
154
155 def extract(ui, fileobj):
155 def extract(ui, fileobj):
156 '''extract patch from data read from fileobj.
156 '''extract patch from data read from fileobj.
157
157
158 patch can be a normal patch or contained in an email message.
158 patch can be a normal patch or contained in an email message.
159
159
160 return tuple (filename, message, user, date, branch, node, p1, p2).
160 return tuple (filename, message, user, date, branch, node, p1, p2).
161 Any item in the returned tuple can be None. If filename is None,
161 Any item in the returned tuple can be None. If filename is None,
162 fileobj did not contain a patch. Caller must unlink filename when done.'''
162 fileobj did not contain a patch. Caller must unlink filename when done.'''
163
163
164 # attempt to detect the start of a patch
164 # attempt to detect the start of a patch
165 # (this heuristic is borrowed from quilt)
165 # (this heuristic is borrowed from quilt)
166 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
166 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
167 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
167 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
168 r'---[ \t].*?^\+\+\+[ \t]|'
168 r'---[ \t].*?^\+\+\+[ \t]|'
169 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
169 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
170
170
171 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
171 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
172 tmpfp = os.fdopen(fd, 'w')
172 tmpfp = os.fdopen(fd, 'w')
173 try:
173 try:
174 msg = email.Parser.Parser().parse(fileobj)
174 msg = email.Parser.Parser().parse(fileobj)
175
175
176 subject = msg['Subject']
176 subject = msg['Subject']
177 user = msg['From']
177 user = msg['From']
178 if not subject and not user:
178 if not subject and not user:
179 # Not an email, restore parsed headers if any
179 # Not an email, restore parsed headers if any
180 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
180 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
181
181
182 # should try to parse msg['Date']
182 # should try to parse msg['Date']
183 date = None
183 date = None
184 nodeid = None
184 nodeid = None
185 branch = None
185 branch = None
186 parents = []
186 parents = []
187
187
188 if subject:
188 if subject:
189 if subject.startswith('[PATCH'):
189 if subject.startswith('[PATCH'):
190 pend = subject.find(']')
190 pend = subject.find(']')
191 if pend >= 0:
191 if pend >= 0:
192 subject = subject[pend + 1:].lstrip()
192 subject = subject[pend + 1:].lstrip()
193 subject = re.sub(r'\n[ \t]+', ' ', subject)
193 subject = re.sub(r'\n[ \t]+', ' ', subject)
194 ui.debug('Subject: %s\n' % subject)
194 ui.debug('Subject: %s\n' % subject)
195 if user:
195 if user:
196 ui.debug('From: %s\n' % user)
196 ui.debug('From: %s\n' % user)
197 diffs_seen = 0
197 diffs_seen = 0
198 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
198 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
199 message = ''
199 message = ''
200 for part in msg.walk():
200 for part in msg.walk():
201 content_type = part.get_content_type()
201 content_type = part.get_content_type()
202 ui.debug('Content-Type: %s\n' % content_type)
202 ui.debug('Content-Type: %s\n' % content_type)
203 if content_type not in ok_types:
203 if content_type not in ok_types:
204 continue
204 continue
205 payload = part.get_payload(decode=True)
205 payload = part.get_payload(decode=True)
206 m = diffre.search(payload)
206 m = diffre.search(payload)
207 if m:
207 if m:
208 hgpatch = False
208 hgpatch = False
209 hgpatchheader = False
209 hgpatchheader = False
210 ignoretext = False
210 ignoretext = False
211
211
212 ui.debug('found patch at byte %d\n' % m.start(0))
212 ui.debug('found patch at byte %d\n' % m.start(0))
213 diffs_seen += 1
213 diffs_seen += 1
214 cfp = cStringIO.StringIO()
214 cfp = cStringIO.StringIO()
215 for line in payload[:m.start(0)].splitlines():
215 for line in payload[:m.start(0)].splitlines():
216 if line.startswith('# HG changeset patch') and not hgpatch:
216 if line.startswith('# HG changeset patch') and not hgpatch:
217 ui.debug('patch generated by hg export\n')
217 ui.debug('patch generated by hg export\n')
218 hgpatch = True
218 hgpatch = True
219 hgpatchheader = True
219 hgpatchheader = True
220 # drop earlier commit message content
220 # drop earlier commit message content
221 cfp.seek(0)
221 cfp.seek(0)
222 cfp.truncate()
222 cfp.truncate()
223 subject = None
223 subject = None
224 elif hgpatchheader:
224 elif hgpatchheader:
225 if line.startswith('# User '):
225 if line.startswith('# User '):
226 user = line[7:]
226 user = line[7:]
227 ui.debug('From: %s\n' % user)
227 ui.debug('From: %s\n' % user)
228 elif line.startswith("# Date "):
228 elif line.startswith("# Date "):
229 date = line[7:]
229 date = line[7:]
230 elif line.startswith("# Branch "):
230 elif line.startswith("# Branch "):
231 branch = line[9:]
231 branch = line[9:]
232 elif line.startswith("# Node ID "):
232 elif line.startswith("# Node ID "):
233 nodeid = line[10:]
233 nodeid = line[10:]
234 elif line.startswith("# Parent "):
234 elif line.startswith("# Parent "):
235 parents.append(line[9:].lstrip())
235 parents.append(line[9:].lstrip())
236 elif not line.startswith("# "):
236 elif not line.startswith("# "):
237 hgpatchheader = False
237 hgpatchheader = False
238 elif line == '---':
238 elif line == '---':
239 ignoretext = True
239 ignoretext = True
240 if not hgpatchheader and not ignoretext:
240 if not hgpatchheader and not ignoretext:
241 cfp.write(line)
241 cfp.write(line)
242 cfp.write('\n')
242 cfp.write('\n')
243 message = cfp.getvalue()
243 message = cfp.getvalue()
244 if tmpfp:
244 if tmpfp:
245 tmpfp.write(payload)
245 tmpfp.write(payload)
246 if not payload.endswith('\n'):
246 if not payload.endswith('\n'):
247 tmpfp.write('\n')
247 tmpfp.write('\n')
248 elif not diffs_seen and message and content_type == 'text/plain':
248 elif not diffs_seen and message and content_type == 'text/plain':
249 message += '\n' + payload
249 message += '\n' + payload
250 except: # re-raises
250 except: # re-raises
251 tmpfp.close()
251 tmpfp.close()
252 os.unlink(tmpname)
252 os.unlink(tmpname)
253 raise
253 raise
254
254
255 if subject and not message.startswith(subject):
255 if subject and not message.startswith(subject):
256 message = '%s\n%s' % (subject, message)
256 message = '%s\n%s' % (subject, message)
257 tmpfp.close()
257 tmpfp.close()
258 if not diffs_seen:
258 if not diffs_seen:
259 os.unlink(tmpname)
259 os.unlink(tmpname)
260 return None, message, user, date, branch, None, None, None
260 return None, message, user, date, branch, None, None, None
261 p1 = parents and parents.pop(0) or None
261 p1 = parents and parents.pop(0) or None
262 p2 = parents and parents.pop(0) or None
262 p2 = parents and parents.pop(0) or None
263 return tmpname, message, user, date, branch, nodeid, p1, p2
263 return tmpname, message, user, date, branch, nodeid, p1, p2
264
264
265 class patchmeta(object):
265 class patchmeta(object):
266 """Patched file metadata
266 """Patched file metadata
267
267
268 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
268 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
269 or COPY. 'path' is patched file path. 'oldpath' is set to the
269 or COPY. 'path' is patched file path. 'oldpath' is set to the
270 origin file when 'op' is either COPY or RENAME, None otherwise. If
270 origin file when 'op' is either COPY or RENAME, None otherwise. If
271 file mode is changed, 'mode' is a tuple (islink, isexec) where
271 file mode is changed, 'mode' is a tuple (islink, isexec) where
272 'islink' is True if the file is a symlink and 'isexec' is True if
272 'islink' is True if the file is a symlink and 'isexec' is True if
273 the file is executable. Otherwise, 'mode' is None.
273 the file is executable. Otherwise, 'mode' is None.
274 """
274 """
275 def __init__(self, path):
275 def __init__(self, path):
276 self.path = path
276 self.path = path
277 self.oldpath = None
277 self.oldpath = None
278 self.mode = None
278 self.mode = None
279 self.op = 'MODIFY'
279 self.op = 'MODIFY'
280 self.binary = False
280 self.binary = False
281
281
282 def setmode(self, mode):
282 def setmode(self, mode):
283 islink = mode & 020000
283 islink = mode & 020000
284 isexec = mode & 0100
284 isexec = mode & 0100
285 self.mode = (islink, isexec)
285 self.mode = (islink, isexec)
286
286
287 def copy(self):
287 def copy(self):
288 other = patchmeta(self.path)
288 other = patchmeta(self.path)
289 other.oldpath = self.oldpath
289 other.oldpath = self.oldpath
290 other.mode = self.mode
290 other.mode = self.mode
291 other.op = self.op
291 other.op = self.op
292 other.binary = self.binary
292 other.binary = self.binary
293 return other
293 return other
294
294
295 def _ispatchinga(self, afile):
295 def _ispatchinga(self, afile):
296 if afile == '/dev/null':
296 if afile == '/dev/null':
297 return self.op == 'ADD'
297 return self.op == 'ADD'
298 return afile == 'a/' + (self.oldpath or self.path)
298 return afile == 'a/' + (self.oldpath or self.path)
299
299
300 def _ispatchingb(self, bfile):
300 def _ispatchingb(self, bfile):
301 if bfile == '/dev/null':
301 if bfile == '/dev/null':
302 return self.op == 'DELETE'
302 return self.op == 'DELETE'
303 return bfile == 'b/' + self.path
303 return bfile == 'b/' + self.path
304
304
305 def ispatching(self, afile, bfile):
305 def ispatching(self, afile, bfile):
306 return self._ispatchinga(afile) and self._ispatchingb(bfile)
306 return self._ispatchinga(afile) and self._ispatchingb(bfile)
307
307
308 def __repr__(self):
308 def __repr__(self):
309 return "<patchmeta %s %r>" % (self.op, self.path)
309 return "<patchmeta %s %r>" % (self.op, self.path)
310
310
311 def readgitpatch(lr):
311 def readgitpatch(lr):
312 """extract git-style metadata about patches from <patchname>"""
312 """extract git-style metadata about patches from <patchname>"""
313
313
314 # Filter patch for git information
314 # Filter patch for git information
315 gp = None
315 gp = None
316 gitpatches = []
316 gitpatches = []
317 for line in lr:
317 for line in lr:
318 line = line.rstrip(' \r\n')
318 line = line.rstrip(' \r\n')
319 if line.startswith('diff --git a/'):
319 if line.startswith('diff --git a/'):
320 m = gitre.match(line)
320 m = gitre.match(line)
321 if m:
321 if m:
322 if gp:
322 if gp:
323 gitpatches.append(gp)
323 gitpatches.append(gp)
324 dst = m.group(2)
324 dst = m.group(2)
325 gp = patchmeta(dst)
325 gp = patchmeta(dst)
326 elif gp:
326 elif gp:
327 if line.startswith('--- '):
327 if line.startswith('--- '):
328 gitpatches.append(gp)
328 gitpatches.append(gp)
329 gp = None
329 gp = None
330 continue
330 continue
331 if line.startswith('rename from '):
331 if line.startswith('rename from '):
332 gp.op = 'RENAME'
332 gp.op = 'RENAME'
333 gp.oldpath = line[12:]
333 gp.oldpath = line[12:]
334 elif line.startswith('rename to '):
334 elif line.startswith('rename to '):
335 gp.path = line[10:]
335 gp.path = line[10:]
336 elif line.startswith('copy from '):
336 elif line.startswith('copy from '):
337 gp.op = 'COPY'
337 gp.op = 'COPY'
338 gp.oldpath = line[10:]
338 gp.oldpath = line[10:]
339 elif line.startswith('copy to '):
339 elif line.startswith('copy to '):
340 gp.path = line[8:]
340 gp.path = line[8:]
341 elif line.startswith('deleted file'):
341 elif line.startswith('deleted file'):
342 gp.op = 'DELETE'
342 gp.op = 'DELETE'
343 elif line.startswith('new file mode '):
343 elif line.startswith('new file mode '):
344 gp.op = 'ADD'
344 gp.op = 'ADD'
345 gp.setmode(int(line[-6:], 8))
345 gp.setmode(int(line[-6:], 8))
346 elif line.startswith('new mode '):
346 elif line.startswith('new mode '):
347 gp.setmode(int(line[-6:], 8))
347 gp.setmode(int(line[-6:], 8))
348 elif line.startswith('GIT binary patch'):
348 elif line.startswith('GIT binary patch'):
349 gp.binary = True
349 gp.binary = True
350 if gp:
350 if gp:
351 gitpatches.append(gp)
351 gitpatches.append(gp)
352
352
353 return gitpatches
353 return gitpatches
354
354
355 class linereader(object):
355 class linereader(object):
356 # simple class to allow pushing lines back into the input stream
356 # simple class to allow pushing lines back into the input stream
357 def __init__(self, fp):
357 def __init__(self, fp):
358 self.fp = fp
358 self.fp = fp
359 self.buf = []
359 self.buf = []
360
360
361 def push(self, line):
361 def push(self, line):
362 if line is not None:
362 if line is not None:
363 self.buf.append(line)
363 self.buf.append(line)
364
364
365 def readline(self):
365 def readline(self):
366 if self.buf:
366 if self.buf:
367 l = self.buf[0]
367 l = self.buf[0]
368 del self.buf[0]
368 del self.buf[0]
369 return l
369 return l
370 return self.fp.readline()
370 return self.fp.readline()
371
371
372 def __iter__(self):
372 def __iter__(self):
373 while True:
373 while True:
374 l = self.readline()
374 l = self.readline()
375 if not l:
375 if not l:
376 break
376 break
377 yield l
377 yield l
378
378
379 class abstractbackend(object):
379 class abstractbackend(object):
380 def __init__(self, ui):
380 def __init__(self, ui):
381 self.ui = ui
381 self.ui = ui
382
382
383 def getfile(self, fname):
383 def getfile(self, fname):
384 """Return target file data and flags as a (data, (islink,
384 """Return target file data and flags as a (data, (islink,
385 isexec)) tuple.
385 isexec)) tuple.
386 """
386 """
387 raise NotImplementedError
387 raise NotImplementedError
388
388
389 def setfile(self, fname, data, mode, copysource):
389 def setfile(self, fname, data, mode, copysource):
390 """Write data to target file fname and set its mode. mode is a
390 """Write data to target file fname and set its mode. mode is a
391 (islink, isexec) tuple. If data is None, the file content should
391 (islink, isexec) tuple. If data is None, the file content should
392 be left unchanged. If the file is modified after being copied,
392 be left unchanged. If the file is modified after being copied,
393 copysource is set to the original file name.
393 copysource is set to the original file name.
394 """
394 """
395 raise NotImplementedError
395 raise NotImplementedError
396
396
397 def unlink(self, fname):
397 def unlink(self, fname):
398 """Unlink target file."""
398 """Unlink target file."""
399 raise NotImplementedError
399 raise NotImplementedError
400
400
401 def writerej(self, fname, failed, total, lines):
401 def writerej(self, fname, failed, total, lines):
402 """Write rejected lines for fname. total is the number of hunks
402 """Write rejected lines for fname. total is the number of hunks
403 which failed to apply and total the total number of hunks for this
403 which failed to apply and total the total number of hunks for this
404 files.
404 files.
405 """
405 """
406 pass
406 pass
407
407
408 def exists(self, fname):
408 def exists(self, fname):
409 raise NotImplementedError
409 raise NotImplementedError
410
410
411 class fsbackend(abstractbackend):
411 class fsbackend(abstractbackend):
412 def __init__(self, ui, basedir):
412 def __init__(self, ui, basedir):
413 super(fsbackend, self).__init__(ui)
413 super(fsbackend, self).__init__(ui)
414 self.opener = scmutil.opener(basedir)
414 self.opener = scmutil.opener(basedir)
415
415
416 def _join(self, f):
416 def _join(self, f):
417 return os.path.join(self.opener.base, f)
417 return os.path.join(self.opener.base, f)
418
418
419 def getfile(self, fname):
419 def getfile(self, fname):
420 path = self._join(fname)
420 path = self._join(fname)
421 if os.path.islink(path):
421 if os.path.islink(path):
422 return (os.readlink(path), (True, False))
422 return (os.readlink(path), (True, False))
423 isexec = False
423 isexec = False
424 try:
424 try:
425 isexec = os.lstat(path).st_mode & 0100 != 0
425 isexec = os.lstat(path).st_mode & 0100 != 0
426 except OSError, e:
426 except OSError, e:
427 if e.errno != errno.ENOENT:
427 if e.errno != errno.ENOENT:
428 raise
428 raise
429 return (self.opener.read(fname), (False, isexec))
429 return (self.opener.read(fname), (False, isexec))
430
430
431 def setfile(self, fname, data, mode, copysource):
431 def setfile(self, fname, data, mode, copysource):
432 islink, isexec = mode
432 islink, isexec = mode
433 if data is None:
433 if data is None:
434 util.setflags(self._join(fname), islink, isexec)
434 util.setflags(self._join(fname), islink, isexec)
435 return
435 return
436 if islink:
436 if islink:
437 self.opener.symlink(data, fname)
437 self.opener.symlink(data, fname)
438 else:
438 else:
439 self.opener.write(fname, data)
439 self.opener.write(fname, data)
440 if isexec:
440 if isexec:
441 util.setflags(self._join(fname), False, True)
441 util.setflags(self._join(fname), False, True)
442
442
443 def unlink(self, fname):
443 def unlink(self, fname):
444 util.unlinkpath(self._join(fname), ignoremissing=True)
444 util.unlinkpath(self._join(fname), ignoremissing=True)
445
445
446 def writerej(self, fname, failed, total, lines):
446 def writerej(self, fname, failed, total, lines):
447 fname = fname + ".rej"
447 fname = fname + ".rej"
448 self.ui.warn(
448 self.ui.warn(
449 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
449 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
450 (failed, total, fname))
450 (failed, total, fname))
451 fp = self.opener(fname, 'w')
451 fp = self.opener(fname, 'w')
452 fp.writelines(lines)
452 fp.writelines(lines)
453 fp.close()
453 fp.close()
454
454
455 def exists(self, fname):
455 def exists(self, fname):
456 return os.path.lexists(self._join(fname))
456 return os.path.lexists(self._join(fname))
457
457
458 class workingbackend(fsbackend):
458 class workingbackend(fsbackend):
459 def __init__(self, ui, repo, similarity):
459 def __init__(self, ui, repo, similarity):
460 super(workingbackend, self).__init__(ui, repo.root)
460 super(workingbackend, self).__init__(ui, repo.root)
461 self.repo = repo
461 self.repo = repo
462 self.similarity = similarity
462 self.similarity = similarity
463 self.removed = set()
463 self.removed = set()
464 self.changed = set()
464 self.changed = set()
465 self.copied = []
465 self.copied = []
466
466
467 def _checkknown(self, fname):
467 def _checkknown(self, fname):
468 if self.repo.dirstate[fname] == '?' and self.exists(fname):
468 if self.repo.dirstate[fname] == '?' and self.exists(fname):
469 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
469 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
470
470
471 def setfile(self, fname, data, mode, copysource):
471 def setfile(self, fname, data, mode, copysource):
472 self._checkknown(fname)
472 self._checkknown(fname)
473 super(workingbackend, self).setfile(fname, data, mode, copysource)
473 super(workingbackend, self).setfile(fname, data, mode, copysource)
474 if copysource is not None:
474 if copysource is not None:
475 self.copied.append((copysource, fname))
475 self.copied.append((copysource, fname))
476 self.changed.add(fname)
476 self.changed.add(fname)
477
477
478 def unlink(self, fname):
478 def unlink(self, fname):
479 self._checkknown(fname)
479 self._checkknown(fname)
480 super(workingbackend, self).unlink(fname)
480 super(workingbackend, self).unlink(fname)
481 self.removed.add(fname)
481 self.removed.add(fname)
482 self.changed.add(fname)
482 self.changed.add(fname)
483
483
484 def close(self):
484 def close(self):
485 wctx = self.repo[None]
485 wctx = self.repo[None]
486 changed = set(self.changed)
486 changed = set(self.changed)
487 for src, dst in self.copied:
487 for src, dst in self.copied:
488 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
488 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
489 if self.removed:
489 if self.removed:
490 wctx.forget(sorted(self.removed))
490 wctx.forget(sorted(self.removed))
491 for f in self.removed:
491 for f in self.removed:
492 if f not in self.repo.dirstate:
492 if f not in self.repo.dirstate:
493 # File was deleted and no longer belongs to the
493 # File was deleted and no longer belongs to the
494 # dirstate, it was probably marked added then
494 # dirstate, it was probably marked added then
495 # deleted, and should not be considered by
495 # deleted, and should not be considered by
496 # marktouched().
496 # marktouched().
497 changed.discard(f)
497 changed.discard(f)
498 if changed:
498 if changed:
499 scmutil.marktouched(self.repo, changed, self.similarity)
499 scmutil.marktouched(self.repo, changed, self.similarity)
500 return sorted(self.changed)
500 return sorted(self.changed)
501
501
502 class filestore(object):
502 class filestore(object):
503 def __init__(self, maxsize=None):
503 def __init__(self, maxsize=None):
504 self.opener = None
504 self.opener = None
505 self.files = {}
505 self.files = {}
506 self.created = 0
506 self.created = 0
507 self.maxsize = maxsize
507 self.maxsize = maxsize
508 if self.maxsize is None:
508 if self.maxsize is None:
509 self.maxsize = 4*(2**20)
509 self.maxsize = 4*(2**20)
510 self.size = 0
510 self.size = 0
511 self.data = {}
511 self.data = {}
512
512
513 def setfile(self, fname, data, mode, copied=None):
513 def setfile(self, fname, data, mode, copied=None):
514 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
514 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
515 self.data[fname] = (data, mode, copied)
515 self.data[fname] = (data, mode, copied)
516 self.size += len(data)
516 self.size += len(data)
517 else:
517 else:
518 if self.opener is None:
518 if self.opener is None:
519 root = tempfile.mkdtemp(prefix='hg-patch-')
519 root = tempfile.mkdtemp(prefix='hg-patch-')
520 self.opener = scmutil.opener(root)
520 self.opener = scmutil.opener(root)
521 # Avoid filename issues with these simple names
521 # Avoid filename issues with these simple names
522 fn = str(self.created)
522 fn = str(self.created)
523 self.opener.write(fn, data)
523 self.opener.write(fn, data)
524 self.created += 1
524 self.created += 1
525 self.files[fname] = (fn, mode, copied)
525 self.files[fname] = (fn, mode, copied)
526
526
527 def getfile(self, fname):
527 def getfile(self, fname):
528 if fname in self.data:
528 if fname in self.data:
529 return self.data[fname]
529 return self.data[fname]
530 if not self.opener or fname not in self.files:
530 if not self.opener or fname not in self.files:
531 raise IOError
531 raise IOError
532 fn, mode, copied = self.files[fname]
532 fn, mode, copied = self.files[fname]
533 return self.opener.read(fn), mode, copied
533 return self.opener.read(fn), mode, copied
534
534
535 def close(self):
535 def close(self):
536 if self.opener:
536 if self.opener:
537 shutil.rmtree(self.opener.base)
537 shutil.rmtree(self.opener.base)
538
538
class repobackend(abstractbackend):
    """Backend reading file content from a repository changectx and
    recording patched results into a filestore-like *store*.

    Tracks which files were changed, removed and copied so close() can
    report the full set of touched paths.
    """
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        # bookkeeping of everything the patch touched
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        """Abort with PatchError when *fname* is not tracked in the ctx."""
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for *fname* in the context.

        Raises IOError when the file does not exist there.
        """
        try:
            filectx = self.ctx[fname]
        except error.LookupError:
            raise IOError
        flagstr = filectx.flags()
        return filectx.data(), ('l' in flagstr, 'x' in flagstr)

    def setfile(self, fname, data, mode, copysource):
        """Record patched content for *fname* into the store."""
        if copysource:
            self._checkknown(copysource)
        # a None payload means "keep current content, update metadata only"
        payload = data if data is not None else self.ctx[fname].data()
        self.store.setfile(fname, payload, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        """Mark a tracked file as removed by the patch."""
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        """True when *fname* is tracked in the context."""
        return fname in self.ctx

    def close(self):
        """Return the set of all files touched (changed or removed)."""
        return self.changed.union(self.removed)
580
580
# Hunk header parsers.  Raw strings keep the regex backslashes literal
# (non-raw '\d' only works by accident and is deprecated escape syntax).
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# supported end-of-line normalization modes for applying patches
eolmodes = ['strict', 'crlf', 'lf', 'auto']
585
585
class patchfile(object):
    """State and logic for patching one file.

    File content is fetched through *backend*/*store*, hunks are applied
    in-memory against self.lines, and close() writes the result back and
    emits any rejects via write_rej().

    Fix: the "cannot create" warning applied ``%`` inside ``_()``, which
    breaks translation lookup; the ``%`` now applies to the translated
    string (i18n: "% outside _()").
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None  # EOL style detected from current file content
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        try:
            if self.copysource is None:
                data, mode = backend.getfile(self.fname)
                self.exists = True
            else:
                # copies/renames read their content from the store
                data, mode = store.getfile(self.copysource)[:2]
                self.exists = backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        except IOError:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0  # cumulative line-count drift from applied hunks
        self.skew = 0    # offset at which the previous hunk matched
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write *lines* through the backend, re-applying the target EOL."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Emit "patching file ..." once; as a warning when *warn* is set."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum
        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk *h*; return 0/fuzz amount on success, -1 on reject."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                # i18n: keep the '%' application outside _() so the msgid
                # matches the translation catalog
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush patched content and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
801
801
class hunk(object):
    """Parsed representation of one text hunk (unified or context format).

    self.a holds the old-side lines ('-'/' ' prefixed), self.b the
    new-side lines (no prefix), self.hunk the raw hunk text including
    the @@/*** descriptor line.

    Fix: the new-text parse loop in read_context_hunk reported failures
    as "old text line"; it now says "new text line".
    """
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body following the @@ descriptor."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in the descriptor means length 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk (*** old / --- new blocks)."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old-text block
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': drop the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new-text block
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # this loop parses the *new* side of the hunk
                raise PatchError(_("bad hunk #%d new text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # merge the new-side line into self.hunk at the right spot
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # rewrite the descriptor in unified form:
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        """Consume a trailing '\\ No newline ...' marker, if present."""
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when the parsed line counts match the descriptor."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes up to 'fuzz' context lines from the top and bottom
        # of the old/new line lists. It checks the hunk to make sure only
        # context lines are removed, and then returns the shortened lists
        # plus the number of lines dropped from the top.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with *fuzz* applied."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1018
1018
1019 class binhunk(object):
1019 class binhunk(object):
1020 'A binary patch file.'
1020 'A binary patch file.'
1021 def __init__(self, lr, fname):
1021 def __init__(self, lr, fname):
1022 self.text = None
1022 self.text = None
1023 self.delta = False
1023 self.delta = False
1024 self.hunk = ['GIT binary patch\n']
1024 self.hunk = ['GIT binary patch\n']
1025 self._fname = fname
1025 self._fname = fname
1026 self._read(lr)
1026 self._read(lr)
1027
1027
1028 def complete(self):
1028 def complete(self):
1029 return self.text is not None
1029 return self.text is not None
1030
1030
1031 def new(self, lines):
1031 def new(self, lines):
1032 if self.delta:
1032 if self.delta:
1033 return [applybindelta(self.text, ''.join(lines))]
1033 return [applybindelta(self.text, ''.join(lines))]
1034 return [self.text]
1034 return [self.text]
1035
1035
1036 def _read(self, lr):
1036 def _read(self, lr):
1037 def getline(lr, hunk):
1037 def getline(lr, hunk):
1038 l = lr.readline()
1038 l = lr.readline()
1039 hunk.append(l)
1039 hunk.append(l)
1040 return l.rstrip('\r\n')
1040 return l.rstrip('\r\n')
1041
1041
1042 size = 0
1042 size = 0
1043 while True:
1043 while True:
1044 line = getline(lr, self.hunk)
1044 line = getline(lr, self.hunk)
1045 if not line:
1045 if not line:
1046 raise PatchError(_('could not extract "%s" binary data')
1046 raise PatchError(_('could not extract "%s" binary data')
1047 % self._fname)
1047 % self._fname)
1048 if line.startswith('literal '):
1048 if line.startswith('literal '):
1049 size = int(line[8:].rstrip())
1049 size = int(line[8:].rstrip())
1050 break
1050 break
1051 if line.startswith('delta '):
1051 if line.startswith('delta '):
1052 size = int(line[6:].rstrip())
1052 size = int(line[6:].rstrip())
1053 self.delta = True
1053 self.delta = True
1054 break
1054 break
1055 dec = []
1055 dec = []
1056 line = getline(lr, self.hunk)
1056 line = getline(lr, self.hunk)
1057 while len(line) > 1:
1057 while len(line) > 1:
1058 l = line[0]
1058 l = line[0]
1059 if l <= 'Z' and l >= 'A':
1059 if l <= 'Z' and l >= 'A':
1060 l = ord(l) - ord('A') + 1
1060 l = ord(l) - ord('A') + 1
1061 else:
1061 else:
1062 l = ord(l) - ord('a') + 27
1062 l = ord(l) - ord('a') + 27
1063 try:
1063 try:
1064 dec.append(base85.b85decode(line[1:])[:l])
1064 dec.append(base85.b85decode(line[1:])[:l])
1065 except ValueError, e:
1065 except ValueError, e:
1066 raise PatchError(_('could not decode "%s" binary patch: %s')
1066 raise PatchError(_('could not decode "%s" binary patch: %s')
1067 % (self._fname, str(e)))
1067 % (self._fname, str(e)))
1068 line = getline(lr, self.hunk)
1068 line = getline(lr, self.hunk)
1069 text = zlib.decompress(''.join(dec))
1069 text = zlib.decompress(''.join(dec))
1070 if len(text) != size:
1070 if len(text) != size:
1071 raise PatchError(_('"%s" length is %d bytes, should be %d')
1071 raise PatchError(_('"%s" length is %d bytes, should be %d')
1072 % (self._fname, len(text), size))
1072 % (self._fname, len(text), size))
1073 self.text = text
1073 self.text = text
1074
1074
def parsefilename(str):
    """Extract the filename from a '--- ' / '+++ ' diff header line.

    The leading four characters are dropped, then everything from the
    first tab -- or, when no tab exists, the first space -- onwards is
    discarded.  Returns the whole remainder when neither is present.
    """
    rest = str[4:].rstrip('\r\n')
    # a tab separator takes priority over a space separator
    for sep in ('\t', ' '):
        cut = rest.find(sep)
        if cut >= 0:
            return rest[:cut]
    return rest
1084
1084
def pathstrip(path, strip):
    """Split *path* after removing *strip* leading directory levels.

    Returns a (stripped-prefix, remainder) pair; runs of consecutive
    slashes count as a single separator.  Raises PatchError when the
    path has fewer than *strip* directory components.
    """
    if strip == 0:
        return '', path.rstrip()
    pos = 0
    last = len(path) - 1
    remaining = strip
    while remaining > 0:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # consume '//' in the path
        while pos < last and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), path[pos:].rstrip()
1102
1102
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
    """Build a patchmeta (target path plus ADD/DELETE op) for a plain
    (non-git) diff, mirroring patch(1)'s heuristics for choosing which
    of the two header paths names the file to patch.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    # skip the second backend lookup when both headers name the same file
    goodb = gooda if afile == bfile else (not nullb and backend.exists(bfile))
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    preferred = isbackup and afile or bfile
    fname = None
    if not missing:
        if gooda and goodb:
            fname = preferred
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = preferred
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1151
1151
def scangitpatch(lr, firstline):
    """Pre-scan a git patch stream and return its metadata records.

    A git patch may rename 'a' to 'b', change 'b', then copy 'a' to 'c'
    and change 'c'.  Such a sequence cannot be applied as read: by the
    time the copy is seen, 'a' is gone and 'b' already modified.  So the
    whole stream is read ahead of time to collect copy/rename commands,
    and the underlying file is rewound so normal hunk parsing can run.
    """
    startpos = 0
    try:
        startpos = lr.fp.tell()
        srcfp = lr.fp
    except IOError:
        # unseekable input: buffer everything so we can rewind below
        srcfp = cStringIO.StringIO(lr.fp.read())
    scanner = linereader(srcfp)
    scanner.push(firstline)
    patches = readgitpatch(scanner)
    srcfp.seek(startpos)
    return patches
1177
1177
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    # emitfile: a 'file' event is pending and will be emitted together
    # with the file's first hunk; newfile: a new file header was just seen
    emitfile = newfile = False
    # stack (reversed list) of pending git patchmeta records, or None
    # until the first git header is seen
    gitpatches = None

    # our states
    BFILE = 1
    # context: None = unknown diff flavor yet, True = context diff,
    # False = unified diff
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # a hunk header for the currently selected file
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # the 'file' event is delayed until its first hunk so the
                # hunk can be passed along with it
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush hunkless metadata entries (mode changes, deletions...)
            # that precede the file this header refers to
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit remaining hunkless git metadata entries
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1276
1276
1277 def applybindelta(binchunk, data):
1277 def applybindelta(binchunk, data):
1278 """Apply a binary delta hunk
1278 """Apply a binary delta hunk
1279 The algorithm used is the algorithm from git's patch-delta.c
1279 The algorithm used is the algorithm from git's patch-delta.c
1280 """
1280 """
1281 def deltahead(binchunk):
1281 def deltahead(binchunk):
1282 i = 0
1282 i = 0
1283 for c in binchunk:
1283 for c in binchunk:
1284 i += 1
1284 i += 1
1285 if not (ord(c) & 0x80):
1285 if not (ord(c) & 0x80):
1286 return i
1286 return i
1287 return i
1287 return i
1288 out = ""
1288 out = ""
1289 s = deltahead(binchunk)
1289 s = deltahead(binchunk)
1290 binchunk = binchunk[s:]
1290 binchunk = binchunk[s:]
1291 s = deltahead(binchunk)
1291 s = deltahead(binchunk)
1292 binchunk = binchunk[s:]
1292 binchunk = binchunk[s:]
1293 i = 0
1293 i = 0
1294 while i < len(binchunk):
1294 while i < len(binchunk):
1295 cmd = ord(binchunk[i])
1295 cmd = ord(binchunk[i])
1296 i += 1
1296 i += 1
1297 if (cmd & 0x80):
1297 if (cmd & 0x80):
1298 offset = 0
1298 offset = 0
1299 size = 0
1299 size = 0
1300 if (cmd & 0x01):
1300 if (cmd & 0x01):
1301 offset = ord(binchunk[i])
1301 offset = ord(binchunk[i])
1302 i += 1
1302 i += 1
1303 if (cmd & 0x02):
1303 if (cmd & 0x02):
1304 offset |= ord(binchunk[i]) << 8
1304 offset |= ord(binchunk[i]) << 8
1305 i += 1
1305 i += 1
1306 if (cmd & 0x04):
1306 if (cmd & 0x04):
1307 offset |= ord(binchunk[i]) << 16
1307 offset |= ord(binchunk[i]) << 16
1308 i += 1
1308 i += 1
1309 if (cmd & 0x08):
1309 if (cmd & 0x08):
1310 offset |= ord(binchunk[i]) << 24
1310 offset |= ord(binchunk[i]) << 24
1311 i += 1
1311 i += 1
1312 if (cmd & 0x10):
1312 if (cmd & 0x10):
1313 size = ord(binchunk[i])
1313 size = ord(binchunk[i])
1314 i += 1
1314 i += 1
1315 if (cmd & 0x20):
1315 if (cmd & 0x20):
1316 size |= ord(binchunk[i]) << 8
1316 size |= ord(binchunk[i]) << 8
1317 i += 1
1317 i += 1
1318 if (cmd & 0x40):
1318 if (cmd & 0x40):
1319 size |= ord(binchunk[i]) << 16
1319 size |= ord(binchunk[i]) << 16
1320 i += 1
1320 i += 1
1321 if size == 0:
1321 if size == 0:
1322 size = 0x10000
1322 size = 0x10000
1323 offset_end = offset + size
1323 offset_end = offset + size
1324 out += data[offset:offset_end]
1324 out += data[offset:offset_end]
1325 elif cmd != 0:
1325 elif cmd != 0:
1326 offset_end = i + cmd
1326 offset_end = i + cmd
1327 out += binchunk[i:offset_end]
1327 out += binchunk[i:offset_end]
1328 i += cmd
1328 i += cmd
1329 else:
1329 else:
1330 raise PatchError(_('unexpected delta opcode 0'))
1330 raise PatchError(_('unexpected delta opcode 0'))
1331 return out
1331 return out
1332
1332
def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
    """Read a patch from fp and try to apply it with the default
    patchfile class.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    With eolmode 'strict', the patch content and the patched files are
    read in binary mode; with any other mode, line endings are ignored
    while patching and then normalized according to eolmode.
    """
    return _applydiff(ui, fp, patchfile, backend, store,
                      strip=strip, eolmode=eolmode)
1345
1345
def _applydiff(ui, fp, patcher, backend, store, strip=1,
               eolmode='strict'):
    # Drive iterhunks() over fp and apply each file/hunk through
    # patcher/backend.  Returns -1 on rejects, 1 on fuzz, 0 when clean.

    def pstrip(p):
        # strip leading path components; strip - 1 because git-style
        # metadata paths have no a/ b/ prefix at this point
        return pathstrip(p, strip - 1)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # file selection failed earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # finalize the previous file before switching targets
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # non-git patch: derive the metadata from the headers
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: deletion, copy/rename or mode change
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError, inst:
                # could not open the target: count as a reject and keep
                # going so later files still get patched
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preload copy/rename sources into the store before any of
            # them is overwritten by the patch itself
            for gp in values:
                path = pstrip(gp.oldpath)
                try:
                    data, mode = backend.getfile(path)
                except IOError, e:
                    if e.errno != errno.ENOENT:
                        raise
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
1427
1427
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    Touched file names are added to the *files* set.  Raises PatchError
    when the external command exits non-zero.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # initialized before the loop so that malformed patcher output (a
    # fuzz/FAILED line arriving before any "patching file" line) cannot
    # raise NameError below
    pf = ''
    printed_file = False
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    # print the file name once before its warnings
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        # record touched files even if reading the output failed midway
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1469
1469
def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
    # Apply patchobj (a path or an open file object) through the given
    # backend.  Returns True when the patch applied with fuzz, False when
    # it applied cleanly; raises PatchError when it failed to apply.
    if files is None:
        files = set()
    if eolmode is None:
        # fall back to the user's configured EOL handling
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        # patchobj may be either a file name or an already open file
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip,
                        eolmode=eolmode)
    finally:
        # only close the file we opened ourselves; always flush the
        # backend (collecting touched files) and the temporary store
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1495
1495
def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
                  similarity=0):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    wbackend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, wbackend, patchobj, strip, files, eolmode)
1502
1502
def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
              eolmode='strict'):
    """Apply <patchobj> on top of <ctx>, writing results through a
    repository backend into <store>."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, files, eolmode)
1507
1507
1508 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1508 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1509 similarity=0):
1509 similarity=0):
1510 """Apply <patchname> to the working directory.
1510 """Apply <patchname> to the working directory.
1511
1511
1512 'eolmode' specifies how end of lines should be handled. It can be:
1512 'eolmode' specifies how end of lines should be handled. It can be:
1513 - 'strict': inputs are read in binary mode, EOLs are preserved
1513 - 'strict': inputs are read in binary mode, EOLs are preserved
1514 - 'crlf': EOLs are ignored when patching and reset to CRLF
1514 - 'crlf': EOLs are ignored when patching and reset to CRLF
1515 - 'lf': EOLs are ignored when patching and reset to LF
1515 - 'lf': EOLs are ignored when patching and reset to LF
1516 - None: get it from user settings, default to 'strict'
1516 - None: get it from user settings, default to 'strict'
1517 'eolmode' is ignored when using an external patcher program.
1517 'eolmode' is ignored when using an external patcher program.
1518
1518
1519 Returns whether patch was applied with fuzz factor.
1519 Returns whether patch was applied with fuzz factor.
1520 """
1520 """
1521 patcher = ui.config('ui', 'patch')
1521 patcher = ui.config('ui', 'patch')
1522 if files is None:
1522 if files is None:
1523 files = set()
1523 files = set()
1524 try:
1524 try:
1525 if patcher:
1525 if patcher:
1526 return _externalpatch(ui, repo, patcher, patchname, strip,
1526 return _externalpatch(ui, repo, patcher, patchname, strip,
1527 files, similarity)
1527 files, similarity)
1528 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1528 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1529 similarity)
1529 similarity)
1530 except PatchError, err:
1530 except PatchError, err:
1531 raise util.Abort(str(err))
1531 raise util.Abort(str(err))
1532
1532
def changedfiles(ui, repo, patchpath, strip=1):
    # Return the set of file names touched by the patch at patchpath,
    # including rename sources, without applying anything.
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git patch: metadata paths keep no a/ b/ prefix
                    gp.path = pathstrip(gp.path, strip - 1)[1]
                    if gp.oldpath:
                        gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
                else:
                    # plain patch: reconstruct metadata from the headers
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()
1555
1555
class GitDiffRequired(Exception):
    """Raised to signal that a diff must be regenerated in git format.

    Used by diff(): when plain patch format would lose data and the
    caller's losedatafn does not accept the loss, the buffered plain
    diff is abandoned and re-run with git format enabled.
    """
    pass
1557 pass
1558
1558
def diffopts(ui, opts=None, untrusted=False, section='diff'):
    """Build an mdiff.diffopts object from command options and config.

    A truthy value in *opts* wins; otherwise the value is read from the
    given configuration *section*.
    """
    def get(key, name=None, getter=ui.configbool):
        fromopts = opts and opts.get(key)
        if fromopts:
            return fromopts
        return getter(section, name or key, None, untrusted=untrusted)
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=get('git'),
        nodates=get('nodates'),
        showfunc=get('show_function', 'showfunc'),
        ignorews=get('ignore_all_space', 'ignorews'),
        ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
        context=get('unified', getter=ui.config))
1572
1572
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache of filelogs keyed by file name, to avoid
        # reopening the same filelog for every revision of a file
        cache = {}
        order = util.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    revs = None
    # full hashes in debug mode, short ones otherwise
    hexfunc = repo.ui.debugflag and hex or short
    revs = [hexfunc(node) for node in [node1, node2] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2)

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        # try plain format first; fall back to git format if some change
        # cannot be expressed without data loss (see GitDiffRequired)
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1647
1647
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # labels for lines inside a file header block
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # labels for hunk content lines
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    # head tracks whether we are currently inside a file header block
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                # re-emit the newlines removed by split()
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    # first hunk header ends the file header block
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            if not head and line and line[0] in '+-':
                # highlight trailing whitespace, but only in changed lines
                stripline = line.rstrip()
            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                # emit the stripped trailing whitespace with its own label
                yield (line[len(stripline):], 'diff.trailingwhitespace')
1688
1688
1689 def diffui(*args, **kw):
1689 def diffui(*args, **kw):
1690 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1690 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1691 return difflabel(diff, *args, **kw)
1691 return difflabel(diff, *args, **kw)
1692
1692
1693 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1693 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1694 copy, getfilectx, opts, losedatafn, prefix):
1694 copy, getfilectx, opts, losedatafn, prefix):
1695
1695
1696 def join(f):
1696 def join(f):
1697 return posixpath.join(prefix, f)
1697 return posixpath.join(prefix, f)
1698
1698
1699 def addmodehdr(header, omode, nmode):
1699 def addmodehdr(header, omode, nmode):
1700 if omode != nmode:
1700 if omode != nmode:
1701 header.append('old mode %s\n' % omode)
1701 header.append('old mode %s\n' % omode)
1702 header.append('new mode %s\n' % nmode)
1702 header.append('new mode %s\n' % nmode)
1703
1703
1704 def addindexmeta(meta, revs):
1704 def addindexmeta(meta, revs):
1705 if opts.git:
1705 if opts.git:
1706 i = len(revs)
1706 i = len(revs)
1707 if i==2:
1707 if i==2:
1708 meta.append('index %s..%s\n' % tuple(revs))
1708 meta.append('index %s..%s\n' % tuple(revs))
1709 elif i==3:
1709 elif i==3:
1710 meta.append('index %s,%s..%s\n' % tuple(revs))
1710 meta.append('index %s,%s..%s\n' % tuple(revs))
1711
1711
1712 def gitindex(text):
1712 def gitindex(text):
1713 if not text:
1713 if not text:
1714 text = ""
1714 text = ""
1715 l = len(text)
1715 l = len(text)
1716 s = util.sha1('blob %d\0' % l)
1716 s = util.sha1('blob %d\0' % l)
1717 s.update(text)
1717 s.update(text)
1718 return s.hexdigest()
1718 return s.hexdigest()
1719
1719
1720 def diffline(a, b, revs):
1720 def diffline(a, b, revs):
1721 if opts.git:
1721 if opts.git:
1722 line = 'diff --git a/%s b/%s\n' % (a, b)
1722 line = 'diff --git a/%s b/%s\n' % (a, b)
1723 elif not repo.ui.quiet:
1723 elif not repo.ui.quiet:
1724 if revs:
1724 if revs:
1725 revinfo = ' '.join(["-r %s" % rev for rev in revs])
1725 revinfo = ' '.join(["-r %s" % rev for rev in revs])
1726 line = 'diff %s %s\n' % (revinfo, a)
1726 line = 'diff %s %s\n' % (revinfo, a)
1727 else:
1727 else:
1728 line = 'diff %s\n' % a
1728 line = 'diff %s\n' % a
1729 else:
1729 else:
1730 line = ''
1730 line = ''
1731 return line
1731 return line
1732
1732
1733 date1 = util.datestr(ctx1.date())
1733 date1 = util.datestr(ctx1.date())
1734 man1 = ctx1.manifest()
1734 man1 = ctx1.manifest()
1735
1735
1736 gone = set()
1736 gone = set()
1737 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1737 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1738
1738
1739 copyto = dict([(v, k) for k, v in copy.items()])
1739 copyto = dict([(v, k) for k, v in copy.items()])
1740
1740
1741 if opts.git:
1741 if opts.git:
1742 revs = None
1742 revs = None
1743
1743
1744 for f in sorted(modified + added + removed):
1744 for f in sorted(modified + added + removed):
1745 to = None
1745 to = None
1746 tn = None
1746 tn = None
1747 dodiff = True
1747 dodiff = True
1748 header = []
1748 header = []
1749 if f in man1:
1749 if f in man1:
1750 to = getfilectx(f, ctx1).data()
1750 to = getfilectx(f, ctx1).data()
1751 if f not in removed:
1751 if f not in removed:
1752 tn = getfilectx(f, ctx2).data()
1752 tn = getfilectx(f, ctx2).data()
1753 a, b = f, f
1753 a, b = f, f
1754 if opts.git or losedatafn:
1754 if opts.git or losedatafn:
1755 if f in added or (f in modified and to is None):
1755 if f in added or (f in modified and to is None):
1756 mode = gitmode[ctx2.flags(f)]
1756 mode = gitmode[ctx2.flags(f)]
1757 if f in copy or f in copyto:
1757 if f in copy or f in copyto:
1758 if opts.git:
1758 if opts.git:
1759 if f in copy:
1759 if f in copy:
1760 a = copy[f]
1760 a = copy[f]
1761 else:
1761 else:
1762 a = copyto[f]
1762 a = copyto[f]
1763 omode = gitmode[man1.flags(a)]
1763 omode = gitmode[man1.flags(a)]
1764 addmodehdr(header, omode, mode)
1764 addmodehdr(header, omode, mode)
1765 if a in removed and a not in gone:
1765 if a in removed and a not in gone:
1766 op = 'rename'
1766 op = 'rename'
1767 gone.add(a)
1767 gone.add(a)
1768 else:
1768 else:
1769 op = 'copy'
1769 op = 'copy'
1770 header.append('%s from %s\n' % (op, join(a)))
1770 header.append('%s from %s\n' % (op, join(a)))
1771 header.append('%s to %s\n' % (op, join(f)))
1771 header.append('%s to %s\n' % (op, join(f)))
1772 to = getfilectx(a, ctx1).data()
1772 to = getfilectx(a, ctx1).data()
1773 else:
1773 else:
1774 losedatafn(f)
1774 losedatafn(f)
1775 else:
1775 else:
1776 if opts.git:
1776 if opts.git:
1777 header.append('new file mode %s\n' % mode)
1777 header.append('new file mode %s\n' % mode)
1778 elif ctx2.flags(f):
1778 elif ctx2.flags(f):
1779 losedatafn(f)
1779 losedatafn(f)
1780 # In theory, if tn was copied or renamed we should check
1780 # In theory, if tn was copied or renamed we should check
1781 # if the source is binary too but the copy record already
1781 # if the source is binary too but the copy record already
1782 # forces git mode.
1782 # forces git mode.
1783 if util.binary(tn):
1783 if util.binary(tn):
1784 if opts.git:
1784 if opts.git:
1785 dodiff = 'binary'
1785 dodiff = 'binary'
1786 else:
1786 else:
1787 losedatafn(f)
1787 losedatafn(f)
1788 if not opts.git and not tn:
1788 if not opts.git and not tn:
1789 # regular diffs cannot represent new empty file
1789 # regular diffs cannot represent new empty file
1790 losedatafn(f)
1790 losedatafn(f)
1791 elif f in removed or (f in modified and tn is None):
1791 elif f in removed or (f in modified and tn is None):
1792 if opts.git:
1792 if opts.git:
1793 # have we already reported a copy above?
1793 # have we already reported a copy above?
1794 if ((f in copy and copy[f] in added
1794 if ((f in copy and copy[f] in added
1795 and copyto[copy[f]] == f) or
1795 and copyto[copy[f]] == f) or
1796 (f in copyto and copyto[f] in added
1796 (f in copyto and copyto[f] in added
1797 and copy[copyto[f]] == f)):
1797 and copy[copyto[f]] == f)):
1798 dodiff = False
1798 dodiff = False
1799 else:
1799 else:
1800 header.append('deleted file mode %s\n' %
1800 header.append('deleted file mode %s\n' %
1801 gitmode[man1.flags(f)])
1801 gitmode[man1.flags(f)])
1802 if util.binary(to):
1802 if util.binary(to):
1803 dodiff = 'binary'
1803 dodiff = 'binary'
1804 elif not to or util.binary(to):
1804 elif not to or util.binary(to):
1805 # regular diffs cannot represent empty file deletion
1805 # regular diffs cannot represent empty file deletion
1806 losedatafn(f)
1806 losedatafn(f)
1807 else:
1807 else:
1808 oflag = man1.flags(f)
1808 oflag = man1.flags(f)
1809 nflag = ctx2.flags(f)
1809 nflag = ctx2.flags(f)
1810 binary = util.binary(to) or util.binary(tn)
1810 binary = util.binary(to) or util.binary(tn)
1811 if opts.git:
1811 if opts.git:
1812 addmodehdr(header, gitmode[oflag], gitmode[nflag])
1812 addmodehdr(header, gitmode[oflag], gitmode[nflag])
1813 if binary:
1813 if binary:
1814 dodiff = 'binary'
1814 dodiff = 'binary'
1815 elif binary or nflag != oflag:
1815 elif binary or nflag != oflag:
1816 losedatafn(f)
1816 losedatafn(f)
1817
1817
1818 if dodiff:
1818 if dodiff:
1819 if opts.git or revs:
1819 if opts.git or revs:
1820 header.insert(0, diffline(join(a), join(b), revs))
1820 header.insert(0, diffline(join(a), join(b), revs))
1821 if dodiff == 'binary':
1821 if dodiff == 'binary':
1822 text = mdiff.b85diff(to, tn)
1822 text = mdiff.b85diff(to, tn)
1823 if text:
1823 if text:
1824 addindexmeta(header, [gitindex(to), gitindex(tn)])
1824 addindexmeta(header, [gitindex(to), gitindex(tn)])
1825 else:
1825 else:
1826 text = mdiff.unidiff(to, date1,
1826 text = mdiff.unidiff(to, date1,
1827 # ctx2 date may be dynamic
1827 # ctx2 date may be dynamic
1828 tn, util.datestr(ctx2.date()),
1828 tn, util.datestr(ctx2.date()),
1829 join(a), join(b), opts=opts)
1829 join(a), join(b), opts=opts)
1830 if header and (text or len(header) > 1):
1830 if header and (text or len(header) > 1):
1831 yield ''.join(header)
1831 yield ''.join(header)
1832 if text:
1832 if text:
1833 yield text
1833 yield text
1834
1834
1835 def diffstatsum(stats):
1835 def diffstatsum(stats):
1836 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1836 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1837 for f, a, r, b in stats:
1837 for f, a, r, b in stats:
1838 maxfile = max(maxfile, encoding.colwidth(f))
1838 maxfile = max(maxfile, encoding.colwidth(f))
1839 maxtotal = max(maxtotal, a + r)
1839 maxtotal = max(maxtotal, a + r)
1840 addtotal += a
1840 addtotal += a
1841 removetotal += r
1841 removetotal += r
1842 binary = binary or b
1842 binary = binary or b
1843
1843
1844 return maxfile, maxtotal, addtotal, removetotal, binary
1844 return maxfile, maxtotal, addtotal, removetotal, binary
1845
1845
1846 def diffstatdata(lines):
1846 def diffstatdata(lines):
1847 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1847 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1848
1848
1849 results = []
1849 results = []
1850 filename, adds, removes, isbinary = None, 0, 0, False
1850 filename, adds, removes, isbinary = None, 0, 0, False
1851
1851
1852 def addresult():
1852 def addresult():
1853 if filename:
1853 if filename:
1854 results.append((filename, adds, removes, isbinary))
1854 results.append((filename, adds, removes, isbinary))
1855
1855
1856 for line in lines:
1856 for line in lines:
1857 if line.startswith('diff'):
1857 if line.startswith('diff'):
1858 addresult()
1858 addresult()
1859 # set numbers to 0 anyway when starting new file
1859 # set numbers to 0 anyway when starting new file
1860 adds, removes, isbinary = 0, 0, False
1860 adds, removes, isbinary = 0, 0, False
1861 if line.startswith('diff --git a/'):
1861 if line.startswith('diff --git a/'):
1862 filename = gitre.search(line).group(1)
1862 filename = gitre.search(line).group(1)
1863 elif line.startswith('diff -r'):
1863 elif line.startswith('diff -r'):
1864 # format: "diff -r ... -r ... filename"
1864 # format: "diff -r ... -r ... filename"
1865 filename = diffre.search(line).group(1)
1865 filename = diffre.search(line).group(1)
1866 elif line.startswith('+') and not line.startswith('+++ '):
1866 elif line.startswith('+') and not line.startswith('+++ '):
1867 adds += 1
1867 adds += 1
1868 elif line.startswith('-') and not line.startswith('--- '):
1868 elif line.startswith('-') and not line.startswith('--- '):
1869 removes += 1
1869 removes += 1
1870 elif (line.startswith('GIT binary patch') or
1870 elif (line.startswith('GIT binary patch') or
1871 line.startswith('Binary file')):
1871 line.startswith('Binary file')):
1872 isbinary = True
1872 isbinary = True
1873 addresult()
1873 addresult()
1874 return results
1874 return results
1875
1875
1876 def diffstat(lines, width=80, git=False):
1876 def diffstat(lines, width=80, git=False):
1877 output = []
1877 output = []
1878 stats = diffstatdata(lines)
1878 stats = diffstatdata(lines)
1879 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1879 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1880
1880
1881 countwidth = len(str(maxtotal))
1881 countwidth = len(str(maxtotal))
1882 if hasbinary and countwidth < 3:
1882 if hasbinary and countwidth < 3:
1883 countwidth = 3
1883 countwidth = 3
1884 graphwidth = width - countwidth - maxname - 6
1884 graphwidth = width - countwidth - maxname - 6
1885 if graphwidth < 10:
1885 if graphwidth < 10:
1886 graphwidth = 10
1886 graphwidth = 10
1887
1887
1888 def scale(i):
1888 def scale(i):
1889 if maxtotal <= graphwidth:
1889 if maxtotal <= graphwidth:
1890 return i
1890 return i
1891 # If diffstat runs out of room it doesn't print anything,
1891 # If diffstat runs out of room it doesn't print anything,
1892 # which isn't very useful, so always print at least one + or -
1892 # which isn't very useful, so always print at least one + or -
1893 # if there were at least some changes.
1893 # if there were at least some changes.
1894 return max(i * graphwidth // maxtotal, int(bool(i)))
1894 return max(i * graphwidth // maxtotal, int(bool(i)))
1895
1895
1896 for filename, adds, removes, isbinary in stats:
1896 for filename, adds, removes, isbinary in stats:
1897 if isbinary:
1897 if isbinary:
1898 count = 'Bin'
1898 count = 'Bin'
1899 else:
1899 else:
1900 count = adds + removes
1900 count = adds + removes
1901 pluses = '+' * scale(adds)
1901 pluses = '+' * scale(adds)
1902 minuses = '-' * scale(removes)
1902 minuses = '-' * scale(removes)
1903 output.append(' %s%s | %*s %s%s\n' %
1903 output.append(' %s%s | %*s %s%s\n' %
1904 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1904 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1905 countwidth, count, pluses, minuses))
1905 countwidth, count, pluses, minuses))
1906
1906
1907 if stats:
1907 if stats:
1908 output.append(_(' %d files changed, %d insertions(+), '
1908 output.append(_(' %d files changed, %d insertions(+), '
1909 '%d deletions(-)\n')
1909 '%d deletions(-)\n')
1910 % (len(stats), totaladds, totalremoves))
1910 % (len(stats), totaladds, totalremoves))
1911
1911
1912 return ''.join(output)
1912 return ''.join(output)
1913
1913
1914 def diffstatui(*args, **kw):
1914 def diffstatui(*args, **kw):
1915 '''like diffstat(), but yields 2-tuples of (output, label) for
1915 '''like diffstat(), but yields 2-tuples of (output, label) for
1916 ui.write()
1916 ui.write()
1917 '''
1917 '''
1918
1918
1919 for line in diffstat(*args, **kw).splitlines():
1919 for line in diffstat(*args, **kw).splitlines():
1920 if line and line[-1] in '+-':
1920 if line and line[-1] in '+-':
1921 name, graph = line.rsplit(' ', 1)
1921 name, graph = line.rsplit(' ', 1)
1922 yield (name + ' ', '')
1922 yield (name + ' ', '')
1923 m = re.search(r'\++', graph)
1923 m = re.search(r'\++', graph)
1924 if m:
1924 if m:
1925 yield (m.group(0), 'diffstat.inserted')
1925 yield (m.group(0), 'diffstat.inserted')
1926 m = re.search(r'-+', graph)
1926 m = re.search(r'-+', graph)
1927 if m:
1927 if m:
1928 yield (m.group(0), 'diffstat.deleted')
1928 yield (m.group(0), 'diffstat.deleted')
1929 else:
1929 else:
1930 yield (line, '')
1930 yield (line, '')
1931 yield ('\n', '')
1931 yield ('\n', '')
@@ -1,1537 +1,1537 b''
1 # subrepo.py - sub-repository handling for Mercurial
1 # subrepo.py - sub-repository handling for Mercurial
2 #
2 #
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import errno, os, re, shutil, posixpath, sys
8 import errno, os, re, shutil, posixpath, sys
9 import xml.dom.minidom
9 import xml.dom.minidom
10 import stat, subprocess, tarfile
10 import stat, subprocess, tarfile
11 from i18n import _
11 from i18n import _
12 import config, util, node, error, cmdutil, bookmarks, match as matchmod
12 import config, util, node, error, cmdutil, bookmarks, match as matchmod
13 import phases
13 import phases
14 import pathutil
14 import pathutil
15 hg = None
15 hg = None
16 propertycache = util.propertycache
16 propertycache = util.propertycache
17
17
18 nullstate = ('', '', 'empty')
18 nullstate = ('', '', 'empty')
19
19
20 def _expandedabspath(path):
20 def _expandedabspath(path):
21 '''
21 '''
22 get a path or url and if it is a path expand it and return an absolute path
22 get a path or url and if it is a path expand it and return an absolute path
23 '''
23 '''
24 expandedpath = util.urllocalpath(util.expandpath(path))
24 expandedpath = util.urllocalpath(util.expandpath(path))
25 u = util.url(expandedpath)
25 u = util.url(expandedpath)
26 if not u.scheme:
26 if not u.scheme:
27 path = util.normpath(os.path.abspath(u.path))
27 path = util.normpath(os.path.abspath(u.path))
28 return path
28 return path
29
29
30 def _getstorehashcachename(remotepath):
30 def _getstorehashcachename(remotepath):
31 '''get a unique filename for the store hash cache of a remote repository'''
31 '''get a unique filename for the store hash cache of a remote repository'''
32 return util.sha1(_expandedabspath(remotepath)).hexdigest()[0:12]
32 return util.sha1(_expandedabspath(remotepath)).hexdigest()[0:12]
33
33
34 def _calcfilehash(filename):
34 def _calcfilehash(filename):
35 data = ''
35 data = ''
36 if os.path.exists(filename):
36 if os.path.exists(filename):
37 fd = open(filename, 'rb')
37 fd = open(filename, 'rb')
38 data = fd.read()
38 data = fd.read()
39 fd.close()
39 fd.close()
40 return util.sha1(data).hexdigest()
40 return util.sha1(data).hexdigest()
41
41
42 class SubrepoAbort(error.Abort):
42 class SubrepoAbort(error.Abort):
43 """Exception class used to avoid handling a subrepo error more than once"""
43 """Exception class used to avoid handling a subrepo error more than once"""
44 def __init__(self, *args, **kw):
44 def __init__(self, *args, **kw):
45 error.Abort.__init__(self, *args, **kw)
45 error.Abort.__init__(self, *args, **kw)
46 self.subrepo = kw.get('subrepo')
46 self.subrepo = kw.get('subrepo')
47 self.cause = kw.get('cause')
47 self.cause = kw.get('cause')
48
48
49 def annotatesubrepoerror(func):
49 def annotatesubrepoerror(func):
50 def decoratedmethod(self, *args, **kargs):
50 def decoratedmethod(self, *args, **kargs):
51 try:
51 try:
52 res = func(self, *args, **kargs)
52 res = func(self, *args, **kargs)
53 except SubrepoAbort, ex:
53 except SubrepoAbort, ex:
54 # This exception has already been handled
54 # This exception has already been handled
55 raise ex
55 raise ex
56 except error.Abort, ex:
56 except error.Abort, ex:
57 subrepo = subrelpath(self)
57 subrepo = subrelpath(self)
58 errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo
58 errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo
59 # avoid handling this exception by raising a SubrepoAbort exception
59 # avoid handling this exception by raising a SubrepoAbort exception
60 raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
60 raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
61 cause=sys.exc_info())
61 cause=sys.exc_info())
62 return res
62 return res
63 return decoratedmethod
63 return decoratedmethod
64
64
65 def state(ctx, ui):
65 def state(ctx, ui):
66 """return a state dict, mapping subrepo paths configured in .hgsub
66 """return a state dict, mapping subrepo paths configured in .hgsub
67 to tuple: (source from .hgsub, revision from .hgsubstate, kind
67 to tuple: (source from .hgsub, revision from .hgsubstate, kind
68 (key in types dict))
68 (key in types dict))
69 """
69 """
70 p = config.config()
70 p = config.config()
71 def read(f, sections=None, remap=None):
71 def read(f, sections=None, remap=None):
72 if f in ctx:
72 if f in ctx:
73 try:
73 try:
74 data = ctx[f].data()
74 data = ctx[f].data()
75 except IOError, err:
75 except IOError, err:
76 if err.errno != errno.ENOENT:
76 if err.errno != errno.ENOENT:
77 raise
77 raise
78 # handle missing subrepo spec files as removed
78 # handle missing subrepo spec files as removed
79 ui.warn(_("warning: subrepo spec file %s not found\n") % f)
79 ui.warn(_("warning: subrepo spec file %s not found\n") % f)
80 return
80 return
81 p.parse(f, data, sections, remap, read)
81 p.parse(f, data, sections, remap, read)
82 else:
82 else:
83 raise util.Abort(_("subrepo spec file %s not found") % f)
83 raise util.Abort(_("subrepo spec file %s not found") % f)
84
84
85 if '.hgsub' in ctx:
85 if '.hgsub' in ctx:
86 read('.hgsub')
86 read('.hgsub')
87
87
88 for path, src in ui.configitems('subpaths'):
88 for path, src in ui.configitems('subpaths'):
89 p.set('subpaths', path, src, ui.configsource('subpaths', path))
89 p.set('subpaths', path, src, ui.configsource('subpaths', path))
90
90
91 rev = {}
91 rev = {}
92 if '.hgsubstate' in ctx:
92 if '.hgsubstate' in ctx:
93 try:
93 try:
94 for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
94 for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
95 l = l.lstrip()
95 l = l.lstrip()
96 if not l:
96 if not l:
97 continue
97 continue
98 try:
98 try:
99 revision, path = l.split(" ", 1)
99 revision, path = l.split(" ", 1)
100 except ValueError:
100 except ValueError:
101 raise util.Abort(_("invalid subrepository revision "
101 raise util.Abort(_("invalid subrepository revision "
102 "specifier in .hgsubstate line %d")
102 "specifier in .hgsubstate line %d")
103 % (i + 1))
103 % (i + 1))
104 rev[path] = revision
104 rev[path] = revision
105 except IOError, err:
105 except IOError, err:
106 if err.errno != errno.ENOENT:
106 if err.errno != errno.ENOENT:
107 raise
107 raise
108
108
109 def remap(src):
109 def remap(src):
110 for pattern, repl in p.items('subpaths'):
110 for pattern, repl in p.items('subpaths'):
111 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
111 # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
112 # does a string decode.
112 # does a string decode.
113 repl = repl.encode('string-escape')
113 repl = repl.encode('string-escape')
114 # However, we still want to allow back references to go
114 # However, we still want to allow back references to go
115 # through unharmed, so we turn r'\\1' into r'\1'. Again,
115 # through unharmed, so we turn r'\\1' into r'\1'. Again,
116 # extra escapes are needed because re.sub string decodes.
116 # extra escapes are needed because re.sub string decodes.
117 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
117 repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
118 try:
118 try:
119 src = re.sub(pattern, repl, src, 1)
119 src = re.sub(pattern, repl, src, 1)
120 except re.error, e:
120 except re.error, e:
121 raise util.Abort(_("bad subrepository pattern in %s: %s")
121 raise util.Abort(_("bad subrepository pattern in %s: %s")
122 % (p.source('subpaths', pattern), e))
122 % (p.source('subpaths', pattern), e))
123 return src
123 return src
124
124
125 state = {}
125 state = {}
126 for path, src in p[''].items():
126 for path, src in p[''].items():
127 kind = 'hg'
127 kind = 'hg'
128 if src.startswith('['):
128 if src.startswith('['):
129 if ']' not in src:
129 if ']' not in src:
130 raise util.Abort(_('missing ] in subrepo source'))
130 raise util.Abort(_('missing ] in subrepo source'))
131 kind, src = src.split(']', 1)
131 kind, src = src.split(']', 1)
132 kind = kind[1:]
132 kind = kind[1:]
133 src = src.lstrip() # strip any extra whitespace after ']'
133 src = src.lstrip() # strip any extra whitespace after ']'
134
134
135 if not util.url(src).isabs():
135 if not util.url(src).isabs():
136 parent = _abssource(ctx._repo, abort=False)
136 parent = _abssource(ctx._repo, abort=False)
137 if parent:
137 if parent:
138 parent = util.url(parent)
138 parent = util.url(parent)
139 parent.path = posixpath.join(parent.path or '', src)
139 parent.path = posixpath.join(parent.path or '', src)
140 parent.path = posixpath.normpath(parent.path)
140 parent.path = posixpath.normpath(parent.path)
141 joined = str(parent)
141 joined = str(parent)
142 # Remap the full joined path and use it if it changes,
142 # Remap the full joined path and use it if it changes,
143 # else remap the original source.
143 # else remap the original source.
144 remapped = remap(joined)
144 remapped = remap(joined)
145 if remapped == joined:
145 if remapped == joined:
146 src = remap(src)
146 src = remap(src)
147 else:
147 else:
148 src = remapped
148 src = remapped
149
149
150 src = remap(src)
150 src = remap(src)
151 state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)
151 state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)
152
152
153 return state
153 return state
154
154
155 def writestate(repo, state):
155 def writestate(repo, state):
156 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
156 """rewrite .hgsubstate in (outer) repo with these subrepo states"""
157 lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)]
157 lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)]
158 repo.wwrite('.hgsubstate', ''.join(lines), '')
158 repo.wwrite('.hgsubstate', ''.join(lines), '')
159
159
160 def submerge(repo, wctx, mctx, actx, overwrite):
160 def submerge(repo, wctx, mctx, actx, overwrite):
161 """delegated from merge.applyupdates: merging of .hgsubstate file
161 """delegated from merge.applyupdates: merging of .hgsubstate file
162 in working context, merging context and ancestor context"""
162 in working context, merging context and ancestor context"""
163 if mctx == actx: # backwards?
163 if mctx == actx: # backwards?
164 actx = wctx.p1()
164 actx = wctx.p1()
165 s1 = wctx.substate
165 s1 = wctx.substate
166 s2 = mctx.substate
166 s2 = mctx.substate
167 sa = actx.substate
167 sa = actx.substate
168 sm = {}
168 sm = {}
169
169
170 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
170 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
171
171
172 def debug(s, msg, r=""):
172 def debug(s, msg, r=""):
173 if r:
173 if r:
174 r = "%s:%s:%s" % r
174 r = "%s:%s:%s" % r
175 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
175 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
176
176
177 for s, l in sorted(s1.iteritems()):
177 for s, l in sorted(s1.iteritems()):
178 a = sa.get(s, nullstate)
178 a = sa.get(s, nullstate)
179 ld = l # local state with possible dirty flag for compares
179 ld = l # local state with possible dirty flag for compares
180 if wctx.sub(s).dirty():
180 if wctx.sub(s).dirty():
181 ld = (l[0], l[1] + "+")
181 ld = (l[0], l[1] + "+")
182 if wctx == actx: # overwrite
182 if wctx == actx: # overwrite
183 a = ld
183 a = ld
184
184
185 if s in s2:
185 if s in s2:
186 r = s2[s]
186 r = s2[s]
187 if ld == r or r == a: # no change or local is newer
187 if ld == r or r == a: # no change or local is newer
188 sm[s] = l
188 sm[s] = l
189 continue
189 continue
190 elif ld == a: # other side changed
190 elif ld == a: # other side changed
191 debug(s, "other changed, get", r)
191 debug(s, "other changed, get", r)
192 wctx.sub(s).get(r, overwrite)
192 wctx.sub(s).get(r, overwrite)
193 sm[s] = r
193 sm[s] = r
194 elif ld[0] != r[0]: # sources differ
194 elif ld[0] != r[0]: # sources differ
195 if repo.ui.promptchoice(
195 if repo.ui.promptchoice(
196 _(' subrepository sources for %s differ\n'
196 _(' subrepository sources for %s differ\n'
197 'use (l)ocal source (%s) or (r)emote source (%s)?'
197 'use (l)ocal source (%s) or (r)emote source (%s)?'
198 '$$ &Local $$ &Remote') % (s, l[0], r[0]), 0):
198 '$$ &Local $$ &Remote') % (s, l[0], r[0]), 0):
199 debug(s, "prompt changed, get", r)
199 debug(s, "prompt changed, get", r)
200 wctx.sub(s).get(r, overwrite)
200 wctx.sub(s).get(r, overwrite)
201 sm[s] = r
201 sm[s] = r
202 elif ld[1] == a[1]: # local side is unchanged
202 elif ld[1] == a[1]: # local side is unchanged
203 debug(s, "other side changed, get", r)
203 debug(s, "other side changed, get", r)
204 wctx.sub(s).get(r, overwrite)
204 wctx.sub(s).get(r, overwrite)
205 sm[s] = r
205 sm[s] = r
206 else:
206 else:
207 debug(s, "both sides changed")
207 debug(s, "both sides changed")
208 option = repo.ui.promptchoice(
208 option = repo.ui.promptchoice(
209 _(' subrepository %s diverged (local revision: %s, '
209 _(' subrepository %s diverged (local revision: %s, '
210 'remote revision: %s)\n'
210 'remote revision: %s)\n'
211 '(M)erge, keep (l)ocal or keep (r)emote?'
211 '(M)erge, keep (l)ocal or keep (r)emote?'
212 '$$ &Merge $$ &Local $$ &Remote')
212 '$$ &Merge $$ &Local $$ &Remote')
213 % (s, l[1][:12], r[1][:12]), 0)
213 % (s, l[1][:12], r[1][:12]), 0)
214 if option == 0:
214 if option == 0:
215 wctx.sub(s).merge(r)
215 wctx.sub(s).merge(r)
216 sm[s] = l
216 sm[s] = l
217 debug(s, "merge with", r)
217 debug(s, "merge with", r)
218 elif option == 1:
218 elif option == 1:
219 sm[s] = l
219 sm[s] = l
220 debug(s, "keep local subrepo revision", l)
220 debug(s, "keep local subrepo revision", l)
221 else:
221 else:
222 wctx.sub(s).get(r, overwrite)
222 wctx.sub(s).get(r, overwrite)
223 sm[s] = r
223 sm[s] = r
224 debug(s, "get remote subrepo revision", r)
224 debug(s, "get remote subrepo revision", r)
225 elif ld == a: # remote removed, local unchanged
225 elif ld == a: # remote removed, local unchanged
226 debug(s, "remote removed, remove")
226 debug(s, "remote removed, remove")
227 wctx.sub(s).remove()
227 wctx.sub(s).remove()
228 elif a == nullstate: # not present in remote or ancestor
228 elif a == nullstate: # not present in remote or ancestor
229 debug(s, "local added, keep")
229 debug(s, "local added, keep")
230 sm[s] = l
230 sm[s] = l
231 continue
231 continue
232 else:
232 else:
233 if repo.ui.promptchoice(
233 if repo.ui.promptchoice(
234 _(' local changed subrepository %s which remote removed\n'
234 _(' local changed subrepository %s which remote removed\n'
235 'use (c)hanged version or (d)elete?'
235 'use (c)hanged version or (d)elete?'
236 '$$ &Changed $$ &Delete') % s, 0):
236 '$$ &Changed $$ &Delete') % s, 0):
237 debug(s, "prompt remove")
237 debug(s, "prompt remove")
238 wctx.sub(s).remove()
238 wctx.sub(s).remove()
239
239
240 for s, r in sorted(s2.items()):
240 for s, r in sorted(s2.items()):
241 if s in s1:
241 if s in s1:
242 continue
242 continue
243 elif s not in sa:
243 elif s not in sa:
244 debug(s, "remote added, get", r)
244 debug(s, "remote added, get", r)
245 mctx.sub(s).get(r)
245 mctx.sub(s).get(r)
246 sm[s] = r
246 sm[s] = r
247 elif r != sa[s]:
247 elif r != sa[s]:
248 if repo.ui.promptchoice(
248 if repo.ui.promptchoice(
249 _(' remote changed subrepository %s which local removed\n'
249 _(' remote changed subrepository %s which local removed\n'
250 'use (c)hanged version or (d)elete?'
250 'use (c)hanged version or (d)elete?'
251 '$$ &Changed $$ &Delete') % s, 0) == 0:
251 '$$ &Changed $$ &Delete') % s, 0) == 0:
252 debug(s, "prompt recreate", r)
252 debug(s, "prompt recreate", r)
253 wctx.sub(s).get(r)
253 wctx.sub(s).get(r)
254 sm[s] = r
254 sm[s] = r
255
255
256 # record merged .hgsubstate
256 # record merged .hgsubstate
257 writestate(repo, sm)
257 writestate(repo, sm)
258 return sm
258 return sm
259
259
260 def _updateprompt(ui, sub, dirty, local, remote):
260 def _updateprompt(ui, sub, dirty, local, remote):
261 if dirty:
261 if dirty:
262 msg = (_(' subrepository sources for %s differ\n'
262 msg = (_(' subrepository sources for %s differ\n'
263 'use (l)ocal source (%s) or (r)emote source (%s)?\n'
263 'use (l)ocal source (%s) or (r)emote source (%s)?\n'
264 '$$ &Local $$ &Remote')
264 '$$ &Local $$ &Remote')
265 % (subrelpath(sub), local, remote))
265 % (subrelpath(sub), local, remote))
266 else:
266 else:
267 msg = (_(' subrepository sources for %s differ (in checked out '
267 msg = (_(' subrepository sources for %s differ (in checked out '
268 'version)\n'
268 'version)\n'
269 'use (l)ocal source (%s) or (r)emote source (%s)?\n'
269 'use (l)ocal source (%s) or (r)emote source (%s)?\n'
270 '$$ &Local $$ &Remote')
270 '$$ &Local $$ &Remote')
271 % (subrelpath(sub), local, remote))
271 % (subrelpath(sub), local, remote))
272 return ui.promptchoice(msg, 0)
272 return ui.promptchoice(msg, 0)
273
273
274 def reporelpath(repo):
274 def reporelpath(repo):
275 """return path to this (sub)repo as seen from outermost repo"""
275 """return path to this (sub)repo as seen from outermost repo"""
276 parent = repo
276 parent = repo
277 while util.safehasattr(parent, '_subparent'):
277 while util.safehasattr(parent, '_subparent'):
278 parent = parent._subparent
278 parent = parent._subparent
279 p = parent.root.rstrip(os.sep)
279 p = parent.root.rstrip(os.sep)
280 return repo.root[len(p) + 1:]
280 return repo.root[len(p) + 1:]
281
281
282 def subrelpath(sub):
282 def subrelpath(sub):
283 """return path to this subrepo as seen from outermost repo"""
283 """return path to this subrepo as seen from outermost repo"""
284 if util.safehasattr(sub, '_relpath'):
284 if util.safehasattr(sub, '_relpath'):
285 return sub._relpath
285 return sub._relpath
286 if not util.safehasattr(sub, '_repo'):
286 if not util.safehasattr(sub, '_repo'):
287 return sub._path
287 return sub._path
288 return reporelpath(sub._repo)
288 return reporelpath(sub._repo)
289
289
290 def _abssource(repo, push=False, abort=True):
290 def _abssource(repo, push=False, abort=True):
291 """return pull/push path of repo - either based on parent repo .hgsub info
291 """return pull/push path of repo - either based on parent repo .hgsub info
292 or on the top repo config. Abort or return None if no source found."""
292 or on the top repo config. Abort or return None if no source found."""
293 if util.safehasattr(repo, '_subparent'):
293 if util.safehasattr(repo, '_subparent'):
294 source = util.url(repo._subsource)
294 source = util.url(repo._subsource)
295 if source.isabs():
295 if source.isabs():
296 return str(source)
296 return str(source)
297 source.path = posixpath.normpath(source.path)
297 source.path = posixpath.normpath(source.path)
298 parent = _abssource(repo._subparent, push, abort=False)
298 parent = _abssource(repo._subparent, push, abort=False)
299 if parent:
299 if parent:
300 parent = util.url(util.pconvert(parent))
300 parent = util.url(util.pconvert(parent))
301 parent.path = posixpath.join(parent.path or '', source.path)
301 parent.path = posixpath.join(parent.path or '', source.path)
302 parent.path = posixpath.normpath(parent.path)
302 parent.path = posixpath.normpath(parent.path)
303 return str(parent)
303 return str(parent)
304 else: # recursion reached top repo
304 else: # recursion reached top repo
305 if util.safehasattr(repo, '_subtoppath'):
305 if util.safehasattr(repo, '_subtoppath'):
306 return repo._subtoppath
306 return repo._subtoppath
307 if push and repo.ui.config('paths', 'default-push'):
307 if push and repo.ui.config('paths', 'default-push'):
308 return repo.ui.config('paths', 'default-push')
308 return repo.ui.config('paths', 'default-push')
309 if repo.ui.config('paths', 'default'):
309 if repo.ui.config('paths', 'default'):
310 return repo.ui.config('paths', 'default')
310 return repo.ui.config('paths', 'default')
311 if repo.sharedpath != repo.path:
311 if repo.sharedpath != repo.path:
312 # chop off the .hg component to get the default path form
312 # chop off the .hg component to get the default path form
313 return os.path.dirname(repo.sharedpath)
313 return os.path.dirname(repo.sharedpath)
314 if abort:
314 if abort:
315 raise util.Abort(_("default path for subrepository not found"))
315 raise util.Abort(_("default path for subrepository not found"))
316
316
317 def _sanitize(ui, path):
317 def _sanitize(ui, path):
318 def v(arg, dirname, names):
318 def v(arg, dirname, names):
319 if os.path.basename(dirname).lower() != '.hg':
319 if os.path.basename(dirname).lower() != '.hg':
320 return
320 return
321 for f in names:
321 for f in names:
322 if f.lower() == 'hgrc':
322 if f.lower() == 'hgrc':
323 ui.warn(
323 ui.warn(
324 _("warning: removing potentially hostile .hg/hgrc in '%s'")
324 _("warning: removing potentially hostile .hg/hgrc in '%s'")
325 % path)
325 % path)
326 os.unlink(os.path.join(dirname, f))
326 os.unlink(os.path.join(dirname, f))
327 os.walk(path, v, None)
327 os.walk(path, v, None)
328
328
329 def itersubrepos(ctx1, ctx2):
329 def itersubrepos(ctx1, ctx2):
330 """find subrepos in ctx1 or ctx2"""
330 """find subrepos in ctx1 or ctx2"""
331 # Create a (subpath, ctx) mapping where we prefer subpaths from
331 # Create a (subpath, ctx) mapping where we prefer subpaths from
332 # ctx1. The subpaths from ctx2 are important when the .hgsub file
332 # ctx1. The subpaths from ctx2 are important when the .hgsub file
333 # has been modified (in ctx2) but not yet committed (in ctx1).
333 # has been modified (in ctx2) but not yet committed (in ctx1).
334 subpaths = dict.fromkeys(ctx2.substate, ctx2)
334 subpaths = dict.fromkeys(ctx2.substate, ctx2)
335 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
335 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
336 for subpath, ctx in sorted(subpaths.iteritems()):
336 for subpath, ctx in sorted(subpaths.iteritems()):
337 yield subpath, ctx.sub(subpath)
337 yield subpath, ctx.sub(subpath)
338
338
339 def subrepo(ctx, path):
339 def subrepo(ctx, path):
340 """return instance of the right subrepo class for subrepo in path"""
340 """return instance of the right subrepo class for subrepo in path"""
341 # subrepo inherently violates our import layering rules
341 # subrepo inherently violates our import layering rules
342 # because it wants to make repo objects from deep inside the stack
342 # because it wants to make repo objects from deep inside the stack
343 # so we manually delay the circular imports to not break
343 # so we manually delay the circular imports to not break
344 # scripts that don't use our demand-loading
344 # scripts that don't use our demand-loading
345 global hg
345 global hg
346 import hg as h
346 import hg as h
347 hg = h
347 hg = h
348
348
349 pathutil.pathauditor(ctx._repo.root)(path)
349 pathutil.pathauditor(ctx._repo.root)(path)
350 state = ctx.substate[path]
350 state = ctx.substate[path]
351 if state[2] not in types:
351 if state[2] not in types:
352 raise util.Abort(_('unknown subrepo type %s') % state[2])
352 raise util.Abort(_('unknown subrepo type %s') % state[2])
353 return types[state[2]](ctx, path, state[:2])
353 return types[state[2]](ctx, path, state[:2])
354
354
355 def newcommitphase(ui, ctx):
355 def newcommitphase(ui, ctx):
356 commitphase = phases.newcommitphase(ui)
356 commitphase = phases.newcommitphase(ui)
357 substate = getattr(ctx, "substate", None)
357 substate = getattr(ctx, "substate", None)
358 if not substate:
358 if not substate:
359 return commitphase
359 return commitphase
360 check = ui.config('phases', 'checksubrepos', 'follow')
360 check = ui.config('phases', 'checksubrepos', 'follow')
361 if check not in ('ignore', 'follow', 'abort'):
361 if check not in ('ignore', 'follow', 'abort'):
362 raise util.Abort(_('invalid phases.checksubrepos configuration: %s')
362 raise util.Abort(_('invalid phases.checksubrepos configuration: %s')
363 % (check))
363 % (check))
364 if check == 'ignore':
364 if check == 'ignore':
365 return commitphase
365 return commitphase
366 maxphase = phases.public
366 maxphase = phases.public
367 maxsub = None
367 maxsub = None
368 for s in sorted(substate):
368 for s in sorted(substate):
369 sub = ctx.sub(s)
369 sub = ctx.sub(s)
370 subphase = sub.phase(substate[s][1])
370 subphase = sub.phase(substate[s][1])
371 if maxphase < subphase:
371 if maxphase < subphase:
372 maxphase = subphase
372 maxphase = subphase
373 maxsub = s
373 maxsub = s
374 if commitphase < maxphase:
374 if commitphase < maxphase:
375 if check == 'abort':
375 if check == 'abort':
376 raise util.Abort(_("can't commit in %s phase"
376 raise util.Abort(_("can't commit in %s phase"
377 " conflicting %s from subrepository %s") %
377 " conflicting %s from subrepository %s") %
378 (phases.phasenames[commitphase],
378 (phases.phasenames[commitphase],
379 phases.phasenames[maxphase], maxsub))
379 phases.phasenames[maxphase], maxsub))
380 ui.warn(_("warning: changes are committed in"
380 ui.warn(_("warning: changes are committed in"
381 " %s phase from subrepository %s\n") %
381 " %s phase from subrepository %s\n") %
382 (phases.phasenames[maxphase], maxsub))
382 (phases.phasenames[maxphase], maxsub))
383 return maxphase
383 return maxphase
384 return commitphase
384 return commitphase
385
385
386 # subrepo classes need to implement the following abstract class:
386 # subrepo classes need to implement the following abstract class:
387
387
388 class abstractsubrepo(object):
388 class abstractsubrepo(object):
389
389
390 def storeclean(self, path):
390 def storeclean(self, path):
391 """
391 """
392 returns true if the repository has not changed since it was last
392 returns true if the repository has not changed since it was last
393 cloned from or pushed to a given repository.
393 cloned from or pushed to a given repository.
394 """
394 """
395 return False
395 return False
396
396
397 def dirty(self, ignoreupdate=False):
397 def dirty(self, ignoreupdate=False):
398 """returns true if the dirstate of the subrepo is dirty or does not
398 """returns true if the dirstate of the subrepo is dirty or does not
399 match current stored state. If ignoreupdate is true, only check
399 match current stored state. If ignoreupdate is true, only check
400 whether the subrepo has uncommitted changes in its dirstate.
400 whether the subrepo has uncommitted changes in its dirstate.
401 """
401 """
402 raise NotImplementedError
402 raise NotImplementedError
403
403
404 def basestate(self):
404 def basestate(self):
405 """current working directory base state, disregarding .hgsubstate
405 """current working directory base state, disregarding .hgsubstate
406 state and working directory modifications"""
406 state and working directory modifications"""
407 raise NotImplementedError
407 raise NotImplementedError
408
408
409 def checknested(self, path):
409 def checknested(self, path):
410 """check if path is a subrepository within this repository"""
410 """check if path is a subrepository within this repository"""
411 return False
411 return False
412
412
413 def commit(self, text, user, date):
413 def commit(self, text, user, date):
414 """commit the current changes to the subrepo with the given
414 """commit the current changes to the subrepo with the given
415 log message. Use given user and date if possible. Return the
415 log message. Use given user and date if possible. Return the
416 new state of the subrepo.
416 new state of the subrepo.
417 """
417 """
418 raise NotImplementedError
418 raise NotImplementedError
419
419
420 def phase(self, state):
420 def phase(self, state):
421 """returns phase of specified state in the subrepository.
421 """returns phase of specified state in the subrepository.
422 """
422 """
423 return phases.public
423 return phases.public
424
424
425 def remove(self):
425 def remove(self):
426 """remove the subrepo
426 """remove the subrepo
427
427
428 (should verify the dirstate is not dirty first)
428 (should verify the dirstate is not dirty first)
429 """
429 """
430 raise NotImplementedError
430 raise NotImplementedError
431
431
432 def get(self, state, overwrite=False):
432 def get(self, state, overwrite=False):
433 """run whatever commands are needed to put the subrepo into
433 """run whatever commands are needed to put the subrepo into
434 this state
434 this state
435 """
435 """
436 raise NotImplementedError
436 raise NotImplementedError
437
437
438 def merge(self, state):
438 def merge(self, state):
439 """merge currently-saved state with the new state."""
439 """merge currently-saved state with the new state."""
440 raise NotImplementedError
440 raise NotImplementedError
441
441
442 def push(self, opts):
442 def push(self, opts):
443 """perform whatever action is analogous to 'hg push'
443 """perform whatever action is analogous to 'hg push'
444
444
445 This may be a no-op on some systems.
445 This may be a no-op on some systems.
446 """
446 """
447 raise NotImplementedError
447 raise NotImplementedError
448
448
449 def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
449 def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
450 return []
450 return []
451
451
452 def status(self, rev2, **opts):
452 def status(self, rev2, **opts):
453 return [], [], [], [], [], [], []
453 return [], [], [], [], [], [], []
454
454
455 def diff(self, ui, diffopts, node2, match, prefix, **opts):
455 def diff(self, ui, diffopts, node2, match, prefix, **opts):
456 pass
456 pass
457
457
458 def outgoing(self, ui, dest, opts):
458 def outgoing(self, ui, dest, opts):
459 return 1
459 return 1
460
460
461 def incoming(self, ui, source, opts):
461 def incoming(self, ui, source, opts):
462 return 1
462 return 1
463
463
464 def files(self):
464 def files(self):
465 """return filename iterator"""
465 """return filename iterator"""
466 raise NotImplementedError
466 raise NotImplementedError
467
467
468 def filedata(self, name):
468 def filedata(self, name):
469 """return file data"""
469 """return file data"""
470 raise NotImplementedError
470 raise NotImplementedError
471
471
472 def fileflags(self, name):
472 def fileflags(self, name):
473 """return file flags"""
473 """return file flags"""
474 return ''
474 return ''
475
475
476 def archive(self, ui, archiver, prefix, match=None):
476 def archive(self, ui, archiver, prefix, match=None):
477 if match is not None:
477 if match is not None:
478 files = [f for f in self.files() if match(f)]
478 files = [f for f in self.files() if match(f)]
479 else:
479 else:
480 files = self.files()
480 files = self.files()
481 total = len(files)
481 total = len(files)
482 relpath = subrelpath(self)
482 relpath = subrelpath(self)
483 ui.progress(_('archiving (%s)') % relpath, 0,
483 ui.progress(_('archiving (%s)') % relpath, 0,
484 unit=_('files'), total=total)
484 unit=_('files'), total=total)
485 for i, name in enumerate(files):
485 for i, name in enumerate(files):
486 flags = self.fileflags(name)
486 flags = self.fileflags(name)
487 mode = 'x' in flags and 0755 or 0644
487 mode = 'x' in flags and 0755 or 0644
488 symlink = 'l' in flags
488 symlink = 'l' in flags
489 archiver.addfile(os.path.join(prefix, self._path, name),
489 archiver.addfile(os.path.join(prefix, self._path, name),
490 mode, symlink, self.filedata(name))
490 mode, symlink, self.filedata(name))
491 ui.progress(_('archiving (%s)') % relpath, i + 1,
491 ui.progress(_('archiving (%s)') % relpath, i + 1,
492 unit=_('files'), total=total)
492 unit=_('files'), total=total)
493 ui.progress(_('archiving (%s)') % relpath, None)
493 ui.progress(_('archiving (%s)') % relpath, None)
494 return total
494 return total
495
495
496 def walk(self, match):
496 def walk(self, match):
497 '''
497 '''
498 walk recursively through the directory tree, finding all files
498 walk recursively through the directory tree, finding all files
499 matched by the match function
499 matched by the match function
500 '''
500 '''
501 pass
501 pass
502
502
503 def forget(self, ui, match, prefix):
503 def forget(self, ui, match, prefix):
504 return ([], [])
504 return ([], [])
505
505
506 def revert(self, ui, substate, *pats, **opts):
506 def revert(self, ui, substate, *pats, **opts):
507 ui.warn('%s: reverting %s subrepos is unsupported\n' \
507 ui.warn('%s: reverting %s subrepos is unsupported\n' \
508 % (substate[0], substate[2]))
508 % (substate[0], substate[2]))
509 return []
509 return []
510
510
511 class hgsubrepo(abstractsubrepo):
511 class hgsubrepo(abstractsubrepo):
512 def __init__(self, ctx, path, state):
512 def __init__(self, ctx, path, state):
513 self._path = path
513 self._path = path
514 self._state = state
514 self._state = state
515 r = ctx._repo
515 r = ctx._repo
516 root = r.wjoin(path)
516 root = r.wjoin(path)
517 create = False
517 create = False
518 if not os.path.exists(os.path.join(root, '.hg')):
518 if not os.path.exists(os.path.join(root, '.hg')):
519 create = True
519 create = True
520 util.makedirs(root)
520 util.makedirs(root)
521 self._repo = hg.repository(r.baseui, root, create=create)
521 self._repo = hg.repository(r.baseui, root, create=create)
522 for s, k in [('ui', 'commitsubrepos')]:
522 for s, k in [('ui', 'commitsubrepos')]:
523 v = r.ui.config(s, k)
523 v = r.ui.config(s, k)
524 if v:
524 if v:
525 self._repo.ui.setconfig(s, k, v)
525 self._repo.ui.setconfig(s, k, v)
526 self._repo.ui.setconfig('ui', '_usedassubrepo', 'True')
526 self._repo.ui.setconfig('ui', '_usedassubrepo', 'True')
527 self._initrepo(r, state[0], create)
527 self._initrepo(r, state[0], create)
528
528
529 def storeclean(self, path):
529 def storeclean(self, path):
530 clean = True
530 clean = True
531 lock = self._repo.lock()
531 lock = self._repo.lock()
532 itercache = self._calcstorehash(path)
532 itercache = self._calcstorehash(path)
533 try:
533 try:
534 for filehash in self._readstorehashcache(path):
534 for filehash in self._readstorehashcache(path):
535 if filehash != itercache.next():
535 if filehash != itercache.next():
536 clean = False
536 clean = False
537 break
537 break
538 except StopIteration:
538 except StopIteration:
539 # the cached and current pull states have a different size
539 # the cached and current pull states have a different size
540 clean = False
540 clean = False
541 if clean:
541 if clean:
542 try:
542 try:
543 itercache.next()
543 itercache.next()
544 # the cached and current pull states have a different size
544 # the cached and current pull states have a different size
545 clean = False
545 clean = False
546 except StopIteration:
546 except StopIteration:
547 pass
547 pass
548 lock.release()
548 lock.release()
549 return clean
549 return clean
550
550
551 def _calcstorehash(self, remotepath):
551 def _calcstorehash(self, remotepath):
552 '''calculate a unique "store hash"
552 '''calculate a unique "store hash"
553
553
554 This method is used to to detect when there are changes that may
554 This method is used to to detect when there are changes that may
555 require a push to a given remote path.'''
555 require a push to a given remote path.'''
556 # sort the files that will be hashed in increasing (likely) file size
556 # sort the files that will be hashed in increasing (likely) file size
557 filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
557 filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
558 yield '# %s\n' % _expandedabspath(remotepath)
558 yield '# %s\n' % _expandedabspath(remotepath)
559 for relname in filelist:
559 for relname in filelist:
560 absname = os.path.normpath(self._repo.join(relname))
560 absname = os.path.normpath(self._repo.join(relname))
561 yield '%s = %s\n' % (relname, _calcfilehash(absname))
561 yield '%s = %s\n' % (relname, _calcfilehash(absname))
562
562
563 def _getstorehashcachepath(self, remotepath):
563 def _getstorehashcachepath(self, remotepath):
564 '''get a unique path for the store hash cache'''
564 '''get a unique path for the store hash cache'''
565 return self._repo.join(os.path.join(
565 return self._repo.join(os.path.join(
566 'cache', 'storehash', _getstorehashcachename(remotepath)))
566 'cache', 'storehash', _getstorehashcachename(remotepath)))
567
567
568 def _readstorehashcache(self, remotepath):
568 def _readstorehashcache(self, remotepath):
569 '''read the store hash cache for a given remote repository'''
569 '''read the store hash cache for a given remote repository'''
570 cachefile = self._getstorehashcachepath(remotepath)
570 cachefile = self._getstorehashcachepath(remotepath)
571 if not os.path.exists(cachefile):
571 if not os.path.exists(cachefile):
572 return ''
572 return ''
573 fd = open(cachefile, 'r')
573 fd = open(cachefile, 'r')
574 pullstate = fd.readlines()
574 pullstate = fd.readlines()
575 fd.close()
575 fd.close()
576 return pullstate
576 return pullstate
577
577
578 def _cachestorehash(self, remotepath):
578 def _cachestorehash(self, remotepath):
579 '''cache the current store hash
579 '''cache the current store hash
580
580
581 Each remote repo requires its own store hash cache, because a subrepo
581 Each remote repo requires its own store hash cache, because a subrepo
582 store may be "clean" versus a given remote repo, but not versus another
582 store may be "clean" versus a given remote repo, but not versus another
583 '''
583 '''
584 cachefile = self._getstorehashcachepath(remotepath)
584 cachefile = self._getstorehashcachepath(remotepath)
585 lock = self._repo.lock()
585 lock = self._repo.lock()
586 storehash = list(self._calcstorehash(remotepath))
586 storehash = list(self._calcstorehash(remotepath))
587 cachedir = os.path.dirname(cachefile)
587 cachedir = os.path.dirname(cachefile)
588 if not os.path.exists(cachedir):
588 if not os.path.exists(cachedir):
589 util.makedirs(cachedir, notindexed=True)
589 util.makedirs(cachedir, notindexed=True)
590 fd = open(cachefile, 'w')
590 fd = open(cachefile, 'w')
591 fd.writelines(storehash)
591 fd.writelines(storehash)
592 fd.close()
592 fd.close()
593 lock.release()
593 lock.release()
594
594
595 @annotatesubrepoerror
595 @annotatesubrepoerror
596 def _initrepo(self, parentrepo, source, create):
596 def _initrepo(self, parentrepo, source, create):
597 self._repo._subparent = parentrepo
597 self._repo._subparent = parentrepo
598 self._repo._subsource = source
598 self._repo._subsource = source
599
599
600 if create:
600 if create:
601 fp = self._repo.opener("hgrc", "w", text=True)
601 fp = self._repo.opener("hgrc", "w", text=True)
602 fp.write('[paths]\n')
602 fp.write('[paths]\n')
603
603
604 def addpathconfig(key, value):
604 def addpathconfig(key, value):
605 if value:
605 if value:
606 fp.write('%s = %s\n' % (key, value))
606 fp.write('%s = %s\n' % (key, value))
607 self._repo.ui.setconfig('paths', key, value)
607 self._repo.ui.setconfig('paths', key, value)
608
608
609 defpath = _abssource(self._repo, abort=False)
609 defpath = _abssource(self._repo, abort=False)
610 defpushpath = _abssource(self._repo, True, abort=False)
610 defpushpath = _abssource(self._repo, True, abort=False)
611 addpathconfig('default', defpath)
611 addpathconfig('default', defpath)
612 if defpath != defpushpath:
612 if defpath != defpushpath:
613 addpathconfig('default-push', defpushpath)
613 addpathconfig('default-push', defpushpath)
614 fp.close()
614 fp.close()
615
615
616 @annotatesubrepoerror
616 @annotatesubrepoerror
617 def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
617 def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
618 return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
618 return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
619 os.path.join(prefix, self._path), explicitonly)
619 os.path.join(prefix, self._path), explicitonly)
620
620
621 @annotatesubrepoerror
621 @annotatesubrepoerror
622 def status(self, rev2, **opts):
622 def status(self, rev2, **opts):
623 try:
623 try:
624 rev1 = self._state[1]
624 rev1 = self._state[1]
625 ctx1 = self._repo[rev1]
625 ctx1 = self._repo[rev1]
626 ctx2 = self._repo[rev2]
626 ctx2 = self._repo[rev2]
627 return self._repo.status(ctx1, ctx2, **opts)
627 return self._repo.status(ctx1, ctx2, **opts)
628 except error.RepoLookupError, inst:
628 except error.RepoLookupError, inst:
629 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
629 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
630 % (inst, subrelpath(self)))
630 % (inst, subrelpath(self)))
631 return [], [], [], [], [], [], []
631 return [], [], [], [], [], [], []
632
632
633 @annotatesubrepoerror
633 @annotatesubrepoerror
634 def diff(self, ui, diffopts, node2, match, prefix, **opts):
634 def diff(self, ui, diffopts, node2, match, prefix, **opts):
635 try:
635 try:
636 node1 = node.bin(self._state[1])
636 node1 = node.bin(self._state[1])
637 # We currently expect node2 to come from substate and be
637 # We currently expect node2 to come from substate and be
638 # in hex format
638 # in hex format
639 if node2 is not None:
639 if node2 is not None:
640 node2 = node.bin(node2)
640 node2 = node.bin(node2)
641 cmdutil.diffordiffstat(ui, self._repo, diffopts,
641 cmdutil.diffordiffstat(ui, self._repo, diffopts,
642 node1, node2, match,
642 node1, node2, match,
643 prefix=posixpath.join(prefix, self._path),
643 prefix=posixpath.join(prefix, self._path),
644 listsubrepos=True, **opts)
644 listsubrepos=True, **opts)
645 except error.RepoLookupError, inst:
645 except error.RepoLookupError, inst:
646 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
646 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
647 % (inst, subrelpath(self)))
647 % (inst, subrelpath(self)))
648
648
649 @annotatesubrepoerror
649 @annotatesubrepoerror
650 def archive(self, ui, archiver, prefix, match=None):
650 def archive(self, ui, archiver, prefix, match=None):
651 self._get(self._state + ('hg',))
651 self._get(self._state + ('hg',))
652 total = abstractsubrepo.archive(self, ui, archiver, prefix, match)
652 total = abstractsubrepo.archive(self, ui, archiver, prefix, match)
653 rev = self._state[1]
653 rev = self._state[1]
654 ctx = self._repo[rev]
654 ctx = self._repo[rev]
655 for subpath in ctx.substate:
655 for subpath in ctx.substate:
656 s = subrepo(ctx, subpath)
656 s = subrepo(ctx, subpath)
657 submatch = matchmod.narrowmatcher(subpath, match)
657 submatch = matchmod.narrowmatcher(subpath, match)
658 total += s.archive(
658 total += s.archive(
659 ui, archiver, os.path.join(prefix, self._path), submatch)
659 ui, archiver, os.path.join(prefix, self._path), submatch)
660 return total
660 return total
661
661
662 @annotatesubrepoerror
662 @annotatesubrepoerror
663 def dirty(self, ignoreupdate=False):
663 def dirty(self, ignoreupdate=False):
664 r = self._state[1]
664 r = self._state[1]
665 if r == '' and not ignoreupdate: # no state recorded
665 if r == '' and not ignoreupdate: # no state recorded
666 return True
666 return True
667 w = self._repo[None]
667 w = self._repo[None]
668 if r != w.p1().hex() and not ignoreupdate:
668 if r != w.p1().hex() and not ignoreupdate:
669 # different version checked out
669 # different version checked out
670 return True
670 return True
671 return w.dirty() # working directory changed
671 return w.dirty() # working directory changed
672
672
673 def basestate(self):
673 def basestate(self):
674 return self._repo['.'].hex()
674 return self._repo['.'].hex()
675
675
676 def checknested(self, path):
676 def checknested(self, path):
677 return self._repo._checknested(self._repo.wjoin(path))
677 return self._repo._checknested(self._repo.wjoin(path))
678
678
679 @annotatesubrepoerror
679 @annotatesubrepoerror
680 def commit(self, text, user, date):
680 def commit(self, text, user, date):
681 # don't bother committing in the subrepo if it's only been
681 # don't bother committing in the subrepo if it's only been
682 # updated
682 # updated
683 if not self.dirty(True):
683 if not self.dirty(True):
684 return self._repo['.'].hex()
684 return self._repo['.'].hex()
685 self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
685 self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
686 n = self._repo.commit(text, user, date)
686 n = self._repo.commit(text, user, date)
687 if not n:
687 if not n:
688 return self._repo['.'].hex() # different version checked out
688 return self._repo['.'].hex() # different version checked out
689 return node.hex(n)
689 return node.hex(n)
690
690
691 @annotatesubrepoerror
691 @annotatesubrepoerror
692 def phase(self, state):
692 def phase(self, state):
693 return self._repo[state].phase()
693 return self._repo[state].phase()
694
694
695 @annotatesubrepoerror
695 @annotatesubrepoerror
696 def remove(self):
696 def remove(self):
697 # we can't fully delete the repository as it may contain
697 # we can't fully delete the repository as it may contain
698 # local-only history
698 # local-only history
699 self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
699 self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
700 hg.clean(self._repo, node.nullid, False)
700 hg.clean(self._repo, node.nullid, False)
701
701
702 def _get(self, state):
702 def _get(self, state):
703 source, revision, kind = state
703 source, revision, kind = state
704 if revision in self._repo.unfiltered():
704 if revision in self._repo.unfiltered():
705 return True
705 return True
706 self._repo._subsource = source
706 self._repo._subsource = source
707 srcurl = _abssource(self._repo)
707 srcurl = _abssource(self._repo)
708 other = hg.peer(self._repo, {}, srcurl)
708 other = hg.peer(self._repo, {}, srcurl)
709 if len(self._repo) == 0:
709 if len(self._repo) == 0:
710 self._repo.ui.status(_('cloning subrepo %s from %s\n')
710 self._repo.ui.status(_('cloning subrepo %s from %s\n')
711 % (subrelpath(self), srcurl))
711 % (subrelpath(self), srcurl))
712 parentrepo = self._repo._subparent
712 parentrepo = self._repo._subparent
713 shutil.rmtree(self._repo.path)
713 shutil.rmtree(self._repo.path)
714 other, cloned = hg.clone(self._repo._subparent.baseui, {},
714 other, cloned = hg.clone(self._repo._subparent.baseui, {},
715 other, self._repo.root,
715 other, self._repo.root,
716 update=False)
716 update=False)
717 self._repo = cloned.local()
717 self._repo = cloned.local()
718 self._initrepo(parentrepo, source, create=True)
718 self._initrepo(parentrepo, source, create=True)
719 self._cachestorehash(srcurl)
719 self._cachestorehash(srcurl)
720 else:
720 else:
721 self._repo.ui.status(_('pulling subrepo %s from %s\n')
721 self._repo.ui.status(_('pulling subrepo %s from %s\n')
722 % (subrelpath(self), srcurl))
722 % (subrelpath(self), srcurl))
723 cleansub = self.storeclean(srcurl)
723 cleansub = self.storeclean(srcurl)
724 remotebookmarks = other.listkeys('bookmarks')
724 remotebookmarks = other.listkeys('bookmarks')
725 self._repo.pull(other)
725 self._repo.pull(other)
726 bookmarks.updatefromremote(self._repo.ui, self._repo,
726 bookmarks.updatefromremote(self._repo.ui, self._repo,
727 remotebookmarks, srcurl)
727 remotebookmarks, srcurl)
728 if cleansub:
728 if cleansub:
729 # keep the repo clean after pull
729 # keep the repo clean after pull
730 self._cachestorehash(srcurl)
730 self._cachestorehash(srcurl)
731 return False
731 return False
732
732
733 @annotatesubrepoerror
733 @annotatesubrepoerror
734 def get(self, state, overwrite=False):
734 def get(self, state, overwrite=False):
735 inrepo = self._get(state)
735 inrepo = self._get(state)
736 source, revision, kind = state
736 source, revision, kind = state
737 repo = self._repo
737 repo = self._repo
738 repo.ui.debug("getting subrepo %s\n" % self._path)
738 repo.ui.debug("getting subrepo %s\n" % self._path)
739 if inrepo:
739 if inrepo:
740 urepo = repo.unfiltered()
740 urepo = repo.unfiltered()
741 ctx = urepo[revision]
741 ctx = urepo[revision]
742 if ctx.hidden():
742 if ctx.hidden():
743 urepo.ui.warn(
743 urepo.ui.warn(
744 _('revision %s in subrepo %s is hidden\n') \
744 _('revision %s in subrepo %s is hidden\n') \
745 % (revision[0:12], self._path))
745 % (revision[0:12], self._path))
746 repo = urepo
746 repo = urepo
747 hg.updaterepo(repo, revision, overwrite)
747 hg.updaterepo(repo, revision, overwrite)
748
748
749 @annotatesubrepoerror
749 @annotatesubrepoerror
750 def merge(self, state):
750 def merge(self, state):
751 self._get(state)
751 self._get(state)
752 cur = self._repo['.']
752 cur = self._repo['.']
753 dst = self._repo[state[1]]
753 dst = self._repo[state[1]]
754 anc = dst.ancestor(cur)
754 anc = dst.ancestor(cur)
755
755
756 def mergefunc():
756 def mergefunc():
757 if anc == cur and dst.branch() == cur.branch():
757 if anc == cur and dst.branch() == cur.branch():
758 self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
758 self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
759 hg.update(self._repo, state[1])
759 hg.update(self._repo, state[1])
760 elif anc == dst:
760 elif anc == dst:
761 self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
761 self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
762 else:
762 else:
763 self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
763 self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
764 hg.merge(self._repo, state[1], remind=False)
764 hg.merge(self._repo, state[1], remind=False)
765
765
766 wctx = self._repo[None]
766 wctx = self._repo[None]
767 if self.dirty():
767 if self.dirty():
768 if anc != dst:
768 if anc != dst:
769 if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
769 if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
770 mergefunc()
770 mergefunc()
771 else:
771 else:
772 mergefunc()
772 mergefunc()
773 else:
773 else:
774 mergefunc()
774 mergefunc()
775
775
776 @annotatesubrepoerror
776 @annotatesubrepoerror
777 def push(self, opts):
777 def push(self, opts):
778 force = opts.get('force')
778 force = opts.get('force')
779 newbranch = opts.get('new_branch')
779 newbranch = opts.get('new_branch')
780 ssh = opts.get('ssh')
780 ssh = opts.get('ssh')
781
781
782 # push subrepos depth-first for coherent ordering
782 # push subrepos depth-first for coherent ordering
783 c = self._repo['']
783 c = self._repo['']
784 subs = c.substate # only repos that are committed
784 subs = c.substate # only repos that are committed
785 for s in sorted(subs):
785 for s in sorted(subs):
786 if c.sub(s).push(opts) == 0:
786 if c.sub(s).push(opts) == 0:
787 return False
787 return False
788
788
789 dsturl = _abssource(self._repo, True)
789 dsturl = _abssource(self._repo, True)
790 if not force:
790 if not force:
791 if self.storeclean(dsturl):
791 if self.storeclean(dsturl):
792 self._repo.ui.status(
792 self._repo.ui.status(
793 _('no changes made to subrepo %s since last push to %s\n')
793 _('no changes made to subrepo %s since last push to %s\n')
794 % (subrelpath(self), dsturl))
794 % (subrelpath(self), dsturl))
795 return None
795 return None
796 self._repo.ui.status(_('pushing subrepo %s to %s\n') %
796 self._repo.ui.status(_('pushing subrepo %s to %s\n') %
797 (subrelpath(self), dsturl))
797 (subrelpath(self), dsturl))
798 other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
798 other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
799 res = self._repo.push(other, force, newbranch=newbranch)
799 res = self._repo.push(other, force, newbranch=newbranch)
800
800
801 # the repo is now clean
801 # the repo is now clean
802 self._cachestorehash(dsturl)
802 self._cachestorehash(dsturl)
803 return res
803 return res
804
804
805 @annotatesubrepoerror
805 @annotatesubrepoerror
806 def outgoing(self, ui, dest, opts):
806 def outgoing(self, ui, dest, opts):
807 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
807 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
808
808
809 @annotatesubrepoerror
809 @annotatesubrepoerror
810 def incoming(self, ui, source, opts):
810 def incoming(self, ui, source, opts):
811 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
811 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
812
812
813 @annotatesubrepoerror
813 @annotatesubrepoerror
814 def files(self):
814 def files(self):
815 rev = self._state[1]
815 rev = self._state[1]
816 ctx = self._repo[rev]
816 ctx = self._repo[rev]
817 return ctx.manifest()
817 return ctx.manifest()
818
818
819 def filedata(self, name):
819 def filedata(self, name):
820 rev = self._state[1]
820 rev = self._state[1]
821 return self._repo[rev][name].data()
821 return self._repo[rev][name].data()
822
822
823 def fileflags(self, name):
823 def fileflags(self, name):
824 rev = self._state[1]
824 rev = self._state[1]
825 ctx = self._repo[rev]
825 ctx = self._repo[rev]
826 return ctx.flags(name)
826 return ctx.flags(name)
827
827
828 def walk(self, match):
828 def walk(self, match):
829 ctx = self._repo[None]
829 ctx = self._repo[None]
830 return ctx.walk(match)
830 return ctx.walk(match)
831
831
832 @annotatesubrepoerror
832 @annotatesubrepoerror
833 def forget(self, ui, match, prefix):
833 def forget(self, ui, match, prefix):
834 return cmdutil.forget(ui, self._repo, match,
834 return cmdutil.forget(ui, self._repo, match,
835 os.path.join(prefix, self._path), True)
835 os.path.join(prefix, self._path), True)
836
836
837 @annotatesubrepoerror
837 @annotatesubrepoerror
838 def revert(self, ui, substate, *pats, **opts):
838 def revert(self, ui, substate, *pats, **opts):
839 # reverting a subrepo is a 2 step process:
839 # reverting a subrepo is a 2 step process:
840 # 1. if the no_backup is not set, revert all modified
840 # 1. if the no_backup is not set, revert all modified
841 # files inside the subrepo
841 # files inside the subrepo
842 # 2. update the subrepo to the revision specified in
842 # 2. update the subrepo to the revision specified in
843 # the corresponding substate dictionary
843 # the corresponding substate dictionary
844 ui.status(_('reverting subrepo %s\n') % substate[0])
844 ui.status(_('reverting subrepo %s\n') % substate[0])
845 if not opts.get('no_backup'):
845 if not opts.get('no_backup'):
846 # Revert all files on the subrepo, creating backups
846 # Revert all files on the subrepo, creating backups
847 # Note that this will not recursively revert subrepos
847 # Note that this will not recursively revert subrepos
848 # We could do it if there was a set:subrepos() predicate
848 # We could do it if there was a set:subrepos() predicate
849 opts = opts.copy()
849 opts = opts.copy()
850 opts['date'] = None
850 opts['date'] = None
851 opts['rev'] = substate[1]
851 opts['rev'] = substate[1]
852
852
853 pats = []
853 pats = []
854 if not opts.get('all'):
854 if not opts.get('all'):
855 pats = ['set:modified()']
855 pats = ['set:modified()']
856 self.filerevert(ui, *pats, **opts)
856 self.filerevert(ui, *pats, **opts)
857
857
858 # Update the repo to the revision specified in the given substate
858 # Update the repo to the revision specified in the given substate
859 self.get(substate, overwrite=True)
859 self.get(substate, overwrite=True)
860
860
861 def filerevert(self, ui, *pats, **opts):
861 def filerevert(self, ui, *pats, **opts):
862 ctx = self._repo[opts['rev']]
862 ctx = self._repo[opts['rev']]
863 parents = self._repo.dirstate.parents()
863 parents = self._repo.dirstate.parents()
864 if opts.get('all'):
864 if opts.get('all'):
865 pats = ['set:modified()']
865 pats = ['set:modified()']
866 else:
866 else:
867 pats = []
867 pats = []
868 cmdutil.revert(ui, self._repo, ctx, parents, *pats, **opts)
868 cmdutil.revert(ui, self._repo, ctx, parents, *pats, **opts)
869
869
class svnsubrepo(abstractsubrepo):
    """Subrepository backed by a Subversion working copy.

    All operations shell out to the 'svn' executable; its XML output is
    parsed to determine working-copy state.
    """

    def __init__(self, ctx, path, state):
        self._path = path
        self._state = state
        self._ctx = ctx
        self._ui = ctx._repo.ui
        self._exe = util.findexe('svn')
        if not self._exe:
            raise util.Abort(_("'svn' executable not found for subrepo '%s'")
                             % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        """Run svn with ``commands`` and return (stdout, stderr).

        When ``failok`` is False a nonzero exit aborts and stderr is
        echoed as a warning.  Output is forced to the C locale so it can
        be parsed reliably.
        """
        cmd = [self._exe]
        extrakw = {}
        if not self._ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw['stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = os.path.join(self._ctx._repo.origroot, self._path, filename)
            cmd.append(path)
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise util.Abort(stderr or 'exited with code %d' % p.returncode)
            if stderr:
                self._ui.warn(stderr + '\n')
        return stdout, stderr

    @propertycache
    def _svnversion(self):
        # cached (major, minor) version of the svn client
        output, err = self._svncommand(['--version', '--quiet'], filename=None)
        m = re.search(r'^(\d+)\.(\d+)', output)
        if not m:
            raise util.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

    def _wcrevs(self):
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)

    def _wcrev(self):
        # last committed revision of the working copy
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        # a change under an external entry marks the whole wc as
        # externally changed
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + os.sep):
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)

    def dirty(self, ignoreupdate=False):
        # dirty when files changed, or (unless ignoreupdate) when the
        # checkout does not match the recorded state revision
        if not self._wcchanged()[0]:
            if self._state[1] in self._wcrevs() or ignoreupdate:
                return False
        return True

    def basestate(self):
        lastrev, rev = self._wcrevs()
        if lastrev != rev:
            # Last committed rev is not the same than rev. We would
            # like to take lastrev but we do not know if the subrepo
            # URL exists at lastrev. Test it and fallback to rev it
            # is not there.
            try:
                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
                return lastrev
            except error.Abort:
                pass
        return rev

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # user and date are out of our hands since svn is centralized
        changed, extchanged, missing = self._wcchanged()
        if not changed:
            return self.basestate()
        if extchanged:
            # Do not try to commit externals
            raise util.Abort(_('cannot commit svn externals'))
        if missing:
            # svn can commit with missing entries but aborting like hg
            # seems a better approach.
            raise util.Abort(_('cannot commit missing svn entries'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self._ui.status(commitinfo)
        # escape the literal dot: the unescaped '.' matched any character
        newrev = re.search(r'Committed revision ([0-9]+)\.', commitinfo)
        if not newrev:
            if not commitinfo.strip():
                # Sometimes, our definition of "changed" differs from
                # svn one. For instance, svn ignores missing files
                # when committing. If there are only missing files, no
                # commit is made, no output and no error code.
                raise util.Abort(_('failed to commit svn changes'))
            raise util.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        self._ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev

    @annotatesubrepoerror
    def remove(self):
        if self.dirty():
            # i18n fix: apply % OUTSIDE _() so the untranslated template
            # is looked up in the message catalog
            self._ui.warn(_('not removing repo %s because '
                            'it has changes.\n') % self._path)
            return
        self._ui.note(_('removing subrepo %s\n') % self._path)

        def onerror(function, path, excinfo):
            if function is not os.remove:
                raise
            # read-only files cannot be unlinked under Windows
            s = os.stat(path)
            if (s.st_mode & stat.S_IWRITE) != 0:
                raise
            os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
            os.remove(path)

        path = self._ctx._repo.wjoin(self._path)
        shutil.rmtree(path, onerror=onerror)
        try:
            # prune now-empty parent directories; best-effort only
            os.removedirs(os.path.dirname(path))
        except OSError:
            pass

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))
        status, err = self._svncommand(args, failok=True)
        _sanitize(self._ui, self._path)
        # escape the literal dot: the unescaped '.' matched any character
        if not re.search(r'Checked out revision [0-9]+\.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged()[:2] == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise util.Abort((status or err).splitlines()[-1])
        self._ui.status(status)

    @annotatesubrepoerror
    def merge(self, state):
        old = self._state[1]
        new = state[1]
        wcrev = self._wcrev()
        if new != wcrev:
            dirty = old == wcrev or self._wcchanged()[0]
            if _updateprompt(self._ui, self, dirty, wcrev, new):
                self.get(state, False)

    def push(self, opts):
        # push is a no-op for SVN
        return True

    @annotatesubrepoerror
    def files(self):
        output = self._svncommand(['list', '--recursive', '--xml'])[0]
        doc = xml.dom.minidom.parseString(output)
        paths = []
        for e in doc.getElementsByTagName('entry'):
            kind = str(e.getAttribute('kind'))
            if kind != 'file':
                continue
            name = ''.join(c.data for c
                           in e.getElementsByTagName('name')[0].childNodes
                           if c.nodeType == c.TEXT_NODE)
            paths.append(name.encode('utf-8'))
        return paths

    def filedata(self, name):
        return self._svncommand(['cat'], name)[0]
1098
1098
1099
1099
1100 class gitsubrepo(abstractsubrepo):
1100 class gitsubrepo(abstractsubrepo):
1101 def __init__(self, ctx, path, state):
1101 def __init__(self, ctx, path, state):
1102 self._state = state
1102 self._state = state
1103 self._ctx = ctx
1103 self._ctx = ctx
1104 self._path = path
1104 self._path = path
1105 self._relpath = os.path.join(reporelpath(ctx._repo), path)
1105 self._relpath = os.path.join(reporelpath(ctx._repo), path)
1106 self._abspath = ctx._repo.wjoin(path)
1106 self._abspath = ctx._repo.wjoin(path)
1107 self._subparent = ctx._repo
1107 self._subparent = ctx._repo
1108 self._ui = ctx._repo.ui
1108 self._ui = ctx._repo.ui
1109 self._ensuregit()
1109 self._ensuregit()
1110
1110
1111 def _ensuregit(self):
1111 def _ensuregit(self):
1112 try:
1112 try:
1113 self._gitexecutable = 'git'
1113 self._gitexecutable = 'git'
1114 out, err = self._gitnodir(['--version'])
1114 out, err = self._gitnodir(['--version'])
1115 except OSError, e:
1115 except OSError, e:
1116 if e.errno != 2 or os.name != 'nt':
1116 if e.errno != 2 or os.name != 'nt':
1117 raise
1117 raise
1118 self._gitexecutable = 'git.cmd'
1118 self._gitexecutable = 'git.cmd'
1119 out, err = self._gitnodir(['--version'])
1119 out, err = self._gitnodir(['--version'])
1120 m = re.search(r'^git version (\d+)\.(\d+)', out)
1120 m = re.search(r'^git version (\d+)\.(\d+)', out)
1121 if not m:
1121 if not m:
1122 self._ui.warn(_('cannot retrieve git version\n'))
1122 self._ui.warn(_('cannot retrieve git version\n'))
1123 return
1123 return
1124 version = (int(m.group(1)), int(m.group(2)))
1124 version = (int(m.group(1)), int(m.group(2)))
1125 # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
1125 # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
1126 # despite the docstring comment. For now, error on 1.4.0, warn on
1126 # despite the docstring comment. For now, error on 1.4.0, warn on
1127 # 1.5.0 but attempt to continue.
1127 # 1.5.0 but attempt to continue.
1128 if version < (1, 5):
1128 if version < (1, 5):
1129 raise util.Abort(_('git subrepo requires at least 1.6.0 or later'))
1129 raise util.Abort(_('git subrepo requires at least 1.6.0 or later'))
1130 elif version < (1, 6):
1130 elif version < (1, 6):
1131 self._ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
1131 self._ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
1132
1132
1133 def _gitcommand(self, commands, env=None, stream=False):
1133 def _gitcommand(self, commands, env=None, stream=False):
1134 return self._gitdir(commands, env=env, stream=stream)[0]
1134 return self._gitdir(commands, env=env, stream=stream)[0]
1135
1135
1136 def _gitdir(self, commands, env=None, stream=False):
1136 def _gitdir(self, commands, env=None, stream=False):
1137 return self._gitnodir(commands, env=env, stream=stream,
1137 return self._gitnodir(commands, env=env, stream=stream,
1138 cwd=self._abspath)
1138 cwd=self._abspath)
1139
1139
1140 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
1140 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
1141 """Calls the git command
1141 """Calls the git command
1142
1142
1143 The methods tries to call the git command. versions prior to 1.6.0
1143 The methods tries to call the git command. versions prior to 1.6.0
1144 are not supported and very probably fail.
1144 are not supported and very probably fail.
1145 """
1145 """
1146 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
1146 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
1147 # unless ui.quiet is set, print git's stderr,
1147 # unless ui.quiet is set, print git's stderr,
1148 # which is mostly progress and useful info
1148 # which is mostly progress and useful info
1149 errpipe = None
1149 errpipe = None
1150 if self._ui.quiet:
1150 if self._ui.quiet:
1151 errpipe = open(os.devnull, 'w')
1151 errpipe = open(os.devnull, 'w')
1152 p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
1152 p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
1153 cwd=cwd, env=env, close_fds=util.closefds,
1153 cwd=cwd, env=env, close_fds=util.closefds,
1154 stdout=subprocess.PIPE, stderr=errpipe)
1154 stdout=subprocess.PIPE, stderr=errpipe)
1155 if stream:
1155 if stream:
1156 return p.stdout, None
1156 return p.stdout, None
1157
1157
1158 retdata = p.stdout.read().strip()
1158 retdata = p.stdout.read().strip()
1159 # wait for the child to exit to avoid race condition.
1159 # wait for the child to exit to avoid race condition.
1160 p.wait()
1160 p.wait()
1161
1161
1162 if p.returncode != 0 and p.returncode != 1:
1162 if p.returncode != 0 and p.returncode != 1:
1163 # there are certain error codes that are ok
1163 # there are certain error codes that are ok
1164 command = commands[0]
1164 command = commands[0]
1165 if command in ('cat-file', 'symbolic-ref'):
1165 if command in ('cat-file', 'symbolic-ref'):
1166 return retdata, p.returncode
1166 return retdata, p.returncode
1167 # for all others, abort
1167 # for all others, abort
1168 raise util.Abort('git %s error %d in %s' %
1168 raise util.Abort('git %s error %d in %s' %
1169 (command, p.returncode, self._relpath))
1169 (command, p.returncode, self._relpath))
1170
1170
1171 return retdata, p.returncode
1171 return retdata, p.returncode
1172
1172
1173 def _gitmissing(self):
1173 def _gitmissing(self):
1174 return not os.path.exists(os.path.join(self._abspath, '.git'))
1174 return not os.path.exists(os.path.join(self._abspath, '.git'))
1175
1175
1176 def _gitstate(self):
1176 def _gitstate(self):
1177 return self._gitcommand(['rev-parse', 'HEAD'])
1177 return self._gitcommand(['rev-parse', 'HEAD'])
1178
1178
1179 def _gitcurrentbranch(self):
1179 def _gitcurrentbranch(self):
1180 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1180 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1181 if err:
1181 if err:
1182 current = None
1182 current = None
1183 return current
1183 return current
1184
1184
1185 def _gitremote(self, remote):
1185 def _gitremote(self, remote):
1186 out = self._gitcommand(['remote', 'show', '-n', remote])
1186 out = self._gitcommand(['remote', 'show', '-n', remote])
1187 line = out.split('\n')[1]
1187 line = out.split('\n')[1]
1188 i = line.index('URL: ') + len('URL: ')
1188 i = line.index('URL: ') + len('URL: ')
1189 return line[i:]
1189 return line[i:]
1190
1190
1191 def _githavelocally(self, revision):
1191 def _githavelocally(self, revision):
1192 out, code = self._gitdir(['cat-file', '-e', revision])
1192 out, code = self._gitdir(['cat-file', '-e', revision])
1193 return code == 0
1193 return code == 0
1194
1194
1195 def _gitisancestor(self, r1, r2):
1195 def _gitisancestor(self, r1, r2):
1196 base = self._gitcommand(['merge-base', r1, r2])
1196 base = self._gitcommand(['merge-base', r1, r2])
1197 return base == r1
1197 return base == r1
1198
1198
1199 def _gitisbare(self):
1199 def _gitisbare(self):
1200 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1200 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1201
1201
1202 def _gitupdatestat(self):
1202 def _gitupdatestat(self):
1203 """This must be run before git diff-index.
1203 """This must be run before git diff-index.
1204 diff-index only looks at changes to file stat;
1204 diff-index only looks at changes to file stat;
1205 this command looks at file contents and updates the stat."""
1205 this command looks at file contents and updates the stat."""
1206 self._gitcommand(['update-index', '-q', '--refresh'])
1206 self._gitcommand(['update-index', '-q', '--refresh'])
1207
1207
1208 def _gitbranchmap(self):
1208 def _gitbranchmap(self):
1209 '''returns 2 things:
1209 '''returns 2 things:
1210 a map from git branch to revision
1210 a map from git branch to revision
1211 a map from revision to branches'''
1211 a map from revision to branches'''
1212 branch2rev = {}
1212 branch2rev = {}
1213 rev2branch = {}
1213 rev2branch = {}
1214
1214
1215 out = self._gitcommand(['for-each-ref', '--format',
1215 out = self._gitcommand(['for-each-ref', '--format',
1216 '%(objectname) %(refname)'])
1216 '%(objectname) %(refname)'])
1217 for line in out.split('\n'):
1217 for line in out.split('\n'):
1218 revision, ref = line.split(' ')
1218 revision, ref = line.split(' ')
1219 if (not ref.startswith('refs/heads/') and
1219 if (not ref.startswith('refs/heads/') and
1220 not ref.startswith('refs/remotes/')):
1220 not ref.startswith('refs/remotes/')):
1221 continue
1221 continue
1222 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
1222 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
1223 continue # ignore remote/HEAD redirects
1223 continue # ignore remote/HEAD redirects
1224 branch2rev[ref] = revision
1224 branch2rev[ref] = revision
1225 rev2branch.setdefault(revision, []).append(ref)
1225 rev2branch.setdefault(revision, []).append(ref)
1226 return branch2rev, rev2branch
1226 return branch2rev, rev2branch
1227
1227
1228 def _gittracking(self, branches):
1228 def _gittracking(self, branches):
1229 'return map of remote branch to local tracking branch'
1229 'return map of remote branch to local tracking branch'
1230 # assumes no more than one local tracking branch for each remote
1230 # assumes no more than one local tracking branch for each remote
1231 tracking = {}
1231 tracking = {}
1232 for b in branches:
1232 for b in branches:
1233 if b.startswith('refs/remotes/'):
1233 if b.startswith('refs/remotes/'):
1234 continue
1234 continue
1235 bname = b.split('/', 2)[2]
1235 bname = b.split('/', 2)[2]
1236 remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
1236 remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
1237 if remote:
1237 if remote:
1238 ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
1238 ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
1239 tracking['refs/remotes/%s/%s' %
1239 tracking['refs/remotes/%s/%s' %
1240 (remote, ref.split('/', 2)[2])] = b
1240 (remote, ref.split('/', 2)[2])] = b
1241 return tracking
1241 return tracking
1242
1242
1243 def _abssource(self, source):
1243 def _abssource(self, source):
1244 if '://' not in source:
1244 if '://' not in source:
1245 # recognize the scp syntax as an absolute source
1245 # recognize the scp syntax as an absolute source
1246 colon = source.find(':')
1246 colon = source.find(':')
1247 if colon != -1 and '/' not in source[:colon]:
1247 if colon != -1 and '/' not in source[:colon]:
1248 return source
1248 return source
1249 self._subsource = source
1249 self._subsource = source
1250 return _abssource(self)
1250 return _abssource(self)
1251
1251
1252 def _fetch(self, source, revision):
1252 def _fetch(self, source, revision):
1253 if self._gitmissing():
1253 if self._gitmissing():
1254 source = self._abssource(source)
1254 source = self._abssource(source)
1255 self._ui.status(_('cloning subrepo %s from %s\n') %
1255 self._ui.status(_('cloning subrepo %s from %s\n') %
1256 (self._relpath, source))
1256 (self._relpath, source))
1257 self._gitnodir(['clone', source, self._abspath])
1257 self._gitnodir(['clone', source, self._abspath])
1258 if self._githavelocally(revision):
1258 if self._githavelocally(revision):
1259 return
1259 return
1260 self._ui.status(_('pulling subrepo %s from %s\n') %
1260 self._ui.status(_('pulling subrepo %s from %s\n') %
1261 (self._relpath, self._gitremote('origin')))
1261 (self._relpath, self._gitremote('origin')))
1262 # try only origin: the originally cloned repo
1262 # try only origin: the originally cloned repo
1263 self._gitcommand(['fetch'])
1263 self._gitcommand(['fetch'])
1264 if not self._githavelocally(revision):
1264 if not self._githavelocally(revision):
1265 raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
1265 raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
1266 (revision, self._relpath))
1266 (revision, self._relpath))
1267
1267
1268 @annotatesubrepoerror
1268 @annotatesubrepoerror
1269 def dirty(self, ignoreupdate=False):
1269 def dirty(self, ignoreupdate=False):
1270 if self._gitmissing():
1270 if self._gitmissing():
1271 return self._state[1] != ''
1271 return self._state[1] != ''
1272 if self._gitisbare():
1272 if self._gitisbare():
1273 return True
1273 return True
1274 if not ignoreupdate and self._state[1] != self._gitstate():
1274 if not ignoreupdate and self._state[1] != self._gitstate():
1275 # different version checked out
1275 # different version checked out
1276 return True
1276 return True
1277 # check for staged changes or modified files; ignore untracked files
1277 # check for staged changes or modified files; ignore untracked files
1278 self._gitupdatestat()
1278 self._gitupdatestat()
1279 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1279 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1280 return code == 1
1280 return code == 1
1281
1281
1282 def basestate(self):
1282 def basestate(self):
1283 return self._gitstate()
1283 return self._gitstate()
1284
1284
1285 @annotatesubrepoerror
1285 @annotatesubrepoerror
1286 def get(self, state, overwrite=False):
1286 def get(self, state, overwrite=False):
1287 source, revision, kind = state
1287 source, revision, kind = state
1288 if not revision:
1288 if not revision:
1289 self.remove()
1289 self.remove()
1290 return
1290 return
1291 self._fetch(source, revision)
1291 self._fetch(source, revision)
1292 # if the repo was set to be bare, unbare it
1292 # if the repo was set to be bare, unbare it
1293 if self._gitisbare():
1293 if self._gitisbare():
1294 self._gitcommand(['config', 'core.bare', 'false'])
1294 self._gitcommand(['config', 'core.bare', 'false'])
1295 if self._gitstate() == revision:
1295 if self._gitstate() == revision:
1296 self._gitcommand(['reset', '--hard', 'HEAD'])
1296 self._gitcommand(['reset', '--hard', 'HEAD'])
1297 return
1297 return
1298 elif self._gitstate() == revision:
1298 elif self._gitstate() == revision:
1299 if overwrite:
1299 if overwrite:
1300 # first reset the index to unmark new files for commit, because
1300 # first reset the index to unmark new files for commit, because
1301 # reset --hard will otherwise throw away files added for commit,
1301 # reset --hard will otherwise throw away files added for commit,
1302 # not just unmark them.
1302 # not just unmark them.
1303 self._gitcommand(['reset', 'HEAD'])
1303 self._gitcommand(['reset', 'HEAD'])
1304 self._gitcommand(['reset', '--hard', 'HEAD'])
1304 self._gitcommand(['reset', '--hard', 'HEAD'])
1305 return
1305 return
1306 branch2rev, rev2branch = self._gitbranchmap()
1306 branch2rev, rev2branch = self._gitbranchmap()
1307
1307
1308 def checkout(args):
1308 def checkout(args):
1309 cmd = ['checkout']
1309 cmd = ['checkout']
1310 if overwrite:
1310 if overwrite:
1311 # first reset the index to unmark new files for commit, because
1311 # first reset the index to unmark new files for commit, because
1312 # the -f option will otherwise throw away files added for
1312 # the -f option will otherwise throw away files added for
1313 # commit, not just unmark them.
1313 # commit, not just unmark them.
1314 self._gitcommand(['reset', 'HEAD'])
1314 self._gitcommand(['reset', 'HEAD'])
1315 cmd.append('-f')
1315 cmd.append('-f')
1316 self._gitcommand(cmd + args)
1316 self._gitcommand(cmd + args)
1317 _sanitize(self._ui, self._path)
1317 _sanitize(self._ui, self._path)
1318
1318
1319 def rawcheckout():
1319 def rawcheckout():
1320 # no branch to checkout, check it out with no branch
1320 # no branch to checkout, check it out with no branch
1321 self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
1321 self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
1322 self._relpath)
1322 self._relpath)
1323 self._ui.warn(_('check out a git branch if you intend '
1323 self._ui.warn(_('check out a git branch if you intend '
1324 'to make changes\n'))
1324 'to make changes\n'))
1325 checkout(['-q', revision])
1325 checkout(['-q', revision])
1326
1326
1327 if revision not in rev2branch:
1327 if revision not in rev2branch:
1328 rawcheckout()
1328 rawcheckout()
1329 return
1329 return
1330 branches = rev2branch[revision]
1330 branches = rev2branch[revision]
1331 firstlocalbranch = None
1331 firstlocalbranch = None
1332 for b in branches:
1332 for b in branches:
1333 if b == 'refs/heads/master':
1333 if b == 'refs/heads/master':
1334 # master trumps all other branches
1334 # master trumps all other branches
1335 checkout(['refs/heads/master'])
1335 checkout(['refs/heads/master'])
1336 return
1336 return
1337 if not firstlocalbranch and not b.startswith('refs/remotes/'):
1337 if not firstlocalbranch and not b.startswith('refs/remotes/'):
1338 firstlocalbranch = b
1338 firstlocalbranch = b
1339 if firstlocalbranch:
1339 if firstlocalbranch:
1340 checkout([firstlocalbranch])
1340 checkout([firstlocalbranch])
1341 return
1341 return
1342
1342
1343 tracking = self._gittracking(branch2rev.keys())
1343 tracking = self._gittracking(branch2rev.keys())
1344 # choose a remote branch already tracked if possible
1344 # choose a remote branch already tracked if possible
1345 remote = branches[0]
1345 remote = branches[0]
1346 if remote not in tracking:
1346 if remote not in tracking:
1347 for b in branches:
1347 for b in branches:
1348 if b in tracking:
1348 if b in tracking:
1349 remote = b
1349 remote = b
1350 break
1350 break
1351
1351
1352 if remote not in tracking:
1352 if remote not in tracking:
1353 # create a new local tracking branch
1353 # create a new local tracking branch
1354 local = remote.split('/', 3)[3]
1354 local = remote.split('/', 3)[3]
1355 checkout(['-b', local, remote])
1355 checkout(['-b', local, remote])
1356 elif self._gitisancestor(branch2rev[tracking[remote]], remote):
1356 elif self._gitisancestor(branch2rev[tracking[remote]], remote):
1357 # When updating to a tracked remote branch,
1357 # When updating to a tracked remote branch,
1358 # if the local tracking branch is downstream of it,
1358 # if the local tracking branch is downstream of it,
1359 # a normal `git pull` would have performed a "fast-forward merge"
1359 # a normal `git pull` would have performed a "fast-forward merge"
1360 # which is equivalent to updating the local branch to the remote.
1360 # which is equivalent to updating the local branch to the remote.
1361 # Since we are only looking at branching at update, we need to
1361 # Since we are only looking at branching at update, we need to
1362 # detect this situation and perform this action lazily.
1362 # detect this situation and perform this action lazily.
1363 if tracking[remote] != self._gitcurrentbranch():
1363 if tracking[remote] != self._gitcurrentbranch():
1364 checkout([tracking[remote]])
1364 checkout([tracking[remote]])
1365 self._gitcommand(['merge', '--ff', remote])
1365 self._gitcommand(['merge', '--ff', remote])
1366 else:
1366 else:
1367 # a real merge would be required, just checkout the revision
1367 # a real merge would be required, just checkout the revision
1368 rawcheckout()
1368 rawcheckout()
1369
1369
1370 @annotatesubrepoerror
1370 @annotatesubrepoerror
1371 def commit(self, text, user, date):
1371 def commit(self, text, user, date):
1372 if self._gitmissing():
1372 if self._gitmissing():
1373 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1373 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1374 cmd = ['commit', '-a', '-m', text]
1374 cmd = ['commit', '-a', '-m', text]
1375 env = os.environ.copy()
1375 env = os.environ.copy()
1376 if user:
1376 if user:
1377 cmd += ['--author', user]
1377 cmd += ['--author', user]
1378 if date:
1378 if date:
1379 # git's date parser silently ignores when seconds < 1e9
1379 # git's date parser silently ignores when seconds < 1e9
1380 # convert to ISO8601
1380 # convert to ISO8601
1381 env['GIT_AUTHOR_DATE'] = util.datestr(date,
1381 env['GIT_AUTHOR_DATE'] = util.datestr(date,
1382 '%Y-%m-%dT%H:%M:%S %1%2')
1382 '%Y-%m-%dT%H:%M:%S %1%2')
1383 self._gitcommand(cmd, env=env)
1383 self._gitcommand(cmd, env=env)
1384 # make sure commit works otherwise HEAD might not exist under certain
1384 # make sure commit works otherwise HEAD might not exist under certain
1385 # circumstances
1385 # circumstances
1386 return self._gitstate()
1386 return self._gitstate()
1387
1387
1388 @annotatesubrepoerror
1388 @annotatesubrepoerror
1389 def merge(self, state):
1389 def merge(self, state):
1390 source, revision, kind = state
1390 source, revision, kind = state
1391 self._fetch(source, revision)
1391 self._fetch(source, revision)
1392 base = self._gitcommand(['merge-base', revision, self._state[1]])
1392 base = self._gitcommand(['merge-base', revision, self._state[1]])
1393 self._gitupdatestat()
1393 self._gitupdatestat()
1394 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1394 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1395
1395
1396 def mergefunc():
1396 def mergefunc():
1397 if base == revision:
1397 if base == revision:
1398 self.get(state) # fast forward merge
1398 self.get(state) # fast forward merge
1399 elif base != self._state[1]:
1399 elif base != self._state[1]:
1400 self._gitcommand(['merge', '--no-commit', revision])
1400 self._gitcommand(['merge', '--no-commit', revision])
1401 _sanitize(self._ui, self._path)
1401 _sanitize(self._ui, self._path)
1402
1402
1403 if self.dirty():
1403 if self.dirty():
1404 if self._gitstate() != revision:
1404 if self._gitstate() != revision:
1405 dirty = self._gitstate() == self._state[1] or code != 0
1405 dirty = self._gitstate() == self._state[1] or code != 0
1406 if _updateprompt(self._ui, self, dirty,
1406 if _updateprompt(self._ui, self, dirty,
1407 self._state[1][:7], revision[:7]):
1407 self._state[1][:7], revision[:7]):
1408 mergefunc()
1408 mergefunc()
1409 else:
1409 else:
1410 mergefunc()
1410 mergefunc()
1411
1411
1412 @annotatesubrepoerror
1412 @annotatesubrepoerror
1413 def push(self, opts):
1413 def push(self, opts):
1414 force = opts.get('force')
1414 force = opts.get('force')
1415
1415
1416 if not self._state[1]:
1416 if not self._state[1]:
1417 return True
1417 return True
1418 if self._gitmissing():
1418 if self._gitmissing():
1419 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1419 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1420 # if a branch in origin contains the revision, nothing to do
1420 # if a branch in origin contains the revision, nothing to do
1421 branch2rev, rev2branch = self._gitbranchmap()
1421 branch2rev, rev2branch = self._gitbranchmap()
1422 if self._state[1] in rev2branch:
1422 if self._state[1] in rev2branch:
1423 for b in rev2branch[self._state[1]]:
1423 for b in rev2branch[self._state[1]]:
1424 if b.startswith('refs/remotes/origin/'):
1424 if b.startswith('refs/remotes/origin/'):
1425 return True
1425 return True
1426 for b, revision in branch2rev.iteritems():
1426 for b, revision in branch2rev.iteritems():
1427 if b.startswith('refs/remotes/origin/'):
1427 if b.startswith('refs/remotes/origin/'):
1428 if self._gitisancestor(self._state[1], revision):
1428 if self._gitisancestor(self._state[1], revision):
1429 return True
1429 return True
1430 # otherwise, try to push the currently checked out branch
1430 # otherwise, try to push the currently checked out branch
1431 cmd = ['push']
1431 cmd = ['push']
1432 if force:
1432 if force:
1433 cmd.append('--force')
1433 cmd.append('--force')
1434
1434
1435 current = self._gitcurrentbranch()
1435 current = self._gitcurrentbranch()
1436 if current:
1436 if current:
1437 # determine if the current branch is even useful
1437 # determine if the current branch is even useful
1438 if not self._gitisancestor(self._state[1], current):
1438 if not self._gitisancestor(self._state[1], current):
1439 self._ui.warn(_('unrelated git branch checked out '
1439 self._ui.warn(_('unrelated git branch checked out '
1440 'in subrepo %s\n') % self._relpath)
1440 'in subrepo %s\n') % self._relpath)
1441 return False
1441 return False
1442 self._ui.status(_('pushing branch %s of subrepo %s\n') %
1442 self._ui.status(_('pushing branch %s of subrepo %s\n') %
1443 (current.split('/', 2)[2], self._relpath))
1443 (current.split('/', 2)[2], self._relpath))
1444 self._gitcommand(cmd + ['origin', current])
1444 self._gitcommand(cmd + ['origin', current])
1445 return True
1445 return True
1446 else:
1446 else:
1447 self._ui.warn(_('no branch checked out in subrepo %s\n'
1447 self._ui.warn(_('no branch checked out in subrepo %s\n'
1448 'cannot push revision %s\n') %
1448 'cannot push revision %s\n') %
1449 (self._relpath, self._state[1]))
1449 (self._relpath, self._state[1]))
1450 return False
1450 return False
1451
1451
1452 @annotatesubrepoerror
1452 @annotatesubrepoerror
1453 def remove(self):
1453 def remove(self):
1454 if self._gitmissing():
1454 if self._gitmissing():
1455 return
1455 return
1456 if self.dirty():
1456 if self.dirty():
1457 self._ui.warn(_('not removing repo %s because '
1457 self._ui.warn(_('not removing repo %s because '
1458 'it has changes.\n') % self._relpath)
1458 'it has changes.\n') % self._relpath)
1459 return
1459 return
1460 # we can't fully delete the repository as it may contain
1460 # we can't fully delete the repository as it may contain
1461 # local-only history
1461 # local-only history
1462 self._ui.note(_('removing subrepo %s\n') % self._relpath)
1462 self._ui.note(_('removing subrepo %s\n') % self._relpath)
1463 self._gitcommand(['config', 'core.bare', 'true'])
1463 self._gitcommand(['config', 'core.bare', 'true'])
1464 for f in os.listdir(self._abspath):
1464 for f in os.listdir(self._abspath):
1465 if f == '.git':
1465 if f == '.git':
1466 continue
1466 continue
1467 path = os.path.join(self._abspath, f)
1467 path = os.path.join(self._abspath, f)
1468 if os.path.isdir(path) and not os.path.islink(path):
1468 if os.path.isdir(path) and not os.path.islink(path):
1469 shutil.rmtree(path)
1469 shutil.rmtree(path)
1470 else:
1470 else:
1471 os.remove(path)
1471 os.remove(path)
1472
1472
1473 def archive(self, ui, archiver, prefix, match=None):
1473 def archive(self, ui, archiver, prefix, match=None):
1474 total = 0
1474 total = 0
1475 source, revision = self._state
1475 source, revision = self._state
1476 if not revision:
1476 if not revision:
1477 return total
1477 return total
1478 self._fetch(source, revision)
1478 self._fetch(source, revision)
1479
1479
1480 # Parse git's native archive command.
1480 # Parse git's native archive command.
1481 # This should be much faster than manually traversing the trees
1481 # This should be much faster than manually traversing the trees
1482 # and objects with many subprocess calls.
1482 # and objects with many subprocess calls.
1483 tarstream = self._gitcommand(['archive', revision], stream=True)
1483 tarstream = self._gitcommand(['archive', revision], stream=True)
1484 tar = tarfile.open(fileobj=tarstream, mode='r|')
1484 tar = tarfile.open(fileobj=tarstream, mode='r|')
1485 relpath = subrelpath(self)
1485 relpath = subrelpath(self)
1486 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1486 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1487 for i, info in enumerate(tar):
1487 for i, info in enumerate(tar):
1488 if info.isdir():
1488 if info.isdir():
1489 continue
1489 continue
1490 if match and not match(info.name):
1490 if match and not match(info.name):
1491 continue
1491 continue
1492 if info.issym():
1492 if info.issym():
1493 data = info.linkname
1493 data = info.linkname
1494 else:
1494 else:
1495 data = tar.extractfile(info).read()
1495 data = tar.extractfile(info).read()
1496 archiver.addfile(os.path.join(prefix, self._path, info.name),
1496 archiver.addfile(os.path.join(prefix, self._path, info.name),
1497 info.mode, info.issym(), data)
1497 info.mode, info.issym(), data)
1498 total += 1
1498 total += 1
1499 ui.progress(_('archiving (%s)') % relpath, i + 1,
1499 ui.progress(_('archiving (%s)') % relpath, i + 1,
1500 unit=_('files'))
1500 unit=_('files'))
1501 ui.progress(_('archiving (%s)') % relpath, None)
1501 ui.progress(_('archiving (%s)') % relpath, None)
1502 return total
1502 return total
1503
1503
1504
1504
1505 @annotatesubrepoerror
1505 @annotatesubrepoerror
1506 def status(self, rev2, **opts):
1506 def status(self, rev2, **opts):
1507 rev1 = self._state[1]
1507 rev1 = self._state[1]
1508 if self._gitmissing() or not rev1:
1508 if self._gitmissing() or not rev1:
1509 # if the repo is missing, return no results
1509 # if the repo is missing, return no results
1510 return [], [], [], [], [], [], []
1510 return [], [], [], [], [], [], []
1511 modified, added, removed = [], [], []
1511 modified, added, removed = [], [], []
1512 self._gitupdatestat()
1512 self._gitupdatestat()
1513 if rev2:
1513 if rev2:
1514 command = ['diff-tree', rev1, rev2]
1514 command = ['diff-tree', rev1, rev2]
1515 else:
1515 else:
1516 command = ['diff-index', rev1]
1516 command = ['diff-index', rev1]
1517 out = self._gitcommand(command)
1517 out = self._gitcommand(command)
1518 for line in out.split('\n'):
1518 for line in out.split('\n'):
1519 tab = line.find('\t')
1519 tab = line.find('\t')
1520 if tab == -1:
1520 if tab == -1:
1521 continue
1521 continue
1522 status, f = line[tab - 1], line[tab + 1:]
1522 status, f = line[tab - 1], line[tab + 1:]
1523 if status == 'M':
1523 if status == 'M':
1524 modified.append(f)
1524 modified.append(f)
1525 elif status == 'A':
1525 elif status == 'A':
1526 added.append(f)
1526 added.append(f)
1527 elif status == 'D':
1527 elif status == 'D':
1528 removed.append(f)
1528 removed.append(f)
1529
1529
1530 deleted = unknown = ignored = clean = []
1530 deleted = unknown = ignored = clean = []
1531 return modified, added, removed, deleted, unknown, ignored, clean
1531 return modified, added, removed, deleted, unknown, ignored, clean
1532
1532
# registry mapping subrepo kind (as recorded in .hgsubstate) to its
# implementation class
types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    'git': gitsubrepo,
}
General Comments 0
You need to be logged in to leave comments. Login now