##// END OF EJS Templates
check-code: flag 0/1 used as constant Boolean expression
Martin Geisler -
r14494:1ffeeb91 default
parent child Browse files
Show More
@@ -1,377 +1,379
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # check-code - a style and portability checker for Mercurial
3 # check-code - a style and portability checker for Mercurial
4 #
4 #
5 # Copyright 2010 Matt Mackall <mpm@selenic.com>
5 # Copyright 2010 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 import re, glob, os, sys
10 import re, glob, os, sys
11 import keyword
11 import keyword
12 import optparse
12 import optparse
13
13
14 def repquote(m):
14 def repquote(m):
15 t = re.sub(r"\w", "x", m.group('text'))
15 t = re.sub(r"\w", "x", m.group('text'))
16 t = re.sub(r"[^\sx]", "o", t)
16 t = re.sub(r"[^\sx]", "o", t)
17 return m.group('quote') + t + m.group('quote')
17 return m.group('quote') + t + m.group('quote')
18
18
19 def reppython(m):
19 def reppython(m):
20 comment = m.group('comment')
20 comment = m.group('comment')
21 if comment:
21 if comment:
22 return "#" * len(comment)
22 return "#" * len(comment)
23 return repquote(m)
23 return repquote(m)
24
24
25 def repcomment(m):
25 def repcomment(m):
26 return m.group(1) + "#" * len(m.group(2))
26 return m.group(1) + "#" * len(m.group(2))
27
27
28 def repccomment(m):
28 def repccomment(m):
29 t = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
29 t = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
30 return m.group(1) + t + "*/"
30 return m.group(1) + t + "*/"
31
31
32 def repcallspaces(m):
32 def repcallspaces(m):
33 t = re.sub(r"\n\s+", "\n", m.group(2))
33 t = re.sub(r"\n\s+", "\n", m.group(2))
34 return m.group(1) + t
34 return m.group(1) + t
35
35
36 def repinclude(m):
36 def repinclude(m):
37 return m.group(1) + "<foo>"
37 return m.group(1) + "<foo>"
38
38
39 def rephere(m):
39 def rephere(m):
40 t = re.sub(r"\S", "x", m.group(2))
40 t = re.sub(r"\S", "x", m.group(2))
41 return m.group(1) + t
41 return m.group(1) + t
42
42
43
43
44 testpats = [
44 testpats = [
45 [
45 [
46 (r'(pushd|popd)', "don't use 'pushd' or 'popd', use 'cd'"),
46 (r'(pushd|popd)', "don't use 'pushd' or 'popd', use 'cd'"),
47 (r'\W\$?\(\([^\)]*\)\)', "don't use (()) or $(()), use 'expr'"),
47 (r'\W\$?\(\([^\)]*\)\)', "don't use (()) or $(()), use 'expr'"),
48 (r'^function', "don't use 'function', use old style"),
48 (r'^function', "don't use 'function', use old style"),
49 (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
49 (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
50 (r'echo.*\\n', "don't use 'echo \\n', use printf"),
50 (r'echo.*\\n', "don't use 'echo \\n', use printf"),
51 (r'echo -n', "don't use 'echo -n', use printf"),
51 (r'echo -n', "don't use 'echo -n', use printf"),
52 (r'^diff.*-\w*N', "don't use 'diff -N'"),
52 (r'^diff.*-\w*N', "don't use 'diff -N'"),
53 (r'(^| )wc[^|]*$', "filter wc output"),
53 (r'(^| )wc[^|]*$', "filter wc output"),
54 (r'head -c', "don't use 'head -c', use 'dd'"),
54 (r'head -c', "don't use 'head -c', use 'dd'"),
55 (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
55 (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
56 (r'printf.*\\\d\d\d', "don't use 'printf \NNN', use Python"),
56 (r'printf.*\\\d\d\d', "don't use 'printf \NNN', use Python"),
57 (r'printf.*\\x', "don't use printf \\x, use Python"),
57 (r'printf.*\\x', "don't use printf \\x, use Python"),
58 (r'\$\(.*\)', "don't use $(expr), use `expr`"),
58 (r'\$\(.*\)', "don't use $(expr), use `expr`"),
59 (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
59 (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
60 (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
60 (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
61 "use egrep for extended grep syntax"),
61 "use egrep for extended grep syntax"),
62 (r'/bin/', "don't use explicit paths for tools"),
62 (r'/bin/', "don't use explicit paths for tools"),
63 (r'\$PWD', "don't use $PWD, use `pwd`"),
63 (r'\$PWD', "don't use $PWD, use `pwd`"),
64 (r'[^\n]\Z', "no trailing newline"),
64 (r'[^\n]\Z', "no trailing newline"),
65 (r'export.*=', "don't export and assign at once"),
65 (r'export.*=', "don't export and assign at once"),
66 ('^([^"\']|("[^"]*")|(\'[^\']*\'))*\\^', "^ must be quoted"),
66 ('^([^"\']|("[^"]*")|(\'[^\']*\'))*\\^', "^ must be quoted"),
67 (r'^source\b', "don't use 'source', use '.'"),
67 (r'^source\b', "don't use 'source', use '.'"),
68 (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
68 (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
69 (r'ls\s+[^|-]+\s+-', "options to 'ls' must come before filenames"),
69 (r'ls\s+[^|-]+\s+-', "options to 'ls' must come before filenames"),
70 (r'[^>]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
70 (r'[^>]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
71 ],
71 ],
72 # warnings
72 # warnings
73 []
73 []
74 ]
74 ]
75
75
76 testfilters = [
76 testfilters = [
77 (r"( *)(#([^\n]*\S)?)", repcomment),
77 (r"( *)(#([^\n]*\S)?)", repcomment),
78 (r"<<(\S+)((.|\n)*?\n\1)", rephere),
78 (r"<<(\S+)((.|\n)*?\n\1)", rephere),
79 ]
79 ]
80
80
81 uprefix = r"^ \$ "
81 uprefix = r"^ \$ "
82 uprefixc = r"^ > "
82 uprefixc = r"^ > "
83 utestpats = [
83 utestpats = [
84 [
84 [
85 (r'^(\S| $ ).*(\S\s+|^\s+)\n', "trailing whitespace on non-output"),
85 (r'^(\S| $ ).*(\S\s+|^\s+)\n', "trailing whitespace on non-output"),
86 (uprefix + r'.*\|\s*sed', "use regex test output patterns instead of sed"),
86 (uprefix + r'.*\|\s*sed', "use regex test output patterns instead of sed"),
87 (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
87 (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
88 (uprefix + r'.*\$\?', "explicit exit code checks unnecessary"),
88 (uprefix + r'.*\$\?', "explicit exit code checks unnecessary"),
89 (uprefix + r'.*\|\| echo.*(fail|error)',
89 (uprefix + r'.*\|\| echo.*(fail|error)',
90 "explicit exit code checks unnecessary"),
90 "explicit exit code checks unnecessary"),
91 (uprefix + r'set -e', "don't use set -e"),
91 (uprefix + r'set -e', "don't use set -e"),
92 (uprefixc + r'( *)\t', "don't use tabs to indent"),
92 (uprefixc + r'( *)\t', "don't use tabs to indent"),
93 ],
93 ],
94 # warnings
94 # warnings
95 []
95 []
96 ]
96 ]
97
97
98 for i in [0, 1]:
98 for i in [0, 1]:
99 for p, m in testpats[i]:
99 for p, m in testpats[i]:
100 if p.startswith('^'):
100 if p.startswith('^'):
101 p = uprefix + p[1:]
101 p = uprefix + p[1:]
102 else:
102 else:
103 p = uprefix + p
103 p = uprefix + p
104 utestpats[i].append((p, m))
104 utestpats[i].append((p, m))
105
105
106 utestfilters = [
106 utestfilters = [
107 (r"( *)(#([^\n]*\S)?)", repcomment),
107 (r"( *)(#([^\n]*\S)?)", repcomment),
108 ]
108 ]
109
109
110 pypats = [
110 pypats = [
111 [
111 [
112 (r'^\s*def\s*\w+\s*\(.*,\s*\(',
112 (r'^\s*def\s*\w+\s*\(.*,\s*\(',
113 "tuple parameter unpacking not available in Python 3+"),
113 "tuple parameter unpacking not available in Python 3+"),
114 (r'lambda\s*\(.*,.*\)',
114 (r'lambda\s*\(.*,.*\)',
115 "tuple parameter unpacking not available in Python 3+"),
115 "tuple parameter unpacking not available in Python 3+"),
116 (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
116 (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
117 (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
117 (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
118 (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
118 (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
119 (r'^\s*\t', "don't use tabs"),
119 (r'^\s*\t', "don't use tabs"),
120 (r'\S;\s*\n', "semicolon"),
120 (r'\S;\s*\n', "semicolon"),
121 (r'\w,\w', "missing whitespace after ,"),
121 (r'\w,\w', "missing whitespace after ,"),
122 (r'\w[+/*\-<>]\w', "missing whitespace in expression"),
122 (r'\w[+/*\-<>]\w', "missing whitespace in expression"),
123 (r'^\s+\w+=\w+[^,)]$', "missing whitespace in assignment"),
123 (r'^\s+\w+=\w+[^,)]$', "missing whitespace in assignment"),
124 (r'.{85}', "line too long"),
124 (r'.{85}', "line too long"),
125 (r'[^\n]\Z', "no trailing newline"),
125 (r'[^\n]\Z', "no trailing newline"),
126 (r'(\S\s+|^\s+)\n', "trailing whitespace"),
126 (r'(\S\s+|^\s+)\n', "trailing whitespace"),
127 # (r'^\s+[^_ ][^_. ]+_[^_]+\s*=', "don't use underbars in identifiers"),
127 # (r'^\s+[^_ ][^_. ]+_[^_]+\s*=', "don't use underbars in identifiers"),
128 # (r'\w*[a-z][A-Z]\w*\s*=', "don't use camelcase in identifiers"),
128 # (r'\w*[a-z][A-Z]\w*\s*=', "don't use camelcase in identifiers"),
129 (r'^\s*(if|while|def|class|except|try)\s[^[]*:\s*[^\]#\s]+',
129 (r'^\s*(if|while|def|class|except|try)\s[^[]*:\s*[^\]#\s]+',
130 "linebreak after :"),
130 "linebreak after :"),
131 (r'class\s[^(]:', "old-style class, use class foo(object)"),
131 (r'class\s[^(]:', "old-style class, use class foo(object)"),
132 (r'\b(%s)\(' % '|'.join(keyword.kwlist),
132 (r'\b(%s)\(' % '|'.join(keyword.kwlist),
133 "Python keyword is not a function"),
133 "Python keyword is not a function"),
134 (r',]', "unneeded trailing ',' in list"),
134 (r',]', "unneeded trailing ',' in list"),
135 # (r'class\s[A-Z][^\(]*\((?!Exception)',
135 # (r'class\s[A-Z][^\(]*\((?!Exception)',
136 # "don't capitalize non-exception classes"),
136 # "don't capitalize non-exception classes"),
137 # (r'in range\(', "use xrange"),
137 # (r'in range\(', "use xrange"),
138 # (r'^\s*print\s+', "avoid using print in core and extensions"),
138 # (r'^\s*print\s+', "avoid using print in core and extensions"),
139 (r'[\x80-\xff]', "non-ASCII character literal"),
139 (r'[\x80-\xff]', "non-ASCII character literal"),
140 (r'("\')\.format\(', "str.format() not available in Python 2.4"),
140 (r'("\')\.format\(', "str.format() not available in Python 2.4"),
141 (r'^\s*with\s+', "with not available in Python 2.4"),
141 (r'^\s*with\s+', "with not available in Python 2.4"),
142 (r'\.isdisjoint\(', "set.isdisjoint not available in Python 2.4"),
142 (r'\.isdisjoint\(', "set.isdisjoint not available in Python 2.4"),
143 (r'^\s*except.* as .*:', "except as not available in Python 2.4"),
143 (r'^\s*except.* as .*:', "except as not available in Python 2.4"),
144 (r'^\s*os\.path\.relpath', "relpath not available in Python 2.4"),
144 (r'^\s*os\.path\.relpath', "relpath not available in Python 2.4"),
145 (r'(?<!def)\s+(any|all|format)\(',
145 (r'(?<!def)\s+(any|all|format)\(',
146 "any/all/format not available in Python 2.4"),
146 "any/all/format not available in Python 2.4"),
147 (r'(?<!def)\s+(callable)\(',
147 (r'(?<!def)\s+(callable)\(',
148 "callable not available in Python 3, use hasattr(f, '__call__')"),
148 "callable not available in Python 3, use hasattr(f, '__call__')"),
149 (r'if\s.*\selse', "if ... else form not available in Python 2.4"),
149 (r'if\s.*\selse', "if ... else form not available in Python 2.4"),
150 (r'^\s*(%s)\s\s' % '|'.join(keyword.kwlist),
150 (r'^\s*(%s)\s\s' % '|'.join(keyword.kwlist),
151 "gratuitous whitespace after Python keyword"),
151 "gratuitous whitespace after Python keyword"),
152 (r'([\(\[]\s\S)|(\S\s[\)\]])', "gratuitous whitespace in () or []"),
152 (r'([\(\[]\s\S)|(\S\s[\)\]])', "gratuitous whitespace in () or []"),
153 # (r'\s\s=', "gratuitous whitespace before ="),
153 # (r'\s\s=', "gratuitous whitespace before ="),
154 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=)\S',
154 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=)\S',
155 "missing whitespace around operator"),
155 "missing whitespace around operator"),
156 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=)\s',
156 (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=)\s',
157 "missing whitespace around operator"),
157 "missing whitespace around operator"),
158 (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=)\S',
158 (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=)\S',
159 "missing whitespace around operator"),
159 "missing whitespace around operator"),
160 (r'[^+=*/!<>&| -](\s=|=\s)[^= ]',
160 (r'[^+=*/!<>&| -](\s=|=\s)[^= ]',
161 "wrong whitespace around ="),
161 "wrong whitespace around ="),
162 (r'raise Exception', "don't raise generic exceptions"),
162 (r'raise Exception', "don't raise generic exceptions"),
163 (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
163 (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
164 (r' [=!]=\s+(True|False|None)',
164 (r' [=!]=\s+(True|False|None)',
165 "comparison with singleton, use 'is' or 'is not' instead"),
165 "comparison with singleton, use 'is' or 'is not' instead"),
166 (r'^\s*(while|if) [01]:',
167 "use True/False for constant Boolean expression"),
166 (r'opener\([^)]*\).read\(',
168 (r'opener\([^)]*\).read\(',
167 "use opener.read() instead"),
169 "use opener.read() instead"),
168 (r'opener\([^)]*\).write\(',
170 (r'opener\([^)]*\).write\(',
169 "use opener.write() instead"),
171 "use opener.write() instead"),
170 (r'[\s\(](open|file)\([^)]*\)\.read\(',
172 (r'[\s\(](open|file)\([^)]*\)\.read\(',
171 "use util.readfile() instead"),
173 "use util.readfile() instead"),
172 (r'[\s\(](open|file)\([^)]*\)\.write\(',
174 (r'[\s\(](open|file)\([^)]*\)\.write\(',
173 "use util.readfile() instead"),
175 "use util.readfile() instead"),
174 (r'^[\s\(]*(open(er)?|file)\([^)]*\)',
176 (r'^[\s\(]*(open(er)?|file)\([^)]*\)',
175 "always assign an opened file to a variable, and close it afterwards"),
177 "always assign an opened file to a variable, and close it afterwards"),
176 (r'[\s\(](open|file)\([^)]*\)\.',
178 (r'[\s\(](open|file)\([^)]*\)\.',
177 "always assign an opened file to a variable, and close it afterwards"),
179 "always assign an opened file to a variable, and close it afterwards"),
178 ],
180 ],
179 # warnings
181 # warnings
180 [
182 [
181 (r'.{81}', "warning: line over 80 characters"),
183 (r'.{81}', "warning: line over 80 characters"),
182 (r'^\s*except:$', "warning: naked except clause"),
184 (r'^\s*except:$', "warning: naked except clause"),
183 (r'ui\.(status|progress|write|note|warn)\([\'\"]x',
185 (r'ui\.(status|progress|write|note|warn)\([\'\"]x',
184 "warning: unwrapped ui message"),
186 "warning: unwrapped ui message"),
185 ]
187 ]
186 ]
188 ]
187
189
188 pyfilters = [
190 pyfilters = [
189 (r"""(?msx)(?P<comment>\#.*?$)|
191 (r"""(?msx)(?P<comment>\#.*?$)|
190 ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
192 ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
191 (?P<text>(([^\\]|\\.)*?))
193 (?P<text>(([^\\]|\\.)*?))
192 (?P=quote))""", reppython),
194 (?P=quote))""", reppython),
193 ]
195 ]
194
196
195 cpats = [
197 cpats = [
196 [
198 [
197 (r'//', "don't use //-style comments"),
199 (r'//', "don't use //-style comments"),
198 (r'^ ', "don't use spaces to indent"),
200 (r'^ ', "don't use spaces to indent"),
199 (r'\S\t', "don't use tabs except for indent"),
201 (r'\S\t', "don't use tabs except for indent"),
200 (r'(\S\s+|^\s+)\n', "trailing whitespace"),
202 (r'(\S\s+|^\s+)\n', "trailing whitespace"),
201 (r'.{85}', "line too long"),
203 (r'.{85}', "line too long"),
202 (r'(while|if|do|for)\(', "use space after while/if/do/for"),
204 (r'(while|if|do|for)\(', "use space after while/if/do/for"),
203 (r'return\(', "return is not a function"),
205 (r'return\(', "return is not a function"),
204 (r' ;', "no space before ;"),
206 (r' ;', "no space before ;"),
205 (r'\w+\* \w+', "use int *foo, not int* foo"),
207 (r'\w+\* \w+', "use int *foo, not int* foo"),
206 (r'\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
208 (r'\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
207 (r'\S+ (\+\+|--)', "use foo++, not foo ++"),
209 (r'\S+ (\+\+|--)', "use foo++, not foo ++"),
208 (r'\w,\w', "missing whitespace after ,"),
210 (r'\w,\w', "missing whitespace after ,"),
209 (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
211 (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
210 (r'^#\s+\w', "use #foo, not # foo"),
212 (r'^#\s+\w', "use #foo, not # foo"),
211 (r'[^\n]\Z', "no trailing newline"),
213 (r'[^\n]\Z', "no trailing newline"),
212 (r'^\s*#import\b', "use only #include in standard C code"),
214 (r'^\s*#import\b', "use only #include in standard C code"),
213 ],
215 ],
214 # warnings
216 # warnings
215 []
217 []
216 ]
218 ]
217
219
218 cfilters = [
220 cfilters = [
219 (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
221 (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
220 (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
222 (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
221 (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
223 (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
222 (r'(\()([^)]+\))', repcallspaces),
224 (r'(\()([^)]+\))', repcallspaces),
223 ]
225 ]
224
226
225 inutilpats = [
227 inutilpats = [
226 [
228 [
227 (r'\bui\.', "don't use ui in util"),
229 (r'\bui\.', "don't use ui in util"),
228 ],
230 ],
229 # warnings
231 # warnings
230 []
232 []
231 ]
233 ]
232
234
233 inrevlogpats = [
235 inrevlogpats = [
234 [
236 [
235 (r'\brepo\.', "don't use repo in revlog"),
237 (r'\brepo\.', "don't use repo in revlog"),
236 ],
238 ],
237 # warnings
239 # warnings
238 []
240 []
239 ]
241 ]
240
242
241 checks = [
243 checks = [
242 ('python', r'.*\.(py|cgi)$', pyfilters, pypats),
244 ('python', r'.*\.(py|cgi)$', pyfilters, pypats),
243 ('test script', r'(.*/)?test-[^.~]*$', testfilters, testpats),
245 ('test script', r'(.*/)?test-[^.~]*$', testfilters, testpats),
244 ('c', r'.*\.c$', cfilters, cpats),
246 ('c', r'.*\.c$', cfilters, cpats),
245 ('unified test', r'.*\.t$', utestfilters, utestpats),
247 ('unified test', r'.*\.t$', utestfilters, utestpats),
246 ('layering violation repo in revlog', r'mercurial/revlog\.py', pyfilters,
248 ('layering violation repo in revlog', r'mercurial/revlog\.py', pyfilters,
247 inrevlogpats),
249 inrevlogpats),
248 ('layering violation ui in util', r'mercurial/util\.py', pyfilters,
250 ('layering violation ui in util', r'mercurial/util\.py', pyfilters,
249 inutilpats),
251 inutilpats),
250 ]
252 ]
251
253
252 class norepeatlogger(object):
254 class norepeatlogger(object):
253 def __init__(self):
255 def __init__(self):
254 self._lastseen = None
256 self._lastseen = None
255
257
256 def log(self, fname, lineno, line, msg, blame):
258 def log(self, fname, lineno, line, msg, blame):
257 """print error related a to given line of a given file.
259 """print error related a to given line of a given file.
258
260
259 The faulty line will also be printed but only once in the case
261 The faulty line will also be printed but only once in the case
260 of multiple errors.
262 of multiple errors.
261
263
262 :fname: filename
264 :fname: filename
263 :lineno: line number
265 :lineno: line number
264 :line: actual content of the line
266 :line: actual content of the line
265 :msg: error message
267 :msg: error message
266 """
268 """
267 msgid = fname, lineno, line
269 msgid = fname, lineno, line
268 if msgid != self._lastseen:
270 if msgid != self._lastseen:
269 if blame:
271 if blame:
270 print "%s:%d (%s):" % (fname, lineno, blame)
272 print "%s:%d (%s):" % (fname, lineno, blame)
271 else:
273 else:
272 print "%s:%d:" % (fname, lineno)
274 print "%s:%d:" % (fname, lineno)
273 print " > %s" % line
275 print " > %s" % line
274 self._lastseen = msgid
276 self._lastseen = msgid
275 print " " + msg
277 print " " + msg
276
278
277 _defaultlogger = norepeatlogger()
279 _defaultlogger = norepeatlogger()
278
280
279 def getblame(f):
281 def getblame(f):
280 lines = []
282 lines = []
281 for l in os.popen('hg annotate -un %s' % f):
283 for l in os.popen('hg annotate -un %s' % f):
282 start, line = l.split(':', 1)
284 start, line = l.split(':', 1)
283 user, rev = start.split()
285 user, rev = start.split()
284 lines.append((line[1:-1], user, rev))
286 lines.append((line[1:-1], user, rev))
285 return lines
287 return lines
286
288
287 def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
289 def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
288 blame=False, debug=False):
290 blame=False, debug=False):
289 """checks style and portability of a given file
291 """checks style and portability of a given file
290
292
291 :f: filepath
293 :f: filepath
292 :logfunc: function used to report error
294 :logfunc: function used to report error
293 logfunc(filename, linenumber, linecontent, errormessage)
295 logfunc(filename, linenumber, linecontent, errormessage)
294 :maxerr: number of error to display before arborting.
296 :maxerr: number of error to display before arborting.
295 Set to None (default) to report all errors
297 Set to None (default) to report all errors
296
298
297 return True if no error is found, False otherwise.
299 return True if no error is found, False otherwise.
298 """
300 """
299 blamecache = None
301 blamecache = None
300 result = True
302 result = True
301 for name, match, filters, pats in checks:
303 for name, match, filters, pats in checks:
302 if debug:
304 if debug:
303 print name, f
305 print name, f
304 fc = 0
306 fc = 0
305 if not re.match(match, f):
307 if not re.match(match, f):
306 if debug:
308 if debug:
307 print "Skipping %s for %s it doesn't match %s" % (
309 print "Skipping %s for %s it doesn't match %s" % (
308 name, match, f)
310 name, match, f)
309 continue
311 continue
310 fp = open(f)
312 fp = open(f)
311 pre = post = fp.read()
313 pre = post = fp.read()
312 fp.close()
314 fp.close()
313 if "no-" + "check-code" in pre:
315 if "no-" + "check-code" in pre:
314 if debug:
316 if debug:
315 print "Skipping %s for %s it has no- and check-code" % (
317 print "Skipping %s for %s it has no- and check-code" % (
316 name, f)
318 name, f)
317 break
319 break
318 for p, r in filters:
320 for p, r in filters:
319 post = re.sub(p, r, post)
321 post = re.sub(p, r, post)
320 if warnings:
322 if warnings:
321 pats = pats[0] + pats[1]
323 pats = pats[0] + pats[1]
322 else:
324 else:
323 pats = pats[0]
325 pats = pats[0]
324 # print post # uncomment to show filtered version
326 # print post # uncomment to show filtered version
325 z = enumerate(zip(pre.splitlines(), post.splitlines(True)))
327 z = enumerate(zip(pre.splitlines(), post.splitlines(True)))
326 if debug:
328 if debug:
327 print "Checking %s for %s" % (name, f)
329 print "Checking %s for %s" % (name, f)
328 for n, l in z:
330 for n, l in z:
329 if "check-code" + "-ignore" in l[0]:
331 if "check-code" + "-ignore" in l[0]:
330 if debug:
332 if debug:
331 print "Skipping %s for %s:%s (check-code -ignore)" % (
333 print "Skipping %s for %s:%s (check-code -ignore)" % (
332 name, f, n)
334 name, f, n)
333 continue
335 continue
334 for p, msg in pats:
336 for p, msg in pats:
335 if re.search(p, l[1]):
337 if re.search(p, l[1]):
336 bd = ""
338 bd = ""
337 if blame:
339 if blame:
338 bd = 'working directory'
340 bd = 'working directory'
339 if not blamecache:
341 if not blamecache:
340 blamecache = getblame(f)
342 blamecache = getblame(f)
341 if n < len(blamecache):
343 if n < len(blamecache):
342 bl, bu, br = blamecache[n]
344 bl, bu, br = blamecache[n]
343 if bl == l[0]:
345 if bl == l[0]:
344 bd = '%s@%s' % (bu, br)
346 bd = '%s@%s' % (bu, br)
345 logfunc(f, n + 1, l[0], msg, bd)
347 logfunc(f, n + 1, l[0], msg, bd)
346 fc += 1
348 fc += 1
347 result = False
349 result = False
348 if maxerr is not None and fc >= maxerr:
350 if maxerr is not None and fc >= maxerr:
349 print " (too many errors, giving up)"
351 print " (too many errors, giving up)"
350 break
352 break
351 return result
353 return result
352
354
353 if __name__ == "__main__":
355 if __name__ == "__main__":
354 parser = optparse.OptionParser("%prog [options] [files]")
356 parser = optparse.OptionParser("%prog [options] [files]")
355 parser.add_option("-w", "--warnings", action="store_true",
357 parser.add_option("-w", "--warnings", action="store_true",
356 help="include warning-level checks")
358 help="include warning-level checks")
357 parser.add_option("-p", "--per-file", type="int",
359 parser.add_option("-p", "--per-file", type="int",
358 help="max warnings per file")
360 help="max warnings per file")
359 parser.add_option("-b", "--blame", action="store_true",
361 parser.add_option("-b", "--blame", action="store_true",
360 help="use annotate to generate blame info")
362 help="use annotate to generate blame info")
361 parser.add_option("", "--debug", action="store_true",
363 parser.add_option("", "--debug", action="store_true",
362 help="show debug information")
364 help="show debug information")
363
365
364 parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False)
366 parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False)
365 (options, args) = parser.parse_args()
367 (options, args) = parser.parse_args()
366
368
367 if len(args) == 0:
369 if len(args) == 0:
368 check = glob.glob("*")
370 check = glob.glob("*")
369 else:
371 else:
370 check = args
372 check = args
371
373
372 for f in check:
374 for f in check:
373 ret = 0
375 ret = 0
374 if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
376 if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
375 blame=options.blame, debug=options.debug):
377 blame=options.blame, debug=options.debug):
376 ret = 1
378 ret = 1
377 sys.exit(ret)
379 sys.exit(ret)
@@ -1,166 +1,166
1 # perf.py - performance test routines
1 # perf.py - performance test routines
2 '''helper extension to measure performance'''
2 '''helper extension to measure performance'''
3
3
4 from mercurial import cmdutil, scmutil, match, commands
4 from mercurial import cmdutil, scmutil, match, commands
5 import time, os, sys
5 import time, os, sys
6
6
7 def timer(func, title=None):
7 def timer(func, title=None):
8 results = []
8 results = []
9 begin = time.time()
9 begin = time.time()
10 count = 0
10 count = 0
11 while 1:
11 while True:
12 ostart = os.times()
12 ostart = os.times()
13 cstart = time.time()
13 cstart = time.time()
14 r = func()
14 r = func()
15 cstop = time.time()
15 cstop = time.time()
16 ostop = os.times()
16 ostop = os.times()
17 count += 1
17 count += 1
18 a, b = ostart, ostop
18 a, b = ostart, ostop
19 results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
19 results.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
20 if cstop - begin > 3 and count >= 100:
20 if cstop - begin > 3 and count >= 100:
21 break
21 break
22 if cstop - begin > 10 and count >= 3:
22 if cstop - begin > 10 and count >= 3:
23 break
23 break
24 if title:
24 if title:
25 sys.stderr.write("! %s\n" % title)
25 sys.stderr.write("! %s\n" % title)
26 if r:
26 if r:
27 sys.stderr.write("! result: %s\n" % r)
27 sys.stderr.write("! result: %s\n" % r)
28 m = min(results)
28 m = min(results)
29 sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
29 sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
30 % (m[0], m[1] + m[2], m[1], m[2], count))
30 % (m[0], m[1] + m[2], m[1], m[2], count))
31
31
32 def perfwalk(ui, repo, *pats):
32 def perfwalk(ui, repo, *pats):
33 try:
33 try:
34 m = scmutil.match(repo, pats, {})
34 m = scmutil.match(repo, pats, {})
35 timer(lambda: len(list(repo.dirstate.walk(m, [], True, False))))
35 timer(lambda: len(list(repo.dirstate.walk(m, [], True, False))))
36 except:
36 except:
37 try:
37 try:
38 m = scmutil.match(repo, pats, {})
38 m = scmutil.match(repo, pats, {})
39 timer(lambda: len([b for a, b, c in repo.dirstate.statwalk([], m)]))
39 timer(lambda: len([b for a, b, c in repo.dirstate.statwalk([], m)]))
40 except:
40 except:
41 timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))
41 timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))
42
42
43 def perfstatus(ui, repo, *pats):
43 def perfstatus(ui, repo, *pats):
44 #m = match.always(repo.root, repo.getcwd())
44 #m = match.always(repo.root, repo.getcwd())
45 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False, False))))
45 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False, False))))
46 timer(lambda: sum(map(len, repo.status())))
46 timer(lambda: sum(map(len, repo.status())))
47
47
48 def perfheads(ui, repo):
48 def perfheads(ui, repo):
49 timer(lambda: len(repo.changelog.heads()))
49 timer(lambda: len(repo.changelog.heads()))
50
50
51 def perftags(ui, repo):
51 def perftags(ui, repo):
52 import mercurial.changelog, mercurial.manifest
52 import mercurial.changelog, mercurial.manifest
53 def t():
53 def t():
54 repo.changelog = mercurial.changelog.changelog(repo.sopener)
54 repo.changelog = mercurial.changelog.changelog(repo.sopener)
55 repo.manifest = mercurial.manifest.manifest(repo.sopener)
55 repo.manifest = mercurial.manifest.manifest(repo.sopener)
56 repo._tags = None
56 repo._tags = None
57 return len(repo.tags())
57 return len(repo.tags())
58 timer(t)
58 timer(t)
59
59
60 def perfdirstate(ui, repo):
60 def perfdirstate(ui, repo):
61 "a" in repo.dirstate
61 "a" in repo.dirstate
62 def d():
62 def d():
63 repo.dirstate.invalidate()
63 repo.dirstate.invalidate()
64 "a" in repo.dirstate
64 "a" in repo.dirstate
65 timer(d)
65 timer(d)
66
66
67 def perfdirstatedirs(ui, repo):
67 def perfdirstatedirs(ui, repo):
68 "a" in repo.dirstate
68 "a" in repo.dirstate
69 def d():
69 def d():
70 "a" in repo.dirstate._dirs
70 "a" in repo.dirstate._dirs
71 del repo.dirstate._dirs
71 del repo.dirstate._dirs
72 timer(d)
72 timer(d)
73
73
74 def perfmanifest(ui, repo):
74 def perfmanifest(ui, repo):
75 def d():
75 def d():
76 t = repo.manifest.tip()
76 t = repo.manifest.tip()
77 m = repo.manifest.read(t)
77 m = repo.manifest.read(t)
78 repo.manifest.mapcache = None
78 repo.manifest.mapcache = None
79 repo.manifest._cache = None
79 repo.manifest._cache = None
80 timer(d)
80 timer(d)
81
81
82 def perfindex(ui, repo):
82 def perfindex(ui, repo):
83 import mercurial.revlog
83 import mercurial.revlog
84 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
84 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
85 n = repo["tip"].node()
85 n = repo["tip"].node()
86 def d():
86 def d():
87 repo.invalidate()
87 repo.invalidate()
88 repo[n]
88 repo[n]
89 timer(d)
89 timer(d)
90
90
91 def perfstartup(ui, repo):
91 def perfstartup(ui, repo):
92 cmd = sys.argv[0]
92 cmd = sys.argv[0]
93 def d():
93 def d():
94 os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
94 os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
95 timer(d)
95 timer(d)
96
96
97 def perfparents(ui, repo):
97 def perfparents(ui, repo):
98 nl = [repo.changelog.node(i) for i in xrange(1000)]
98 nl = [repo.changelog.node(i) for i in xrange(1000)]
99 def d():
99 def d():
100 for n in nl:
100 for n in nl:
101 repo.changelog.parents(n)
101 repo.changelog.parents(n)
102 timer(d)
102 timer(d)
103
103
104 def perflookup(ui, repo, rev):
104 def perflookup(ui, repo, rev):
105 timer(lambda: len(repo.lookup(rev)))
105 timer(lambda: len(repo.lookup(rev)))
106
106
107 def perflog(ui, repo, **opts):
107 def perflog(ui, repo, **opts):
108 ui.pushbuffer()
108 ui.pushbuffer()
109 timer(lambda: commands.log(ui, repo, rev=[], date='', user='',
109 timer(lambda: commands.log(ui, repo, rev=[], date='', user='',
110 copies=opts.get('rename')))
110 copies=opts.get('rename')))
111 ui.popbuffer()
111 ui.popbuffer()
112
112
113 def perftemplating(ui, repo):
113 def perftemplating(ui, repo):
114 ui.pushbuffer()
114 ui.pushbuffer()
115 timer(lambda: commands.log(ui, repo, rev=[], date='', user='',
115 timer(lambda: commands.log(ui, repo, rev=[], date='', user='',
116 template='{date|shortdate} [{rev}:{node|short}]'
116 template='{date|shortdate} [{rev}:{node|short}]'
117 ' {author|person}: {desc|firstline}\n'))
117 ' {author|person}: {desc|firstline}\n'))
118 ui.popbuffer()
118 ui.popbuffer()
119
119
120 def perfdiffwd(ui, repo):
120 def perfdiffwd(ui, repo):
121 """Profile diff of working directory changes"""
121 """Profile diff of working directory changes"""
122 options = {
122 options = {
123 'w': 'ignore_all_space',
123 'w': 'ignore_all_space',
124 'b': 'ignore_space_change',
124 'b': 'ignore_space_change',
125 'B': 'ignore_blank_lines',
125 'B': 'ignore_blank_lines',
126 }
126 }
127
127
128 for diffopt in ('', 'w', 'b', 'B', 'wB'):
128 for diffopt in ('', 'w', 'b', 'B', 'wB'):
129 opts = dict((options[c], '1') for c in diffopt)
129 opts = dict((options[c], '1') for c in diffopt)
130 def d():
130 def d():
131 ui.pushbuffer()
131 ui.pushbuffer()
132 commands.diff(ui, repo, **opts)
132 commands.diff(ui, repo, **opts)
133 ui.popbuffer()
133 ui.popbuffer()
134 title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
134 title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
135 timer(d, title)
135 timer(d, title)
136
136
137 def perfrevlog(ui, repo, file_, **opts):
137 def perfrevlog(ui, repo, file_, **opts):
138 from mercurial import revlog
138 from mercurial import revlog
139 dist = opts['dist']
139 dist = opts['dist']
140 def d():
140 def d():
141 r = revlog.revlog(lambda fn: open(fn, 'rb'), file_)
141 r = revlog.revlog(lambda fn: open(fn, 'rb'), file_)
142 for x in xrange(0, len(r), dist):
142 for x in xrange(0, len(r), dist):
143 r.revision(r.node(x))
143 r.revision(r.node(x))
144
144
145 timer(d)
145 timer(d)
146
146
147 cmdtable = {
147 cmdtable = {
148 'perflookup': (perflookup, []),
148 'perflookup': (perflookup, []),
149 'perfparents': (perfparents, []),
149 'perfparents': (perfparents, []),
150 'perfstartup': (perfstartup, []),
150 'perfstartup': (perfstartup, []),
151 'perfstatus': (perfstatus, []),
151 'perfstatus': (perfstatus, []),
152 'perfwalk': (perfwalk, []),
152 'perfwalk': (perfwalk, []),
153 'perfmanifest': (perfmanifest, []),
153 'perfmanifest': (perfmanifest, []),
154 'perfindex': (perfindex, []),
154 'perfindex': (perfindex, []),
155 'perfheads': (perfheads, []),
155 'perfheads': (perfheads, []),
156 'perftags': (perftags, []),
156 'perftags': (perftags, []),
157 'perfdirstate': (perfdirstate, []),
157 'perfdirstate': (perfdirstate, []),
158 'perfdirstatedirs': (perfdirstate, []),
158 'perfdirstatedirs': (perfdirstate, []),
159 'perflog': (perflog,
159 'perflog': (perflog,
160 [('', 'rename', False, 'ask log to follow renames')]),
160 [('', 'rename', False, 'ask log to follow renames')]),
161 'perftemplating': (perftemplating, []),
161 'perftemplating': (perftemplating, []),
162 'perfdiffwd': (perfdiffwd, []),
162 'perfdiffwd': (perfdiffwd, []),
163 'perfrevlog': (perfrevlog,
163 'perfrevlog': (perfrevlog,
164 [('d', 'dist', 100, 'distance between the revisions')],
164 [('d', 'dist', 100, 'distance between the revisions')],
165 "[INDEXFILE]"),
165 "[INDEXFILE]"),
166 }
166 }
@@ -1,271 +1,271
1 # cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
1 # cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import os, re, socket, errno
8 import os, re, socket, errno
9 from cStringIO import StringIO
9 from cStringIO import StringIO
10 from mercurial import encoding, util
10 from mercurial import encoding, util
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12
12
13 from common import NoRepo, commit, converter_source, checktool
13 from common import NoRepo, commit, converter_source, checktool
14 import cvsps
14 import cvsps
15
15
16 class convert_cvs(converter_source):
16 class convert_cvs(converter_source):
17 def __init__(self, ui, path, rev=None):
17 def __init__(self, ui, path, rev=None):
18 super(convert_cvs, self).__init__(ui, path, rev=rev)
18 super(convert_cvs, self).__init__(ui, path, rev=rev)
19
19
20 cvs = os.path.join(path, "CVS")
20 cvs = os.path.join(path, "CVS")
21 if not os.path.exists(cvs):
21 if not os.path.exists(cvs):
22 raise NoRepo(_("%s does not look like a CVS checkout") % path)
22 raise NoRepo(_("%s does not look like a CVS checkout") % path)
23
23
24 checktool('cvs')
24 checktool('cvs')
25
25
26 self.changeset = None
26 self.changeset = None
27 self.files = {}
27 self.files = {}
28 self.tags = {}
28 self.tags = {}
29 self.lastbranch = {}
29 self.lastbranch = {}
30 self.socket = None
30 self.socket = None
31 self.cvsroot = open(os.path.join(cvs, "Root")).read()[:-1]
31 self.cvsroot = open(os.path.join(cvs, "Root")).read()[:-1]
32 self.cvsrepo = open(os.path.join(cvs, "Repository")).read()[:-1]
32 self.cvsrepo = open(os.path.join(cvs, "Repository")).read()[:-1]
33 self.encoding = encoding.encoding
33 self.encoding = encoding.encoding
34
34
35 self._connect()
35 self._connect()
36
36
37 def _parse(self):
37 def _parse(self):
38 if self.changeset is not None:
38 if self.changeset is not None:
39 return
39 return
40 self.changeset = {}
40 self.changeset = {}
41
41
42 maxrev = 0
42 maxrev = 0
43 if self.rev:
43 if self.rev:
44 # TODO: handle tags
44 # TODO: handle tags
45 try:
45 try:
46 # patchset number?
46 # patchset number?
47 maxrev = int(self.rev)
47 maxrev = int(self.rev)
48 except ValueError:
48 except ValueError:
49 raise util.Abort(_('revision %s is not a patchset number')
49 raise util.Abort(_('revision %s is not a patchset number')
50 % self.rev)
50 % self.rev)
51
51
52 d = os.getcwd()
52 d = os.getcwd()
53 try:
53 try:
54 os.chdir(self.path)
54 os.chdir(self.path)
55 id = None
55 id = None
56
56
57 cache = 'update'
57 cache = 'update'
58 if not self.ui.configbool('convert', 'cvsps.cache', True):
58 if not self.ui.configbool('convert', 'cvsps.cache', True):
59 cache = None
59 cache = None
60 db = cvsps.createlog(self.ui, cache=cache)
60 db = cvsps.createlog(self.ui, cache=cache)
61 db = cvsps.createchangeset(self.ui, db,
61 db = cvsps.createchangeset(self.ui, db,
62 fuzz=int(self.ui.config('convert', 'cvsps.fuzz', 60)),
62 fuzz=int(self.ui.config('convert', 'cvsps.fuzz', 60)),
63 mergeto=self.ui.config('convert', 'cvsps.mergeto', None),
63 mergeto=self.ui.config('convert', 'cvsps.mergeto', None),
64 mergefrom=self.ui.config('convert', 'cvsps.mergefrom', None))
64 mergefrom=self.ui.config('convert', 'cvsps.mergefrom', None))
65
65
66 for cs in db:
66 for cs in db:
67 if maxrev and cs.id > maxrev:
67 if maxrev and cs.id > maxrev:
68 break
68 break
69 id = str(cs.id)
69 id = str(cs.id)
70 cs.author = self.recode(cs.author)
70 cs.author = self.recode(cs.author)
71 self.lastbranch[cs.branch] = id
71 self.lastbranch[cs.branch] = id
72 cs.comment = self.recode(cs.comment)
72 cs.comment = self.recode(cs.comment)
73 date = util.datestr(cs.date)
73 date = util.datestr(cs.date)
74 self.tags.update(dict.fromkeys(cs.tags, id))
74 self.tags.update(dict.fromkeys(cs.tags, id))
75
75
76 files = {}
76 files = {}
77 for f in cs.entries:
77 for f in cs.entries:
78 files[f.file] = "%s%s" % ('.'.join([str(x)
78 files[f.file] = "%s%s" % ('.'.join([str(x)
79 for x in f.revision]),
79 for x in f.revision]),
80 ['', '(DEAD)'][f.dead])
80 ['', '(DEAD)'][f.dead])
81
81
82 # add current commit to set
82 # add current commit to set
83 c = commit(author=cs.author, date=date,
83 c = commit(author=cs.author, date=date,
84 parents=[str(p.id) for p in cs.parents],
84 parents=[str(p.id) for p in cs.parents],
85 desc=cs.comment, branch=cs.branch or '')
85 desc=cs.comment, branch=cs.branch or '')
86 self.changeset[id] = c
86 self.changeset[id] = c
87 self.files[id] = files
87 self.files[id] = files
88
88
89 self.heads = self.lastbranch.values()
89 self.heads = self.lastbranch.values()
90 finally:
90 finally:
91 os.chdir(d)
91 os.chdir(d)
92
92
93 def _connect(self):
93 def _connect(self):
94 root = self.cvsroot
94 root = self.cvsroot
95 conntype = None
95 conntype = None
96 user, host = None, None
96 user, host = None, None
97 cmd = ['cvs', 'server']
97 cmd = ['cvs', 'server']
98
98
99 self.ui.status(_("connecting to %s\n") % root)
99 self.ui.status(_("connecting to %s\n") % root)
100
100
101 if root.startswith(":pserver:"):
101 if root.startswith(":pserver:"):
102 root = root[9:]
102 root = root[9:]
103 m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
103 m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
104 root)
104 root)
105 if m:
105 if m:
106 conntype = "pserver"
106 conntype = "pserver"
107 user, passw, serv, port, root = m.groups()
107 user, passw, serv, port, root = m.groups()
108 if not user:
108 if not user:
109 user = "anonymous"
109 user = "anonymous"
110 if not port:
110 if not port:
111 port = 2401
111 port = 2401
112 else:
112 else:
113 port = int(port)
113 port = int(port)
114 format0 = ":pserver:%s@%s:%s" % (user, serv, root)
114 format0 = ":pserver:%s@%s:%s" % (user, serv, root)
115 format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
115 format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
116
116
117 if not passw:
117 if not passw:
118 passw = "A"
118 passw = "A"
119 cvspass = os.path.expanduser("~/.cvspass")
119 cvspass = os.path.expanduser("~/.cvspass")
120 try:
120 try:
121 pf = open(cvspass)
121 pf = open(cvspass)
122 for line in pf.read().splitlines():
122 for line in pf.read().splitlines():
123 part1, part2 = line.split(' ', 1)
123 part1, part2 = line.split(' ', 1)
124 if part1 == '/1':
124 if part1 == '/1':
125 # /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
125 # /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
126 part1, part2 = part2.split(' ', 1)
126 part1, part2 = part2.split(' ', 1)
127 format = format1
127 format = format1
128 else:
128 else:
129 # :pserver:user@example.com:/cvsroot/foo Ah<Z
129 # :pserver:user@example.com:/cvsroot/foo Ah<Z
130 format = format0
130 format = format0
131 if part1 == format:
131 if part1 == format:
132 passw = part2
132 passw = part2
133 break
133 break
134 pf.close()
134 pf.close()
135 except IOError, inst:
135 except IOError, inst:
136 if inst.errno != errno.ENOENT:
136 if inst.errno != errno.ENOENT:
137 if not getattr(inst, 'filename', None):
137 if not getattr(inst, 'filename', None):
138 inst.filename = cvspass
138 inst.filename = cvspass
139 raise
139 raise
140
140
141 sck = socket.socket()
141 sck = socket.socket()
142 sck.connect((serv, port))
142 sck.connect((serv, port))
143 sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
143 sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
144 "END AUTH REQUEST", ""]))
144 "END AUTH REQUEST", ""]))
145 if sck.recv(128) != "I LOVE YOU\n":
145 if sck.recv(128) != "I LOVE YOU\n":
146 raise util.Abort(_("CVS pserver authentication failed"))
146 raise util.Abort(_("CVS pserver authentication failed"))
147
147
148 self.writep = self.readp = sck.makefile('r+')
148 self.writep = self.readp = sck.makefile('r+')
149
149
150 if not conntype and root.startswith(":local:"):
150 if not conntype and root.startswith(":local:"):
151 conntype = "local"
151 conntype = "local"
152 root = root[7:]
152 root = root[7:]
153
153
154 if not conntype:
154 if not conntype:
155 # :ext:user@host/home/user/path/to/cvsroot
155 # :ext:user@host/home/user/path/to/cvsroot
156 if root.startswith(":ext:"):
156 if root.startswith(":ext:"):
157 root = root[5:]
157 root = root[5:]
158 m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
158 m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
159 # Do not take Windows path "c:\foo\bar" for a connection strings
159 # Do not take Windows path "c:\foo\bar" for a connection strings
160 if os.path.isdir(root) or not m:
160 if os.path.isdir(root) or not m:
161 conntype = "local"
161 conntype = "local"
162 else:
162 else:
163 conntype = "rsh"
163 conntype = "rsh"
164 user, host, root = m.group(1), m.group(2), m.group(3)
164 user, host, root = m.group(1), m.group(2), m.group(3)
165
165
166 if conntype != "pserver":
166 if conntype != "pserver":
167 if conntype == "rsh":
167 if conntype == "rsh":
168 rsh = os.environ.get("CVS_RSH") or "ssh"
168 rsh = os.environ.get("CVS_RSH") or "ssh"
169 if user:
169 if user:
170 cmd = [rsh, '-l', user, host] + cmd
170 cmd = [rsh, '-l', user, host] + cmd
171 else:
171 else:
172 cmd = [rsh, host] + cmd
172 cmd = [rsh, host] + cmd
173
173
174 # popen2 does not support argument lists under Windows
174 # popen2 does not support argument lists under Windows
175 cmd = [util.shellquote(arg) for arg in cmd]
175 cmd = [util.shellquote(arg) for arg in cmd]
176 cmd = util.quotecommand(' '.join(cmd))
176 cmd = util.quotecommand(' '.join(cmd))
177 self.writep, self.readp = util.popen2(cmd)
177 self.writep, self.readp = util.popen2(cmd)
178
178
179 self.realroot = root
179 self.realroot = root
180
180
181 self.writep.write("Root %s\n" % root)
181 self.writep.write("Root %s\n" % root)
182 self.writep.write("Valid-responses ok error Valid-requests Mode"
182 self.writep.write("Valid-responses ok error Valid-requests Mode"
183 " M Mbinary E Checked-in Created Updated"
183 " M Mbinary E Checked-in Created Updated"
184 " Merged Removed\n")
184 " Merged Removed\n")
185 self.writep.write("valid-requests\n")
185 self.writep.write("valid-requests\n")
186 self.writep.flush()
186 self.writep.flush()
187 r = self.readp.readline()
187 r = self.readp.readline()
188 if not r.startswith("Valid-requests"):
188 if not r.startswith("Valid-requests"):
189 raise util.Abort(_('unexpected response from CVS server '
189 raise util.Abort(_('unexpected response from CVS server '
190 '(expected "Valid-requests", but got %r)')
190 '(expected "Valid-requests", but got %r)')
191 % r)
191 % r)
192 if "UseUnchanged" in r:
192 if "UseUnchanged" in r:
193 self.writep.write("UseUnchanged\n")
193 self.writep.write("UseUnchanged\n")
194 self.writep.flush()
194 self.writep.flush()
195 r = self.readp.readline()
195 r = self.readp.readline()
196
196
197 def getheads(self):
197 def getheads(self):
198 self._parse()
198 self._parse()
199 return self.heads
199 return self.heads
200
200
201 def getfile(self, name, rev):
201 def getfile(self, name, rev):
202
202
203 def chunkedread(fp, count):
203 def chunkedread(fp, count):
204 # file-objects returned by socked.makefile() do not handle
204 # file-objects returned by socked.makefile() do not handle
205 # large read() requests very well.
205 # large read() requests very well.
206 chunksize = 65536
206 chunksize = 65536
207 output = StringIO()
207 output = StringIO()
208 while count > 0:
208 while count > 0:
209 data = fp.read(min(count, chunksize))
209 data = fp.read(min(count, chunksize))
210 if not data:
210 if not data:
211 raise util.Abort(_("%d bytes missing from remote file")
211 raise util.Abort(_("%d bytes missing from remote file")
212 % count)
212 % count)
213 count -= len(data)
213 count -= len(data)
214 output.write(data)
214 output.write(data)
215 return output.getvalue()
215 return output.getvalue()
216
216
217 self._parse()
217 self._parse()
218 if rev.endswith("(DEAD)"):
218 if rev.endswith("(DEAD)"):
219 raise IOError
219 raise IOError
220
220
221 args = ("-N -P -kk -r %s --" % rev).split()
221 args = ("-N -P -kk -r %s --" % rev).split()
222 args.append(self.cvsrepo + '/' + name)
222 args.append(self.cvsrepo + '/' + name)
223 for x in args:
223 for x in args:
224 self.writep.write("Argument %s\n" % x)
224 self.writep.write("Argument %s\n" % x)
225 self.writep.write("Directory .\n%s\nco\n" % self.realroot)
225 self.writep.write("Directory .\n%s\nco\n" % self.realroot)
226 self.writep.flush()
226 self.writep.flush()
227
227
228 data = ""
228 data = ""
229 mode = None
229 mode = None
230 while 1:
230 while True:
231 line = self.readp.readline()
231 line = self.readp.readline()
232 if line.startswith("Created ") or line.startswith("Updated "):
232 if line.startswith("Created ") or line.startswith("Updated "):
233 self.readp.readline() # path
233 self.readp.readline() # path
234 self.readp.readline() # entries
234 self.readp.readline() # entries
235 mode = self.readp.readline()[:-1]
235 mode = self.readp.readline()[:-1]
236 count = int(self.readp.readline()[:-1])
236 count = int(self.readp.readline()[:-1])
237 data = chunkedread(self.readp, count)
237 data = chunkedread(self.readp, count)
238 elif line.startswith(" "):
238 elif line.startswith(" "):
239 data += line[1:]
239 data += line[1:]
240 elif line.startswith("M "):
240 elif line.startswith("M "):
241 pass
241 pass
242 elif line.startswith("Mbinary "):
242 elif line.startswith("Mbinary "):
243 count = int(self.readp.readline()[:-1])
243 count = int(self.readp.readline()[:-1])
244 data = chunkedread(self.readp, count)
244 data = chunkedread(self.readp, count)
245 else:
245 else:
246 if line == "ok\n":
246 if line == "ok\n":
247 if mode is None:
247 if mode is None:
248 raise util.Abort(_('malformed response from CVS'))
248 raise util.Abort(_('malformed response from CVS'))
249 return (data, "x" in mode and "x" or "")
249 return (data, "x" in mode and "x" or "")
250 elif line.startswith("E "):
250 elif line.startswith("E "):
251 self.ui.warn(_("cvs server: %s\n") % line[2:])
251 self.ui.warn(_("cvs server: %s\n") % line[2:])
252 elif line.startswith("Remove"):
252 elif line.startswith("Remove"):
253 self.readp.readline()
253 self.readp.readline()
254 else:
254 else:
255 raise util.Abort(_("unknown CVS response: %s") % line)
255 raise util.Abort(_("unknown CVS response: %s") % line)
256
256
257 def getchanges(self, rev):
257 def getchanges(self, rev):
258 self._parse()
258 self._parse()
259 return sorted(self.files[rev].iteritems()), {}
259 return sorted(self.files[rev].iteritems()), {}
260
260
261 def getcommit(self, rev):
261 def getcommit(self, rev):
262 self._parse()
262 self._parse()
263 return self.changeset[rev]
263 return self.changeset[rev]
264
264
265 def gettags(self):
265 def gettags(self):
266 self._parse()
266 self._parse()
267 return self.tags
267 return self.tags
268
268
269 def getchangedfiles(self, rev, i):
269 def getchangedfiles(self, rev, i):
270 self._parse()
270 self._parse()
271 return sorted(self.files[rev])
271 return sorted(self.files[rev])
@@ -1,1582 +1,1582
1 """ Multicast DNS Service Discovery for Python, v0.12
1 """ Multicast DNS Service Discovery for Python, v0.12
2 Copyright (C) 2003, Paul Scott-Murphy
2 Copyright (C) 2003, Paul Scott-Murphy
3
3
4 This module provides a framework for the use of DNS Service Discovery
4 This module provides a framework for the use of DNS Service Discovery
5 using IP multicast. It has been tested against the JRendezvous
5 using IP multicast. It has been tested against the JRendezvous
6 implementation from <a href="http://strangeberry.com">StrangeBerry</a>,
6 implementation from <a href="http://strangeberry.com">StrangeBerry</a>,
7 and against the mDNSResponder from Mac OS X 10.3.8.
7 and against the mDNSResponder from Mac OS X 10.3.8.
8
8
9 This library is free software; you can redistribute it and/or
9 This library is free software; you can redistribute it and/or
10 modify it under the terms of the GNU Lesser General Public
10 modify it under the terms of the GNU Lesser General Public
11 License as published by the Free Software Foundation; either
11 License as published by the Free Software Foundation; either
12 version 2.1 of the License, or (at your option) any later version.
12 version 2.1 of the License, or (at your option) any later version.
13
13
14 This library is distributed in the hope that it will be useful,
14 This library is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 Lesser General Public License for more details.
17 Lesser General Public License for more details.
18
18
19 You should have received a copy of the GNU Lesser General Public
19 You should have received a copy of the GNU Lesser General Public
20 License along with this library; if not, write to the Free Software
20 License along with this library; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22
22
23 """
23 """
24
24
25 """0.12 update - allow selection of binding interface
25 """0.12 update - allow selection of binding interface
26 typo fix - Thanks A. M. Kuchlingi
26 typo fix - Thanks A. M. Kuchlingi
27 removed all use of word 'Rendezvous' - this is an API change"""
27 removed all use of word 'Rendezvous' - this is an API change"""
28
28
29 """0.11 update - correction to comments for addListener method
29 """0.11 update - correction to comments for addListener method
30 support for new record types seen from OS X
30 support for new record types seen from OS X
31 - IPv6 address
31 - IPv6 address
32 - hostinfo
32 - hostinfo
33 ignore unknown DNS record types
33 ignore unknown DNS record types
34 fixes to name decoding
34 fixes to name decoding
35 works alongside other processes using port 5353 (e.g. on Mac OS X)
35 works alongside other processes using port 5353 (e.g. on Mac OS X)
36 tested against Mac OS X 10.3.2's mDNSResponder
36 tested against Mac OS X 10.3.2's mDNSResponder
37 corrections to removal of list entries for service browser"""
37 corrections to removal of list entries for service browser"""
38
38
39 """0.10 update - Jonathon Paisley contributed these corrections:
39 """0.10 update - Jonathon Paisley contributed these corrections:
40 always multicast replies, even when query is unicast
40 always multicast replies, even when query is unicast
41 correct a pointer encoding problem
41 correct a pointer encoding problem
42 can now write records in any order
42 can now write records in any order
43 traceback shown on failure
43 traceback shown on failure
44 better TXT record parsing
44 better TXT record parsing
45 server is now separate from name
45 server is now separate from name
46 can cancel a service browser
46 can cancel a service browser
47
47
48 modified some unit tests to accommodate these changes"""
48 modified some unit tests to accommodate these changes"""
49
49
50 """0.09 update - remove all records on service unregistration
50 """0.09 update - remove all records on service unregistration
51 fix DOS security problem with readName"""
51 fix DOS security problem with readName"""
52
52
53 """0.08 update - changed licensing to LGPL"""
53 """0.08 update - changed licensing to LGPL"""
54
54
55 """0.07 update - faster shutdown on engine
55 """0.07 update - faster shutdown on engine
56 pointer encoding of outgoing names
56 pointer encoding of outgoing names
57 ServiceBrowser now works
57 ServiceBrowser now works
58 new unit tests"""
58 new unit tests"""
59
59
60 """0.06 update - small improvements with unit tests
60 """0.06 update - small improvements with unit tests
61 added defined exception types
61 added defined exception types
62 new style objects
62 new style objects
63 fixed hostname/interface problem
63 fixed hostname/interface problem
64 fixed socket timeout problem
64 fixed socket timeout problem
65 fixed addServiceListener() typo bug
65 fixed addServiceListener() typo bug
66 using select() for socket reads
66 using select() for socket reads
67 tested on Debian unstable with Python 2.2.2"""
67 tested on Debian unstable with Python 2.2.2"""
68
68
69 """0.05 update - ensure case insensitivty on domain names
69 """0.05 update - ensure case insensitivty on domain names
70 support for unicast DNS queries"""
70 support for unicast DNS queries"""
71
71
72 """0.04 update - added some unit tests
72 """0.04 update - added some unit tests
73 added __ne__ adjuncts where required
73 added __ne__ adjuncts where required
74 ensure names end in '.local.'
74 ensure names end in '.local.'
75 timeout on receiving socket for clean shutdown"""
75 timeout on receiving socket for clean shutdown"""
76
76
77 __author__ = "Paul Scott-Murphy"
77 __author__ = "Paul Scott-Murphy"
78 __email__ = "paul at scott dash murphy dot com"
78 __email__ = "paul at scott dash murphy dot com"
79 __version__ = "0.12"
79 __version__ = "0.12"
80
80
81 import string
81 import string
82 import time
82 import time
83 import struct
83 import struct
84 import socket
84 import socket
85 import threading
85 import threading
86 import select
86 import select
87 import traceback
87 import traceback
88
88
89 __all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
89 __all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
90
90
91 # hook for threads
91 # hook for threads
92
92
93 globals()['_GLOBAL_DONE'] = 0
93 globals()['_GLOBAL_DONE'] = 0
94
94
95 # Some timing constants
95 # Some timing constants
96
96
97 _UNREGISTER_TIME = 125
97 _UNREGISTER_TIME = 125
98 _CHECK_TIME = 175
98 _CHECK_TIME = 175
99 _REGISTER_TIME = 225
99 _REGISTER_TIME = 225
100 _LISTENER_TIME = 200
100 _LISTENER_TIME = 200
101 _BROWSER_TIME = 500
101 _BROWSER_TIME = 500
102
102
103 # Some DNS constants
103 # Some DNS constants
104
104
105 _MDNS_ADDR = '224.0.0.251'
105 _MDNS_ADDR = '224.0.0.251'
106 _MDNS_PORT = 5353;
106 _MDNS_PORT = 5353;
107 _DNS_PORT = 53;
107 _DNS_PORT = 53;
108 _DNS_TTL = 60 * 60; # one hour default TTL
108 _DNS_TTL = 60 * 60; # one hour default TTL
109
109
110 _MAX_MSG_TYPICAL = 1460 # unused
110 _MAX_MSG_TYPICAL = 1460 # unused
111 _MAX_MSG_ABSOLUTE = 8972
111 _MAX_MSG_ABSOLUTE = 8972
112
112
113 _FLAGS_QR_MASK = 0x8000 # query response mask
113 _FLAGS_QR_MASK = 0x8000 # query response mask
114 _FLAGS_QR_QUERY = 0x0000 # query
114 _FLAGS_QR_QUERY = 0x0000 # query
115 _FLAGS_QR_RESPONSE = 0x8000 # response
115 _FLAGS_QR_RESPONSE = 0x8000 # response
116
116
117 _FLAGS_AA = 0x0400 # Authorative answer
117 _FLAGS_AA = 0x0400 # Authorative answer
118 _FLAGS_TC = 0x0200 # Truncated
118 _FLAGS_TC = 0x0200 # Truncated
119 _FLAGS_RD = 0x0100 # Recursion desired
119 _FLAGS_RD = 0x0100 # Recursion desired
120 _FLAGS_RA = 0x8000 # Recursion available
120 _FLAGS_RA = 0x8000 # Recursion available
121
121
122 _FLAGS_Z = 0x0040 # Zero
122 _FLAGS_Z = 0x0040 # Zero
123 _FLAGS_AD = 0x0020 # Authentic data
123 _FLAGS_AD = 0x0020 # Authentic data
124 _FLAGS_CD = 0x0010 # Checking disabled
124 _FLAGS_CD = 0x0010 # Checking disabled
125
125
126 _CLASS_IN = 1
126 _CLASS_IN = 1
127 _CLASS_CS = 2
127 _CLASS_CS = 2
128 _CLASS_CH = 3
128 _CLASS_CH = 3
129 _CLASS_HS = 4
129 _CLASS_HS = 4
130 _CLASS_NONE = 254
130 _CLASS_NONE = 254
131 _CLASS_ANY = 255
131 _CLASS_ANY = 255
132 _CLASS_MASK = 0x7FFF
132 _CLASS_MASK = 0x7FFF
133 _CLASS_UNIQUE = 0x8000
133 _CLASS_UNIQUE = 0x8000
134
134
135 _TYPE_A = 1
135 _TYPE_A = 1
136 _TYPE_NS = 2
136 _TYPE_NS = 2
137 _TYPE_MD = 3
137 _TYPE_MD = 3
138 _TYPE_MF = 4
138 _TYPE_MF = 4
139 _TYPE_CNAME = 5
139 _TYPE_CNAME = 5
140 _TYPE_SOA = 6
140 _TYPE_SOA = 6
141 _TYPE_MB = 7
141 _TYPE_MB = 7
142 _TYPE_MG = 8
142 _TYPE_MG = 8
143 _TYPE_MR = 9
143 _TYPE_MR = 9
144 _TYPE_NULL = 10
144 _TYPE_NULL = 10
145 _TYPE_WKS = 11
145 _TYPE_WKS = 11
146 _TYPE_PTR = 12
146 _TYPE_PTR = 12
147 _TYPE_HINFO = 13
147 _TYPE_HINFO = 13
148 _TYPE_MINFO = 14
148 _TYPE_MINFO = 14
149 _TYPE_MX = 15
149 _TYPE_MX = 15
150 _TYPE_TXT = 16
150 _TYPE_TXT = 16
151 _TYPE_AAAA = 28
151 _TYPE_AAAA = 28
152 _TYPE_SRV = 33
152 _TYPE_SRV = 33
153 _TYPE_ANY = 255
153 _TYPE_ANY = 255
154
154
155 # Mapping constants to names
155 # Mapping constants to names
156
156
157 _CLASSES = { _CLASS_IN : "in",
157 _CLASSES = { _CLASS_IN : "in",
158 _CLASS_CS : "cs",
158 _CLASS_CS : "cs",
159 _CLASS_CH : "ch",
159 _CLASS_CH : "ch",
160 _CLASS_HS : "hs",
160 _CLASS_HS : "hs",
161 _CLASS_NONE : "none",
161 _CLASS_NONE : "none",
162 _CLASS_ANY : "any" }
162 _CLASS_ANY : "any" }
163
163
164 _TYPES = { _TYPE_A : "a",
164 _TYPES = { _TYPE_A : "a",
165 _TYPE_NS : "ns",
165 _TYPE_NS : "ns",
166 _TYPE_MD : "md",
166 _TYPE_MD : "md",
167 _TYPE_MF : "mf",
167 _TYPE_MF : "mf",
168 _TYPE_CNAME : "cname",
168 _TYPE_CNAME : "cname",
169 _TYPE_SOA : "soa",
169 _TYPE_SOA : "soa",
170 _TYPE_MB : "mb",
170 _TYPE_MB : "mb",
171 _TYPE_MG : "mg",
171 _TYPE_MG : "mg",
172 _TYPE_MR : "mr",
172 _TYPE_MR : "mr",
173 _TYPE_NULL : "null",
173 _TYPE_NULL : "null",
174 _TYPE_WKS : "wks",
174 _TYPE_WKS : "wks",
175 _TYPE_PTR : "ptr",
175 _TYPE_PTR : "ptr",
176 _TYPE_HINFO : "hinfo",
176 _TYPE_HINFO : "hinfo",
177 _TYPE_MINFO : "minfo",
177 _TYPE_MINFO : "minfo",
178 _TYPE_MX : "mx",
178 _TYPE_MX : "mx",
179 _TYPE_TXT : "txt",
179 _TYPE_TXT : "txt",
180 _TYPE_AAAA : "quada",
180 _TYPE_AAAA : "quada",
181 _TYPE_SRV : "srv",
181 _TYPE_SRV : "srv",
182 _TYPE_ANY : "any" }
182 _TYPE_ANY : "any" }
183
183
184 # utility functions
184 # utility functions
185
185
186 def currentTimeMillis():
186 def currentTimeMillis():
187 """Current system time in milliseconds"""
187 """Current system time in milliseconds"""
188 return time.time() * 1000
188 return time.time() * 1000
189
189
190 # Exceptions
190 # Exceptions
191
191
192 class NonLocalNameException(Exception):
192 class NonLocalNameException(Exception):
193 pass
193 pass
194
194
195 class NonUniqueNameException(Exception):
195 class NonUniqueNameException(Exception):
196 pass
196 pass
197
197
198 class NamePartTooLongException(Exception):
198 class NamePartTooLongException(Exception):
199 pass
199 pass
200
200
201 class AbstractMethodException(Exception):
201 class AbstractMethodException(Exception):
202 pass
202 pass
203
203
204 class BadTypeInNameException(Exception):
204 class BadTypeInNameException(Exception):
205 pass
205 pass
206
206
207 class BadDomainName(Exception):
207 class BadDomainName(Exception):
208 def __init__(self, pos):
208 def __init__(self, pos):
209 Exception.__init__(self, "at position %s" % pos)
209 Exception.__init__(self, "at position %s" % pos)
210
210
211 class BadDomainNameCircular(BadDomainName):
211 class BadDomainNameCircular(BadDomainName):
212 pass
212 pass
213
213
214 # implementation classes
214 # implementation classes
215
215
216 class DNSEntry(object):
216 class DNSEntry(object):
217 """A DNS entry"""
217 """A DNS entry"""
218
218
219 def __init__(self, name, type, clazz):
219 def __init__(self, name, type, clazz):
220 self.key = string.lower(name)
220 self.key = string.lower(name)
221 self.name = name
221 self.name = name
222 self.type = type
222 self.type = type
223 self.clazz = clazz & _CLASS_MASK
223 self.clazz = clazz & _CLASS_MASK
224 self.unique = (clazz & _CLASS_UNIQUE) != 0
224 self.unique = (clazz & _CLASS_UNIQUE) != 0
225
225
226 def __eq__(self, other):
226 def __eq__(self, other):
227 """Equality test on name, type, and class"""
227 """Equality test on name, type, and class"""
228 if isinstance(other, DNSEntry):
228 if isinstance(other, DNSEntry):
229 return self.name == other.name and self.type == other.type and self.clazz == other.clazz
229 return self.name == other.name and self.type == other.type and self.clazz == other.clazz
230 return 0
230 return 0
231
231
232 def __ne__(self, other):
232 def __ne__(self, other):
233 """Non-equality test"""
233 """Non-equality test"""
234 return not self.__eq__(other)
234 return not self.__eq__(other)
235
235
236 def getClazz(self, clazz):
236 def getClazz(self, clazz):
237 """Class accessor"""
237 """Class accessor"""
238 try:
238 try:
239 return _CLASSES[clazz]
239 return _CLASSES[clazz]
240 except KeyError:
240 except KeyError:
241 return "?(%s)" % (clazz)
241 return "?(%s)" % (clazz)
242
242
243 def getType(self, type):
243 def getType(self, type):
244 """Type accessor"""
244 """Type accessor"""
245 try:
245 try:
246 return _TYPES[type]
246 return _TYPES[type]
247 except KeyError:
247 except KeyError:
248 return "?(%s)" % (type)
248 return "?(%s)" % (type)
249
249
250 def toString(self, hdr, other):
250 def toString(self, hdr, other):
251 """String representation with additional information"""
251 """String representation with additional information"""
252 result = "%s[%s,%s" % (hdr, self.getType(self.type), self.getClazz(self.clazz))
252 result = "%s[%s,%s" % (hdr, self.getType(self.type), self.getClazz(self.clazz))
253 if self.unique:
253 if self.unique:
254 result += "-unique,"
254 result += "-unique,"
255 else:
255 else:
256 result += ","
256 result += ","
257 result += self.name
257 result += self.name
258 if other is not None:
258 if other is not None:
259 result += ",%s]" % (other)
259 result += ",%s]" % (other)
260 else:
260 else:
261 result += "]"
261 result += "]"
262 return result
262 return result
263
263
264 class DNSQuestion(DNSEntry):
264 class DNSQuestion(DNSEntry):
265 """A DNS question entry"""
265 """A DNS question entry"""
266
266
267 def __init__(self, name, type, clazz):
267 def __init__(self, name, type, clazz):
268 if not name.endswith(".local."):
268 if not name.endswith(".local."):
269 raise NonLocalNameException(name)
269 raise NonLocalNameException(name)
270 DNSEntry.__init__(self, name, type, clazz)
270 DNSEntry.__init__(self, name, type, clazz)
271
271
272 def answeredBy(self, rec):
272 def answeredBy(self, rec):
273 """Returns true if the question is answered by the record"""
273 """Returns true if the question is answered by the record"""
274 return self.clazz == rec.clazz and (self.type == rec.type or self.type == _TYPE_ANY) and self.name == rec.name
274 return self.clazz == rec.clazz and (self.type == rec.type or self.type == _TYPE_ANY) and self.name == rec.name
275
275
276 def __repr__(self):
276 def __repr__(self):
277 """String representation"""
277 """String representation"""
278 return DNSEntry.toString(self, "question", None)
278 return DNSEntry.toString(self, "question", None)
279
279
280
280
class DNSRecord(DNSEntry):
    """A DNS record - like a DNS entry, but has a TTL"""

    def __init__(self, name, type, clazz, ttl):
        DNSEntry.__init__(self, name, type, clazz)
        self.ttl = ttl                      # time to live, in seconds
        self.created = currentTimeMillis()  # creation timestamp (ms)

    def __eq__(self, other):
        """Tests equality as per DNSRecord"""
        if isinstance(other, DNSRecord):
            return DNSEntry.__eq__(self, other)
        return False

    def suppressedBy(self, msg):
        """Returns true if any answer in a message can suffice for the
        information held in this record."""
        for record in msg.answers:
            if self.suppressedByAnswer(record):
                return True
        return False

    def suppressedByAnswer(self, other):
        """Returns true if another record has same name, type and class,
        and if its TTL is at least half of this record's."""
        return self == other and other.ttl > (self.ttl / 2)

    def getExpirationTime(self, percent):
        """Returns the time at which this record will have expired
        by a certain percentage."""
        # ttl is in seconds, created in milliseconds:
        # percent * ttl * 10 == (percent / 100) * (ttl * 1000)
        return self.created + (percent * self.ttl * 10)

    def getRemainingTTL(self, now):
        """Returns the remaining TTL in seconds."""
        return max(0, (self.getExpirationTime(100) - now) / 1000)

    def isExpired(self, now):
        """Returns true if this record has expired."""
        return self.getExpirationTime(100) <= now

    def isStale(self, now):
        """Returns true if this record is at least half way expired."""
        return self.getExpirationTime(50) <= now

    def resetTTL(self, other):
        """Sets this record's TTL and created time to that of
        another record."""
        self.created = other.created
        self.ttl = other.ttl

    def write(self, out):
        """Abstract method; concrete record types serialize themselves."""
        raise AbstractMethodException

    def toString(self, other):
        """String representation with additional information"""
        arg = "%s/%s,%s" % (self.ttl,
                            self.getRemainingTTL(currentTimeMillis()), other)
        return DNSEntry.toString(self, "record", arg)
341
341
class DNSAddress(DNSRecord):
    """A DNS address (A/AAAA) record carrying a packed binary address."""

    def __init__(self, name, type, clazz, ttl, address):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.address = address  # packed binary form, as read off the wire

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeString(self.address, len(self.address))

    def __eq__(self, other):
        """Tests equality on address"""
        if isinstance(other, DNSAddress):
            return self.address == other.address
        return False

    def __repr__(self):
        """String representation"""
        try:
            return socket.inet_ntoa(self.address)
        except Exception:
            # Not a 4-byte IPv4 address (e.g. AAAA); show the raw value.
            return self.address
365
365
class DNSHinfo(DNSRecord):
    """A DNS host information (HINFO) record"""

    def __init__(self, name, type, clazz, ttl, cpu, os):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.cpu = cpu  # CPU description string
        self.os = os    # operating system description string

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeString(self.cpu, len(self.cpu))
        out.writeString(self.os, len(self.os))

    def __eq__(self, other):
        """Tests equality on cpu and os"""
        if isinstance(other, DNSHinfo):
            return self.cpu == other.cpu and self.os == other.os
        return False

    def __repr__(self):
        """String representation"""
        return self.cpu + " " + self.os
388
388
class DNSPointer(DNSRecord):
    """A DNS pointer (PTR/CNAME) record"""

    def __init__(self, name, type, clazz, ttl, alias):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.alias = alias  # the domain name this record points at

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeName(self.alias)

    def __eq__(self, other):
        """Tests equality on alias"""
        if isinstance(other, DNSPointer):
            return self.alias == other.alias
        return False

    def __repr__(self):
        """String representation"""
        return self.toString(self.alias)
409
409
class DNSText(DNSRecord):
    """A DNS text (TXT) record"""

    def __init__(self, name, type, clazz, ttl, text):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.text = text  # raw text payload

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeString(self.text, len(self.text))

    def __eq__(self, other):
        """Tests equality on text"""
        if isinstance(other, DNSText):
            return self.text == other.text
        return False

    def __repr__(self):
        """String representation"""
        # Truncate long payloads so reprs stay readable.
        if len(self.text) > 10:
            return self.toString(self.text[:7] + "...")
        return self.toString(self.text)
433
433
class DNSService(DNSRecord):
    """A DNS service (SRV) record"""

    def __init__(self, name, type, clazz, ttl, priority, weight, port, server):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.priority = priority
        self.weight = weight
        self.port = port
        self.server = server  # target host name providing the service

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeShort(self.priority)
        out.writeShort(self.weight)
        out.writeShort(self.port)
        out.writeName(self.server)

    def __eq__(self, other):
        """Tests equality on priority, weight, port and server"""
        if isinstance(other, DNSService):
            return (self.priority == other.priority
                    and self.weight == other.weight
                    and self.port == other.port
                    and self.server == other.server)
        return False

    def __repr__(self):
        """String representation"""
        return self.toString("%s:%s" % (self.server, self.port))
460
460
class DNSIncoming(object):
    """Object representation of an incoming DNS packet"""

    def __init__(self, data):
        """Constructor from string holding bytes of packet"""
        self.offset = 0
        self.data = data
        self.questions = []
        self.answers = []
        self.numQuestions = 0
        self.numAnswers = 0
        self.numAuthorities = 0
        self.numAdditionals = 0

        # Parse eagerly: header first, then the question and
        # answer/authority/additional sections.
        self.readHeader()
        self.readQuestions()
        self.readOthers()

    def readHeader(self):
        """Reads header portion of packet"""
        format = '!HHHHHH'
        length = struct.calcsize(format)
        info = struct.unpack(format,
                             self.data[self.offset:self.offset + length])
        self.offset += length

        self.id = info[0]
        self.flags = info[1]
        self.numQuestions = info[2]
        self.numAnswers = info[3]
        self.numAuthorities = info[4]
        self.numAdditionals = info[5]

    def readQuestions(self):
        """Reads questions section of packet"""
        format = '!HH'
        length = struct.calcsize(format)
        for i in range(0, self.numQuestions):
            name = self.readName()
            info = struct.unpack(format,
                                 self.data[self.offset:self.offset + length])
            self.offset += length

            try:
                question = DNSQuestion(name, info[0], info[1])
                self.questions.append(question)
            except NonLocalNameException:
                # Questions outside .local. are deliberately ignored.
                pass

    def readInt(self):
        """Reads an integer from the packet"""
        format = '!I'
        length = struct.calcsize(format)
        info = struct.unpack(format,
                             self.data[self.offset:self.offset + length])
        self.offset += length
        return info[0]

    def readCharacterString(self):
        """Reads a character string (length byte + data) from the packet"""
        length = ord(self.data[self.offset])
        self.offset += 1
        return self.readString(length)

    def readString(self, len):
        """Reads a string of a given length from the packet"""
        format = '!' + str(len) + 's'
        length = struct.calcsize(format)
        info = struct.unpack(format,
                             self.data[self.offset:self.offset + length])
        self.offset += length
        return info[0]

    def readUnsignedShort(self):
        """Reads an unsigned short from the packet"""
        format = '!H'
        length = struct.calcsize(format)
        info = struct.unpack(format,
                             self.data[self.offset:self.offset + length])
        self.offset += length
        return info[0]

    def readOthers(self):
        """Reads the answers, authorities and additionals section of the packet"""
        format = '!HHiH'
        length = struct.calcsize(format)
        n = self.numAnswers + self.numAuthorities + self.numAdditionals
        for i in range(0, n):
            domain = self.readName()
            # info is (type, class, ttl, rdata-length)
            info = struct.unpack(format,
                                 self.data[self.offset:self.offset + length])
            self.offset += length

            rec = None
            if info[0] == _TYPE_A:
                rec = DNSAddress(domain, info[0], info[1], info[2],
                                 self.readString(4))
            elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR:
                rec = DNSPointer(domain, info[0], info[1], info[2],
                                 self.readName())
            elif info[0] == _TYPE_TXT:
                rec = DNSText(domain, info[0], info[1], info[2],
                              self.readString(info[3]))
            elif info[0] == _TYPE_SRV:
                rec = DNSService(domain, info[0], info[1], info[2],
                                 self.readUnsignedShort(),
                                 self.readUnsignedShort(),
                                 self.readUnsignedShort(),
                                 self.readName())
            elif info[0] == _TYPE_HINFO:
                rec = DNSHinfo(domain, info[0], info[1], info[2],
                               self.readCharacterString(),
                               self.readCharacterString())
            elif info[0] == _TYPE_AAAA:
                rec = DNSAddress(domain, info[0], info[1], info[2],
                                 self.readString(16))
            else:
                # Try to ignore types we don't know about
                # this may mean the rest of the name is
                # unable to be parsed, and may show errors
                # so this is left for debugging. New types
                # encountered need to be parsed properly.
                #
                #print "UNKNOWN TYPE = " + str(info[0])
                #raise BadTypeInNameException
                self.offset += info[3]

            if rec is not None:
                self.answers.append(rec)

    def isQuery(self):
        """Returns true if this is a query"""
        return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY

    def isResponse(self):
        """Returns true if this is a response"""
        return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE

    def readUTF(self, offset, len):
        """Reads a UTF-8 string of a given length from the packet"""
        return self.data[offset:offset + len].decode('utf-8')

    def readName(self):
        """Reads a domain name from the packet, following compression
        pointers and guarding against pointer loops."""
        result = ''
        off = self.offset
        next = -1   # offset to resume at after the first pointer jump
        first = off

        while True:
            len = ord(self.data[off])
            off += 1
            if len == 0:
                # Zero-length label terminates the name.
                break
            t = len & 0xC0
            if t == 0x00:
                # Plain label of `len` bytes.
                result = ''.join((result, self.readUTF(off, len) + '.'))
                off += len
            elif t == 0xC0:
                # Compression pointer: 14-bit offset into the packet.
                if next < 0:
                    next = off + 1
                off = ((len & 0x3F) << 8) | ord(self.data[off])
                if off >= first:
                    # Pointers must go strictly backwards, else we loop.
                    raise BadDomainNameCircular(off)
                first = off
            else:
                raise BadDomainName(off)

        if next >= 0:
            self.offset = next
        else:
            self.offset = off

        return result
619
619
620
620
class DNSOutgoing(object):
    """Object representation of an outgoing packet"""

    def __init__(self, flags, multicast = 1):
        self.finished = False  # set True once packet() has been built
        self.id = 0
        self.multicast = multicast
        self.flags = flags
        self.names = {}        # name -> offset, for name compression
        self.data = []         # list of packed byte chunks
        self.size = 12         # running size, starting at header length

        self.questions = []
        self.answers = []      # list of (record, now) pairs
        self.authorities = []
        self.additionals = []

    def addQuestion(self, record):
        """Adds a question"""
        self.questions.append(record)

    def addAnswer(self, inp, record):
        """Adds an answer, unless it is suppressed by the incoming message"""
        if not record.suppressedBy(inp):
            self.addAnswerAtTime(record, 0)

    def addAnswerAtTime(self, record, now):
        """Adds an answer if it does not expire by a certain time"""
        if record is not None:
            if now == 0 or not record.isExpired(now):
                self.answers.append((record, now))

    def addAuthorativeAnswer(self, record):
        """Adds an authoritative answer"""
        self.authorities.append(record)

    def addAdditionalAnswer(self, record):
        """Adds an additional answer"""
        self.additionals.append(record)

    def writeByte(self, value):
        """Writes a single byte to the packet"""
        format = '!c'
        self.data.append(struct.pack(format, chr(value)))
        self.size += 1

    def insertShort(self, index, value):
        """Inserts an unsigned short in a certain position in the packet"""
        format = '!H'
        self.data.insert(index, struct.pack(format, value))
        self.size += 2

    def writeShort(self, value):
        """Writes an unsigned short to the packet"""
        format = '!H'
        self.data.append(struct.pack(format, value))
        self.size += 2

    def writeInt(self, value):
        """Writes an unsigned integer to the packet"""
        format = '!I'
        self.data.append(struct.pack(format, int(value)))
        self.size += 4

    def writeString(self, value, length):
        """Writes a string to the packet"""
        format = '!' + str(length) + 's'
        self.data.append(struct.pack(format, value))
        self.size += length

    def writeUTF(self, s):
        """Writes a UTF-8 string of a given length to the packet"""
        utfstr = s.encode('utf-8')
        length = len(utfstr)
        # DNS labels are limited in length.
        if length > 64:
            raise NamePartTooLongException
        self.writeByte(length)
        self.writeString(utfstr, length)

    def writeName(self, name):
        """Writes a domain name to the packet, using name compression
        when the name has already been written."""

        try:
            # Find existing instance of this name in packet
            #
            index = self.names[name]
        except KeyError:
            # No record of this name already, so write it
            # out as normal, recording the location of the name
            # for future pointers to it.
            #
            self.names[name] = self.size
            parts = name.split('.')
            if parts[-1] == '':
                parts = parts[:-1]
            for part in parts:
                self.writeUTF(part)
            self.writeByte(0)
            return

        # An index was found, so write a pointer to it
        #
        self.writeByte((index >> 8) | 0xC0)
        self.writeByte(index)

    def writeQuestion(self, question):
        """Writes a question to the packet"""
        self.writeName(question.name)
        self.writeShort(question.type)
        self.writeShort(question.clazz)

    def writeRecord(self, record, now):
        """Writes a record (answer, authoritative answer, additional) to
        the packet"""
        self.writeName(record.name)
        self.writeShort(record.type)
        if record.unique and self.multicast:
            self.writeShort(record.clazz | _CLASS_UNIQUE)
        else:
            self.writeShort(record.clazz)
        if now == 0:
            self.writeInt(record.ttl)
        else:
            self.writeInt(record.getRemainingTTL(now))
        index = len(self.data)
        # Adjust size for the short we will write before this record
        #
        self.size += 2
        record.write(self)
        self.size -= 2

        length = len(''.join(self.data[index:]))
        self.insertShort(index, length) # Here is the short we adjusted for

    def packet(self):
        """Returns a string containing the packet's bytes

        No further parts should be added to the packet once this
        is done."""
        if not self.finished:
            self.finished = True
            for question in self.questions:
                self.writeQuestion(question)
            for answer, time in self.answers:
                self.writeRecord(answer, time)
            for authority in self.authorities:
                self.writeRecord(authority, 0)
            for additional in self.additionals:
                self.writeRecord(additional, 0)

            # Header fields are inserted in reverse order so they end
            # up at the front of the packet in the right sequence.
            self.insertShort(0, len(self.additionals))
            self.insertShort(0, len(self.authorities))
            self.insertShort(0, len(self.answers))
            self.insertShort(0, len(self.questions))
            self.insertShort(0, self.flags)
            if self.multicast:
                # Multicast responses always use ID 0.
                self.insertShort(0, 0)
            else:
                self.insertShort(0, self.id)
        return ''.join(self.data)
781
781
782
782
class DNSCache(object):
    """A cache of DNS entries, keyed by entry key, with a list of
    entries per key."""

    def __init__(self):
        self.cache = {}  # maps entry.key -> list of entries

    def add(self, entry):
        """Adds an entry"""
        try:
            records = self.cache[entry.key]
        except KeyError:
            records = self.cache[entry.key] = []
        records.append(entry)

    def remove(self, entry):
        """Removes an entry; unknown keys are silently ignored"""
        try:
            records = self.cache[entry.key]
            records.remove(entry)
        except KeyError:
            pass

    def get(self, entry):
        """Gets an entry by key. Will return None if there is no
        matching entry."""
        try:
            records = self.cache[entry.key]
            return records[records.index(entry)]
        except (KeyError, ValueError):
            return None

    def getByDetails(self, name, type, clazz):
        """Gets an entry by details. Will return None if there is
        no matching entry."""
        entry = DNSEntry(name, type, clazz)
        return self.get(entry)

    def entriesWithName(self, name):
        """Returns a list of entries whose key matches the name."""
        try:
            return self.cache[name]
        except KeyError:
            return []

    def entries(self):
        """Returns a list of all entries"""
        # Concatenate all per-key lists; no reliance on reduce().
        result = []
        for records in self.cache.values():
            result.extend(records)
        return result
834
834
835
835
class Engine(threading.Thread):
    """An engine wraps read access to sockets, allowing objects that
    need to receive data from sockets to be called back when the
    sockets are ready.

    A reader needs a handle_read() method, which is called when the socket
    it is interested in is ready for reading.

    Writers are not implemented here, because we only send short
    packets.
    """

    def __init__(self, zeroconf):
        # Worker thread; starts itself at the end of the constructor.
        threading.Thread.__init__(self)
        self.zeroconf = zeroconf
        self.readers = {} # maps socket to reader
        self.timeout = 5
        # Guards self.readers and doubles as the wakeup signal for run().
        self.condition = threading.Condition()
        self.start()

    def run(self):
        # Poll loop: runs until the module-level _GLOBAL_DONE flag is set
        # (read via globals() so all threads observe shutdown).
        while not globals()['_GLOBAL_DONE']:
            rs = self.getReaders()
            if len(rs) == 0:
                # No sockets to manage, but we wait for the timeout
                # or addition of a socket
                #
                self.condition.acquire()
                self.condition.wait(self.timeout)
                self.condition.release()
            else:
                try:
                    rr, wr, er = select.select(rs, [], [], self.timeout)
                    # NOTE(review): the loop variable 'socket' shadows the
                    # imported socket module inside this body.
                    for socket in rr:
                        try:
                            self.readers[socket].handle_read()
                        except Exception:
                            # Only report reader errors while the engine is
                            # still meant to be running; stay alive either way.
                            if not globals()['_GLOBAL_DONE']:
                                traceback.print_exc()
                except Exception:
                    # select() can fail (e.g. during shutdown when sockets
                    # close underneath it); treat the poll as best-effort.
                    pass

    def getReaders(self):
        # Snapshot of the registered sockets, taken under the lock.
        self.condition.acquire()
        result = self.readers.keys()
        self.condition.release()
        return result

    def addReader(self, reader, socket):
        # Register *reader* for *socket* and wake the poll loop.
        self.condition.acquire()
        self.readers[socket] = reader
        self.condition.notify()
        self.condition.release()

    def delReader(self, socket):
        # Unregister the reader for *socket* and wake the poll loop.
        self.condition.acquire()
        del(self.readers[socket])
        self.condition.notify()
        self.condition.release()

    def notify(self):
        # Wake the poll loop without changing the reader set.
        self.condition.acquire()
        self.condition.notify()
        self.condition.release()
900
900
class Listener(object):
    """Receives multicast DNS traffic on behalf of a Zeroconf instance.

    The instance registers itself with the Engine so that handle_read()
    is invoked whenever the shared socket has data, which lets incoming
    records be cached as they arrive."""

    def __init__(self, zeroconf):
        self.zeroconf = zeroconf
        self.zeroconf.engine.addReader(self, self.zeroconf.socket)

    def handle_read(self):
        payload, (sender, sport) = self.zeroconf.socket.recvfrom(_MAX_MSG_ABSOLUTE)
        self.data = payload
        msg = DNSIncoming(payload)
        if not msg.isQuery():
            # Not a question: treat it as a response and cache its records.
            self.zeroconf.handleResponse(msg)
            return
        # Responses to queries always go to the multicast group; a query
        # arriving from the plain DNS port is additionally answered via
        # unicast straight back to the sender.
        if sport == _MDNS_PORT:
            self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
        elif sport == _DNS_PORT:
            self.zeroconf.handleQuery(msg, sender, sport)
            self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
930
930
931
931
class Reaper(threading.Thread):
    """Background thread that periodically sweeps expired entries out of
    the Zeroconf cache."""

    def __init__(self, zeroconf):
        threading.Thread.__init__(self)
        self.zeroconf = zeroconf
        self.start()

    def run(self):
        while True:
            # Sleep up to ten seconds (or until woken), then check for
            # shutdown before sweeping.
            self.zeroconf.wait(10 * 1000)
            if globals()['_GLOBAL_DONE']:
                return
            now = currentTimeMillis()
            for entry in self.zeroconf.cache.entries():
                if entry.isExpired(now):
                    # Give listeners a final update, then evict the entry.
                    self.zeroconf.updateRecord(now, entry)
                    self.zeroconf.cache.remove(entry)
951
951
952
952
class ServiceBrowser(threading.Thread):
    """Used to browse for a service of a specific type.

    The listener object will have its addService() and
    removeService() methods called when this browser
    discovers changes in the services availability."""

    def __init__(self, zeroconf, type, listener):
        """Creates a browser for a specific type"""
        threading.Thread.__init__(self)
        self.zeroconf = zeroconf
        self.type = type
        self.listener = listener
        # Known live services, keyed by lowercased alias.
        self.services = {}
        # Earliest time at which run() should (re)send its PTR query.
        self.nextTime = currentTimeMillis()
        self.delay = _BROWSER_TIME
        # Pending listener callbacks, drained one per loop in run().
        self.list = []

        self.done = 0

        self.zeroconf.addListener(self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
        self.start()

    def updateRecord(self, zeroconf, now, record):
        """Callback invoked by Zeroconf when new information arrives.

        Updates information required by browser in the Zeroconf cache."""
        # Only PTR records for our browsed type are of interest.
        if record.type == _TYPE_PTR and record.name == self.type:
            expired = record.isExpired(now)
            try:
                # KeyError here (caught below) means the alias is new to us.
                oldrecord = self.services[record.alias.lower()]
                if not expired:
                    oldrecord.resetTTL(record)
                else:
                    # Known service went away: queue a removeService callback.
                    del(self.services[record.alias.lower()])
                    callback = lambda x: self.listener.removeService(x, self.type, record.alias)
                    self.list.append(callback)
                    return
            except Exception:
                if not expired:
                    # Previously unknown, live service: queue addService.
                    self.services[record.alias.lower()] = record
                    callback = lambda x: self.listener.addService(x, self.type, record.alias)
                    self.list.append(callback)

            # Schedule a refresh query before the record expires
            # (at 75% of its lifetime).
            expires = record.getExpirationTime(75)
            if expires < self.nextTime:
                self.nextTime = expires

    def cancel(self):
        # Ask run() to exit and wake any waiters so it notices promptly.
        self.done = 1
        self.zeroconf.notifyAll()

    def run(self):
        while True:
            event = None
            now = currentTimeMillis()
            # Idle until either a callback is pending or a query is due.
            if len(self.list) == 0 and self.nextTime > now:
                self.zeroconf.wait(self.nextTime - now)
            if globals()['_GLOBAL_DONE'] or self.done:
                return
            now = currentTimeMillis()

            if self.nextTime <= now:
                # Time to (re)query for the browsed type; include known,
                # unexpired answers so responders can suppress duplicates.
                out = DNSOutgoing(_FLAGS_QR_QUERY)
                out.addQuestion(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
                for record in self.services.values():
                    if not record.isExpired(now):
                        out.addAnswerAtTime(record, now)
                self.zeroconf.send(out)
                self.nextTime = now + self.delay
                # Exponential backoff on the query interval, capped at 20s.
                self.delay = min(20 * 1000, self.delay * 2)

            # Deliver at most one queued listener callback per iteration.
            if len(self.list) > 0:
                event = self.list.pop(0)

            if event is not None:
                event(self.zeroconf)
1030
1030
1031
1031
class ServiceInfo(object):
    """Service information"""

    def __init__(self, type, name, address=None, port=None, weight=0, priority=0, properties=None, server=None):
        """Create a service description.

        type: fully qualified service type name
        name: fully qualified service name
        address: IP address as unsigned short, network byte order
        port: port that the service runs on
        weight: weight of the service
        priority: priority of the service
        properties: dictionary of properties (or a string holding the bytes for the text field)
        server: fully qualified name for service host (defaults to name)"""

        # A service name must end with its type (e.g. "x._http._tcp.local.").
        if not name.endswith(type):
            raise BadTypeInNameException
        self.type = type
        self.name = name
        self.address = address
        self.port = port
        self.weight = weight
        self.priority = priority
        if server:
            self.server = server
        else:
            self.server = name
        self.setProperties(properties)

    def setProperties(self, properties):
        """Sets properties and text of this info from a dictionary"""
        if isinstance(properties, dict):
            self.properties = properties
            # NOTE(review): 'list' shadows the builtin here.
            list = []
            result = ''
            for key in properties:
                value = properties[key]
                # Encode each property as "key=value"; None means a bare
                # key (no '='-suffix text), booleans-as-ints become
                # 'true'/'false', anything else is dropped to ''.
                if value is None:
                    suffix = ''
                elif isinstance(value, str):
                    suffix = value
                elif isinstance(value, int):
                    if value:
                        suffix = 'true'
                    else:
                        suffix = 'false'
                else:
                    suffix = ''
                list.append('='.join((key, suffix)))
            # Build the DNS TXT wire form: each item preceded by a one-byte
            # length.  NOTE(review): struct.pack('!c', chr(...)) assumes
            # Python 2 byte strings — would need bytes([...]) on Python 3.
            for item in list:
                result = ''.join((result, struct.pack('!c', chr(len(item))), item))
            self.text = result
        else:
            # Anything non-dict (including None) is stored as the raw text.
            self.text = properties

    def setText(self, text):
        """Sets properties and text given a text field"""
        self.text = text
        try:
            result = {}
            end = len(text)
            index = 0
            strs = []
            # Split the TXT data into its length-prefixed strings.
            while index < end:
                length = ord(text[index])
                index += 1
                strs.append(text[index:index+length])
                index += length

            for s in strs:
                eindex = s.find('=')
                if eindex == -1:
                    # No equals sign at all
                    key = s
                    value = 0
                else:
                    key = s[:eindex]
                    value = s[eindex+1:]
                    # Map the textual booleans back to ints (inverse of
                    # the encoding done in setProperties).
                    if value == 'true':
                        value = 1
                    elif value == 'false' or not value:
                        value = 0

                # Only update non-existent properties
                # NOTE(review): '== None' should be 'is None'.
                if key and result.get(key) == None:
                    result[key] = value

            self.properties = result
        except Exception:
            # Malformed TXT data: keep the raw text but drop the dict.
            traceback.print_exc()
            self.properties = None

    def getType(self):
        """Type accessor"""
        return self.type

    def getName(self):
        """Name accessor"""
        # Strip the trailing ".<type>" to return just the instance label.
        if self.type is not None and self.name.endswith("." + self.type):
            return self.name[:len(self.name) - len(self.type) - 1]
        return self.name

    def getAddress(self):
        """Address accessor"""
        return self.address

    def getPort(self):
        """Port accessor"""
        return self.port

    def getPriority(self):
        """Pirority accessor"""
        return self.priority

    def getWeight(self):
        """Weight accessor"""
        return self.weight

    def getProperties(self):
        """Properties accessor"""
        return self.properties

    def getText(self):
        """Text accessor"""
        return self.text

    def getServer(self):
        """Server accessor"""
        return self.server

    def updateRecord(self, zeroconf, now, record):
        """Updates service information from a DNS record"""
        if record is not None and not record.isExpired(now):
            if record.type == _TYPE_A:
                #if record.name == self.name:
                # A record for our host: refresh the address.
                if record.name == self.server:
                    self.address = record.address
            elif record.type == _TYPE_SRV:
                # SRV record: refresh host/port/weight/priority, then
                # recurse once to pick up the host's cached A record.
                if record.name == self.name:
                    self.server = record.server
                    self.port = record.port
                    self.weight = record.weight
                    self.priority = record.priority
                    #self.address = None
                    self.updateRecord(zeroconf, now, zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN))
            elif record.type == _TYPE_TXT:
                # TXT record: refresh the property dictionary.
                if record.name == self.name:
                    self.setText(record.text)

    def request(self, zeroconf, timeout):
        """Returns true if the service could be discovered on the
        network, and updates this object with details discovered.
        """
        now = currentTimeMillis()
        delay = _LISTENER_TIME
        next = now + delay
        last = now + timeout
        # 0/1 result flag (this module predates consistent bool usage).
        result = 0
        try:
            zeroconf.addListener(self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN))
            # Keep querying (with doubling delay) until SRV, A and TXT
            # data have all arrived or the overall timeout elapses.
            while self.server is None or self.address is None or self.text is None:
                if last <= now:
                    return 0
                if next <= now:
                    out = DNSOutgoing(_FLAGS_QR_QUERY)
                    out.addQuestion(DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN))
                    out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_SRV, _CLASS_IN), now)
                    out.addQuestion(DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN))
                    out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_TXT, _CLASS_IN), now)
                    if self.server is not None:
                        out.addQuestion(DNSQuestion(self.server, _TYPE_A, _CLASS_IN))
                        out.addAnswerAtTime(zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN), now)
                    zeroconf.send(out)
                    next = now + delay
                    delay = delay * 2

                zeroconf.wait(min(next, last) - now)
                now = currentTimeMillis()
            result = 1
        finally:
            zeroconf.removeListener(self)

        return result

    def __eq__(self, other):
        """Tests equality of service name"""
        # Equality is by service name only, not by address/port/text.
        if isinstance(other, ServiceInfo):
            return other.name == self.name
        return 0

    def __ne__(self, other):
        """Non-equality test"""
        return not self.__eq__(other)

    def __repr__(self):
        """String representation"""
        result = "service[%s,%s:%s," % (self.name, socket.inet_ntoa(self.getAddress()), self.port)
        if self.text is None:
            result += "None"
        else:
            # Truncate long TXT payloads for readability.
            if len(self.text) < 20:
                result += self.text
            else:
                result += self.text[:17] + "..."
        result += "]"
        return result
1238
1238
1239
1239
1240 class Zeroconf(object):
1240 class Zeroconf(object):
1241 """Implementation of Zeroconf Multicast DNS Service Discovery
1241 """Implementation of Zeroconf Multicast DNS Service Discovery
1242
1242
1243 Supports registration, unregistration, queries and browsing.
1243 Supports registration, unregistration, queries and browsing.
1244 """
1244 """
1245 def __init__(self, bindaddress=None):
1245 def __init__(self, bindaddress=None):
1246 """Creates an instance of the Zeroconf class, establishing
1246 """Creates an instance of the Zeroconf class, establishing
1247 multicast communications, listening and reaping threads."""
1247 multicast communications, listening and reaping threads."""
1248 globals()['_GLOBAL_DONE'] = 0
1248 globals()['_GLOBAL_DONE'] = 0
1249 if bindaddress is None:
1249 if bindaddress is None:
1250 self.intf = socket.gethostbyname(socket.gethostname())
1250 self.intf = socket.gethostbyname(socket.gethostname())
1251 else:
1251 else:
1252 self.intf = bindaddress
1252 self.intf = bindaddress
1253 self.group = ('', _MDNS_PORT)
1253 self.group = ('', _MDNS_PORT)
1254 self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
1254 self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
1255 try:
1255 try:
1256 self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
1256 self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
1257 self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
1257 self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
1258 except Exception:
1258 except Exception:
1259 # SO_REUSEADDR should be equivalent to SO_REUSEPORT for
1259 # SO_REUSEADDR should be equivalent to SO_REUSEPORT for
1260 # multicast UDP sockets (p 731, "TCP/IP Illustrated,
1260 # multicast UDP sockets (p 731, "TCP/IP Illustrated,
1261 # Volume 2"), but some BSD-derived systems require
1261 # Volume 2"), but some BSD-derived systems require
1262 # SO_REUSEPORT to be specified explicity. Also, not all
1262 # SO_REUSEPORT to be specified explicity. Also, not all
1263 # versions of Python have SO_REUSEPORT available. So
1263 # versions of Python have SO_REUSEPORT available. So
1264 # if you're on a BSD-based system, and haven't upgraded
1264 # if you're on a BSD-based system, and haven't upgraded
1265 # to Python 2.3 yet, you may find this library doesn't
1265 # to Python 2.3 yet, you may find this library doesn't
1266 # work as expected.
1266 # work as expected.
1267 #
1267 #
1268 pass
1268 pass
1269 self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
1269 self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
1270 self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
1270 self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
1271 try:
1271 try:
1272 self.socket.bind(self.group)
1272 self.socket.bind(self.group)
1273 except Exception:
1273 except Exception:
1274 # Some versions of linux raise an exception even though
1274 # Some versions of linux raise an exception even though
1275 # the SO_REUSE* options have been set, so ignore it
1275 # the SO_REUSE* options have been set, so ignore it
1276 #
1276 #
1277 pass
1277 pass
1278 #self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.intf) + socket.inet_aton('0.0.0.0'))
1278 #self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.intf) + socket.inet_aton('0.0.0.0'))
1279 self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
1279 self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
1280
1280
1281 self.listeners = []
1281 self.listeners = []
1282 self.browsers = []
1282 self.browsers = []
1283 self.services = {}
1283 self.services = {}
1284 self.servicetypes = {}
1284 self.servicetypes = {}
1285
1285
1286 self.cache = DNSCache()
1286 self.cache = DNSCache()
1287
1287
1288 self.condition = threading.Condition()
1288 self.condition = threading.Condition()
1289
1289
1290 self.engine = Engine(self)
1290 self.engine = Engine(self)
1291 self.listener = Listener(self)
1291 self.listener = Listener(self)
1292 self.reaper = Reaper(self)
1292 self.reaper = Reaper(self)
1293
1293
1294 def isLoopback(self):
1294 def isLoopback(self):
1295 return self.intf.startswith("127.0.0.1")
1295 return self.intf.startswith("127.0.0.1")
1296
1296
1297 def isLinklocal(self):
1297 def isLinklocal(self):
1298 return self.intf.startswith("169.254.")
1298 return self.intf.startswith("169.254.")
1299
1299
1300 def wait(self, timeout):
1300 def wait(self, timeout):
1301 """Calling thread waits for a given number of milliseconds or
1301 """Calling thread waits for a given number of milliseconds or
1302 until notified."""
1302 until notified."""
1303 self.condition.acquire()
1303 self.condition.acquire()
1304 self.condition.wait(timeout/1000)
1304 self.condition.wait(timeout/1000)
1305 self.condition.release()
1305 self.condition.release()
1306
1306
1307 def notifyAll(self):
1307 def notifyAll(self):
1308 """Notifies all waiting threads"""
1308 """Notifies all waiting threads"""
1309 self.condition.acquire()
1309 self.condition.acquire()
1310 self.condition.notifyAll()
1310 self.condition.notifyAll()
1311 self.condition.release()
1311 self.condition.release()
1312
1312
1313 def getServiceInfo(self, type, name, timeout=3000):
1313 def getServiceInfo(self, type, name, timeout=3000):
1314 """Returns network's service information for a particular
1314 """Returns network's service information for a particular
1315 name and type, or None if no service matches by the timeout,
1315 name and type, or None if no service matches by the timeout,
1316 which defaults to 3 seconds."""
1316 which defaults to 3 seconds."""
1317 info = ServiceInfo(type, name)
1317 info = ServiceInfo(type, name)
1318 if info.request(self, timeout):
1318 if info.request(self, timeout):
1319 return info
1319 return info
1320 return None
1320 return None
1321
1321
1322 def addServiceListener(self, type, listener):
1322 def addServiceListener(self, type, listener):
1323 """Adds a listener for a particular service type. This object
1323 """Adds a listener for a particular service type. This object
1324 will then have its updateRecord method called when information
1324 will then have its updateRecord method called when information
1325 arrives for that type."""
1325 arrives for that type."""
1326 self.removeServiceListener(listener)
1326 self.removeServiceListener(listener)
1327 self.browsers.append(ServiceBrowser(self, type, listener))
1327 self.browsers.append(ServiceBrowser(self, type, listener))
1328
1328
1329 def removeServiceListener(self, listener):
1329 def removeServiceListener(self, listener):
1330 """Removes a listener from the set that is currently listening."""
1330 """Removes a listener from the set that is currently listening."""
1331 for browser in self.browsers:
1331 for browser in self.browsers:
1332 if browser.listener == listener:
1332 if browser.listener == listener:
1333 browser.cancel()
1333 browser.cancel()
1334 del(browser)
1334 del(browser)
1335
1335
def registerService(self, info, ttl=_DNS_TTL):
    """Registers service information to the network with a default TTL
    of 60 seconds. Zeroconf will then respond to requests for
    information for that service. The name of the service may be
    changed if needed to make it unique on the network."""
    self.checkService(info)
    self.services[info.name.lower()] = info
    # BUG FIX: dict.has_key() is deprecated (and removed in Python 3);
    # use the `in` operator instead.
    if info.type in self.servicetypes:
        self.servicetypes[info.type] += 1
    else:
        self.servicetypes[info.type] = 1
    now = currentTimeMillis()
    nextTime = now
    i = 0
    # Announce the registration three times, _REGISTER_TIME apart.
    while i < 3:
        if now < nextTime:
            self.wait(nextTime - now)
            now = currentTimeMillis()
            continue
        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
        out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN,
                                       ttl, info.name), 0)
        out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN,
                                       ttl, info.priority, info.weight,
                                       info.port, info.server), 0)
        out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN,
                                    ttl, info.text), 0)
        if info.address:
            out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A,
                                           _CLASS_IN, ttl,
                                           info.address), 0)
        self.send(out)
        i += 1
        nextTime += _REGISTER_TIME
def unregisterService(self, info):
    """Unregister a service."""
    # Best-effort removal from the local tables; missing entries are
    # ignored, as before.
    try:
        del self.services[info.name.lower()]
        count = self.servicetypes[info.type]
        if count > 1:
            self.servicetypes[info.type] = count - 1
        else:
            del self.servicetypes[info.type]
    except KeyError:
        pass
    now = currentTimeMillis()
    nextTime = now
    sent = 0
    # Send the TTL-0 answers three times, _UNREGISTER_TIME apart.
    while sent < 3:
        if now < nextTime:
            self.wait(nextTime - now)
            now = currentTimeMillis()
            continue
        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
        out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR,
                                       _CLASS_IN, 0, info.name), 0)
        out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV,
                                       _CLASS_IN, 0, info.priority,
                                       info.weight, info.port,
                                       info.name), 0)
        out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT,
                                    _CLASS_IN, 0, info.text), 0)
        if info.address:
            out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A,
                                           _CLASS_IN, 0,
                                           info.address), 0)
        self.send(out)
        sent += 1
        nextTime += _UNREGISTER_TIME
def unregisterAllServices(self):
    """Unregister all registered services."""
    if not self.services:
        return
    now = currentTimeMillis()
    nextTime = now
    sent = 0
    # Broadcast TTL-0 answers for every service, three times,
    # _UNREGISTER_TIME apart.
    while sent < 3:
        if now < nextTime:
            self.wait(nextTime - now)
            now = currentTimeMillis()
            continue
        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
        for info in self.services.values():
            out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR,
                                           _CLASS_IN, 0, info.name), 0)
            out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV,
                                           _CLASS_IN, 0, info.priority,
                                           info.weight, info.port,
                                           info.server), 0)
            out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT,
                                        _CLASS_IN, 0, info.text), 0)
            if info.address:
                out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A,
                                               _CLASS_IN, 0,
                                               info.address), 0)
        self.send(out)
        sent += 1
        nextTime += _UNREGISTER_TIME
def checkService(self, info):
    """Checks the network for a unique service name, modifying the
    ServiceInfo passed in if it is not unique."""
    now = currentTimeMillis()
    nextTime = now
    i = 0
    while i < 3:
        # Is the name already claimed by a live PTR record?
        for record in self.cache.entriesWithName(info.type):
            if (record.type == _TYPE_PTR and not record.isExpired(now)
                and record.alias == info.name):
                if info.name.find('.') < 0:
                    # Rename and retry. BUG FIX: info.port is an int
                    # (e.g. 1234), so the original str "+" concatenation
                    # raised TypeError; %-formatting stringifies it.
                    info.name = "%s.[%s:%s].%s" % (
                        info.name, info.address, info.port, info.type)
                    self.checkService(info)
                    return
                raise NonUniqueNameException
        if now < nextTime:
            self.wait(nextTime - now)
            now = currentTimeMillis()
            continue
        out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
        self.debug = out
        out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
        out.addAuthorativeAnswer(DNSPointer(info.type, _TYPE_PTR,
                                            _CLASS_IN, _DNS_TTL,
                                            info.name))
        self.send(out)
        i += 1
        nextTime += _CHECK_TIME
def addListener(self, listener, question):
    """Adds a listener for a given question. The listener will have
    its updateRecord method called when information is available to
    answer the question."""
    now = currentTimeMillis()
    self.listeners.append(listener)
    if question is not None:
        # Replay any cached, unexpired records that already answer
        # the question.
        for cached in self.cache.entriesWithName(question.name):
            if question.answeredBy(cached) and not cached.isExpired(now):
                listener.updateRecord(self, now, cached)
    self.notifyAll()
def removeListener(self, listener):
    """Removes a listener.

    Removing a listener that was never added is silently ignored,
    matching the original best-effort behaviour.
    """
    try:
        self.listeners.remove(listener)
    except ValueError:
        # list.remove() raises ValueError for an unknown listener.
        # The original caught bare Exception, which also hid real
        # bugs raised inside notifyAll().
        return
    self.notifyAll()
def updateRecord(self, now, rec):
    """Used to notify listeners of new information that has updated
    a record."""
    # Fan the update out to every registered listener, then wake any
    # waiters via notifyAll().
    for subscriber in self.listeners:
        subscriber.updateRecord(self, now, rec)
    self.notifyAll()
def handleResponse(self, msg):
    """Deal with incoming response packets. All answers
    are held in the cache, and listeners are notified."""
    now = currentTimeMillis()
    for record in msg.answers:
        expired = record.isExpired(now)
        if record in self.cache.entries():
            if expired:
                # Expired answers are evicted rather than refreshed.
                self.cache.remove(record)
            else:
                cached = self.cache.get(record)
                if cached is not None:
                    cached.resetTTL(record)
                    record = cached
        else:
            self.cache.add(record)

        self.updateRecord(now, record)
def handleQuery(self, msg, addr, port):
    """Deal with incoming query packets. Provides a response if
    possible."""
    out = None

    # Queries arriving from a port other than the mDNS port get a
    # unicast-style response that echoes the questions back.
    if port != _MDNS_PORT:
        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0)
        for question in msg.questions:
            out.addQuestion(question)

    for question in msg.questions:
        if question.type == _TYPE_PTR:
            if question.name == "_services._dns-sd._udp.local.":
                # Service-type enumeration meta-query.
                for stype in self.servicetypes.keys():
                    if out is None:
                        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
                    out.addAnswer(msg, DNSPointer(
                        "_services._dns-sd._udp.local.", _TYPE_PTR,
                        _CLASS_IN, _DNS_TTL, stype))
            for service in self.services.values():
                if question.name == service.type:
                    if out is None:
                        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
                    out.addAnswer(msg, DNSPointer(
                        service.type, _TYPE_PTR, _CLASS_IN,
                        _DNS_TTL, service.name))
        else:
            try:
                if out is None:
                    out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)

                # Answer A record queries for any service addresses we know
                if question.type in (_TYPE_A, _TYPE_ANY):
                    for service in self.services.values():
                        if service.server == question.name.lower():
                            out.addAnswer(msg, DNSAddress(
                                question.name, _TYPE_A,
                                _CLASS_IN | _CLASS_UNIQUE,
                                _DNS_TTL, service.address))

                service = self.services.get(question.name.lower(), None)
                if not service:
                    continue

                if question.type in (_TYPE_SRV, _TYPE_ANY):
                    out.addAnswer(msg, DNSService(
                        question.name, _TYPE_SRV,
                        _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL,
                        service.priority, service.weight,
                        service.port, service.server))
                if question.type in (_TYPE_TXT, _TYPE_ANY):
                    out.addAnswer(msg, DNSText(
                        question.name, _TYPE_TXT,
                        _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL,
                        service.text))
                if question.type == _TYPE_SRV:
                    out.addAdditionalAnswer(DNSAddress(
                        service.server, _TYPE_A,
                        _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL,
                        service.address))
            except Exception:
                traceback.print_exc()

    if out is not None and out.answers:
        out.id = msg.id
        self.send(out, addr, port)
def send(self, out, addr=_MDNS_ADDR, port=_MDNS_PORT):
    """Sends an outgoing packet."""
    # This is a quick test to see if we can parse the packets we generate
    #temp = DNSIncoming(out.packet())
    packet = out.packet()
    try:
        self.socket.sendto(packet, 0, (addr, port))
    except Exception:
        # Ignore this, it may be a temporary loss of network connection
        pass
def close(self):
    """Ends the background threads, and prevent this instance from
    servicing further queries."""
    if globals()['_GLOBAL_DONE'] == 0:
        globals()['_GLOBAL_DONE'] = 1
        # Wake anything blocked in wait() so the threads can exit,
        # then say goodbye to the network.
        self.notifyAll()
        self.engine.notify()
        self.unregisterAllServices()
        # Leave the multicast group and release the socket.
        self.socket.setsockopt(
            socket.SOL_IP, socket.IP_DROP_MEMBERSHIP,
            socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
        self.socket.close()
1559 # Test a few module features, including service registration, service
1559 # Test a few module features, including service registration, service
1560 # query (for Zoe), and service unregistration.
1560 # query (for Zoe), and service unregistration.
1561
1561
1562 if __name__ == '__main__':
1562 if __name__ == '__main__':
1563 print "Multicast DNS Service Discovery for Python, version", __version__
1563 print "Multicast DNS Service Discovery for Python, version", __version__
1564 r = Zeroconf()
1564 r = Zeroconf()
1565 print "1. Testing registration of a service..."
1565 print "1. Testing registration of a service..."
1566 desc = {'version':'0.10','a':'test value', 'b':'another value'}
1566 desc = {'version':'0.10','a':'test value', 'b':'another value'}
1567 info = ServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local.", socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc)
1567 info = ServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local.", socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc)
1568 print " Registering service..."
1568 print " Registering service..."
1569 r.registerService(info)
1569 r.registerService(info)
1570 print " Registration done."
1570 print " Registration done."
1571 print "2. Testing query of service information..."
1571 print "2. Testing query of service information..."
1572 print " Getting ZOE service:", str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local."))
1572 print " Getting ZOE service:", str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local."))
1573 print " Query done."
1573 print " Query done."
1574 print "3. Testing query of own service..."
1574 print "3. Testing query of own service..."
1575 print " Getting self:", str(r.getServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local."))
1575 print " Getting self:", str(r.getServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local."))
1576 print " Query done."
1576 print " Query done."
1577 print "4. Testing unregister of service information..."
1577 print "4. Testing unregister of service information..."
1578 r.unregisterService(info)
1578 r.unregisterService(info)
1579 print " Unregister done."
1579 print " Unregister done."
1580 r.close()
1580 r.close()
1581
1581
1582 # no-check-code
1582 # no-check-code
@@ -1,91 +1,91
1 # ancestor.py - generic DAG ancestor algorithm for mercurial
1 # ancestor.py - generic DAG ancestor algorithm for mercurial
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import heapq
8 import heapq
9
9
def ancestor(a, b, pfunc):
    """
    Returns the common ancestor of a and b that is furthest from a
    root (as measured by longest path) or None if no ancestor is
    found. If there are multiple common ancestors at the same
    distance, the first one found is returned.

    pfunc must return a list of parent vertices for a given vertex
    """

    if a == b:
        return a

    a, b = sorted([a, b])

    # find depth from root of all ancestors
    # depth is stored as a negative for heapq
    parentcache = {}
    visit = [a, b]
    depth = {}
    while visit:
        vertex = visit[-1]
        pl = pfunc(vertex)
        parentcache[vertex] = pl
        if not pl:
            depth[vertex] = 0
            visit.pop()
        else:
            for p in pl:
                if p == a or p == b: # did we find a or b as a parent?
                    return p # we're done
                if p not in depth:
                    visit.append(p)
            if visit[-1] == vertex:
                # -(maximum distance of parents + 1)
                depth[vertex] = min([depth[p] for p in pl]) - 1
                visit.pop()

    # traverse ancestors in order of decreasing distance from root
    def ancestors(vertex):
        h = [(depth[vertex], vertex)]
        seen = set()
        while h:
            d, n = heapq.heappop(h)
            if n not in seen:
                seen.add(n)
                yield (d, n)
                for p in parentcache[n]:
                    heapq.heappush(h, (depth[p], p))

    # group ancestors by depth, yielding (depth, set-of-vertices)
    def generations(vertex):
        sg, s = None, set()
        for g, v in ancestors(vertex):
            if g != sg:
                # BUG FIX (hardening): test against the None sentinel
                # explicitly instead of relying on truthiness - a depth
                # of 0 is falsy but is a real generation.
                if sg is not None:
                    yield sg, s
                sg, s = g, set((v,))
            else:
                s.add(v)
        yield sg, s

    x = generations(a)
    y = generations(b)
    # BUG FIX: use the next() builtin instead of the Python-2-only
    # .next() method so the code also runs on Python 3.
    gx = next(x)
    gy = next(y)

    # increment each ancestor list until it is closer to root than
    # the other, or they match
    try:
        while True:
            if gx[0] == gy[0]:
                for v in gx[1]:
                    if v in gy[1]:
                        return v
                gy = next(y)
                gx = next(x)
            elif gx[0] > gy[0]:
                gy = next(y)
            else:
                gx = next(x)
    except StopIteration:
        return None
@@ -1,358 +1,358
1 # bundlerepo.py - repository class for viewing uncompressed bundles
1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 #
2 #
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Repository class for viewing uncompressed bundles.
8 """Repository class for viewing uncompressed bundles.
9
9
10 This provides a read-only repository interface to bundles as if they
10 This provides a read-only repository interface to bundles as if they
11 were part of the actual repository.
11 were part of the actual repository.
12 """
12 """
13
13
14 from node import nullid
14 from node import nullid
15 from i18n import _
15 from i18n import _
16 import os, tempfile, shutil
16 import os, tempfile, shutil
17 import changegroup, util, mdiff, discovery
17 import changegroup, util, mdiff, discovery
18 import localrepo, changelog, manifest, filelog, revlog, error
18 import localrepo, changelog, manifest, filelog, revlog, error
19
19
class bundlerevlog(revlog.revlog):
    def __init__(self, opener, indexfile, bundle, linkmapper):
        # How it works:
        # to retrieve a revision, we need to know the offset of
        # the revision in the bundle (an unbundle object).
        #
        # We store this offset in the index (start), to differentiate a
        # rev in the bundle and from a rev in the revlog, we check
        # len(index[r]). If the tuple is bigger than 7, it is a bundle
        # (it is bigger since we store the node to which the delta is)
        #
        revlog.revlog.__init__(self, opener, indexfile)
        self.bundle = bundle
        # maps a rev that lives in the bundle to its delta base node
        self.basemap = {}
        n = len(self)
        chain = None
        while True:
            chunkdata = bundle.deltachunk(chain)
            if not chunkdata:
                break
            node = chunkdata['node']
            p1 = chunkdata['p1']
            p2 = chunkdata['p2']
            cs = chunkdata['cs']
            deltabase = chunkdata['deltabase']
            delta = chunkdata['delta']

            size = len(delta)
            start = bundle.tell() - size

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                chain = node
                continue

            for p in (p1, p2):
                # idiom fix: "p not in" instead of "not p in"
                if p not in self.nodemap:
                    raise error.LookupError(p, self.indexfile,
                                            _("unknown parent"))
            # start, size, full unc. size, base (unused), link, p1, p2, node
            e = (revlog.offset_type(start, 0), size, -1, -1, link,
                 self.rev(p1), self.rev(p2), node)
            self.basemap[n] = deltabase
            self.index.insert(-1, e)
            self.nodemap[node] = n
            chain = node
            n += 1

    def inbundle(self, rev):
        """is rev from the bundle"""
        if rev < 0:
            return False
        return rev in self.basemap

    def bundlebase(self, rev):
        """return the delta base node of a rev that is in the bundle"""
        return self.basemap[rev]

    def _chunk(self, rev):
        # Warning: in case of bundle, the diff is against bundlebase,
        # not against rev - 1
        # XXX: could use some caching
        if not self.inbundle(rev):
            return revlog.revlog._chunk(self, rev)
        self.bundle.seek(self.start(rev))
        return self.bundle.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if self.inbundle(rev1) and self.inbundle(rev2):
            # hot path for bundle
            revb = self.rev(self.bundlebase(rev2))
            if revb == rev1:
                return self._chunk(rev2)
        elif not self.inbundle(rev1) and not self.inbundle(rev2):
            return revlog.revlog.revdiff(self, rev1, rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid:
            return ""

        text = None
        chain = []
        iter_node = node
        rev = self.rev(iter_node)
        # reconstruct the revision if it is from a changegroup
        while self.inbundle(rev):
            if self._cache and self._cache[0] == iter_node:
                text = self._cache[2]
                break
            chain.append(rev)
            iter_node = self.bundlebase(rev)
            rev = self.rev(iter_node)
        if text is None:
            text = revlog.revlog.revision(self, iter_node)

        # apply the collected deltas, innermost base first
        while chain:
            delta = self._chunk(chain.pop())
            text = mdiff.patches(text, [delta])

        p1, p2 = self.parents(node)
        if node != revlog.hash(text, p1, p2):
            raise error.RevlogError(_("integrity check failed on %s:%d")
                                    % (self.datafile, self.rev(node)))

        self._cache = (node, self.rev(node), text)
        return text

    # this revlog is read-only: all mutating operations are unsupported

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        raise NotImplementedError

    def addgroup(self, revs, linkmapper, transaction):
        raise NotImplementedError

    def strip(self, rev, minlink):
        raise NotImplementedError

    def checksize(self):
        raise NotImplementedError
class bundlechangelog(bundlerevlog, changelog.changelog):
    def __init__(self, opener, bundle):
        changelog.changelog.__init__(self, opener)
        # changelog revs link to themselves, so the link mapper is
        # the identity function
        linkmapper = lambda x: x
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)
class bundlemanifest(bundlerevlog, manifest.manifest):
    def __init__(self, opener, bundle, linkmapper):
        # initialize the manifest side first, then layer the bundle
        # revlog over its index file
        manifest.manifest.__init__(self, opener)
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)
152 class bundlefilelog(bundlerevlog, filelog.filelog):
152 class bundlefilelog(bundlerevlog, filelog.filelog):
153 def __init__(self, opener, path, bundle, linkmapper, repo):
153 def __init__(self, opener, path, bundle, linkmapper, repo):
154 filelog.filelog.__init__(self, opener, path)
154 filelog.filelog.__init__(self, opener, path)
155 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
155 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
156 linkmapper)
156 linkmapper)
157 self._repo = repo
157 self._repo = repo
158
158
159 def _file(self, f):
159 def _file(self, f):
160 self._repo.file(f)
160 self._repo.file(f)
161
161
162 class bundlerepository(localrepo.localrepository):
162 class bundlerepository(localrepo.localrepository):
163 def __init__(self, ui, path, bundlename):
163 def __init__(self, ui, path, bundlename):
164 self._tempparent = None
164 self._tempparent = None
165 try:
165 try:
166 localrepo.localrepository.__init__(self, ui, path)
166 localrepo.localrepository.__init__(self, ui, path)
167 except error.RepoError:
167 except error.RepoError:
168 self._tempparent = tempfile.mkdtemp()
168 self._tempparent = tempfile.mkdtemp()
169 localrepo.instance(ui, self._tempparent, 1)
169 localrepo.instance(ui, self._tempparent, 1)
170 localrepo.localrepository.__init__(self, ui, self._tempparent)
170 localrepo.localrepository.__init__(self, ui, self._tempparent)
171
171
172 if path:
172 if path:
173 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
173 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
174 else:
174 else:
175 self._url = 'bundle:' + bundlename
175 self._url = 'bundle:' + bundlename
176
176
177 self.tempfile = None
177 self.tempfile = None
178 f = util.posixfile(bundlename, "rb")
178 f = util.posixfile(bundlename, "rb")
179 self.bundle = changegroup.readbundle(f, bundlename)
179 self.bundle = changegroup.readbundle(f, bundlename)
180 if self.bundle.compressed():
180 if self.bundle.compressed():
181 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
181 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
182 suffix=".hg10un", dir=self.path)
182 suffix=".hg10un", dir=self.path)
183 self.tempfile = temp
183 self.tempfile = temp
184 fptemp = os.fdopen(fdtemp, 'wb')
184 fptemp = os.fdopen(fdtemp, 'wb')
185
185
186 try:
186 try:
187 fptemp.write("HG10UN")
187 fptemp.write("HG10UN")
188 while 1:
188 while True:
189 chunk = self.bundle.read(2**18)
189 chunk = self.bundle.read(2**18)
190 if not chunk:
190 if not chunk:
191 break
191 break
192 fptemp.write(chunk)
192 fptemp.write(chunk)
193 finally:
193 finally:
194 fptemp.close()
194 fptemp.close()
195
195
196 f = util.posixfile(self.tempfile, "rb")
196 f = util.posixfile(self.tempfile, "rb")
197 self.bundle = changegroup.readbundle(f, bundlename)
197 self.bundle = changegroup.readbundle(f, bundlename)
198
198
199 # dict with the mapping 'filename' -> position in the bundle
199 # dict with the mapping 'filename' -> position in the bundle
200 self.bundlefilespos = {}
200 self.bundlefilespos = {}
201
201
202 @util.propertycache
202 @util.propertycache
203 def changelog(self):
203 def changelog(self):
204 # consume the header if it exists
204 # consume the header if it exists
205 self.bundle.changelogheader()
205 self.bundle.changelogheader()
206 c = bundlechangelog(self.sopener, self.bundle)
206 c = bundlechangelog(self.sopener, self.bundle)
207 self.manstart = self.bundle.tell()
207 self.manstart = self.bundle.tell()
208 return c
208 return c
209
209
210 @util.propertycache
210 @util.propertycache
211 def manifest(self):
211 def manifest(self):
212 self.bundle.seek(self.manstart)
212 self.bundle.seek(self.manstart)
213 # consume the header if it exists
213 # consume the header if it exists
214 self.bundle.manifestheader()
214 self.bundle.manifestheader()
215 m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev)
215 m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev)
216 self.filestart = self.bundle.tell()
216 self.filestart = self.bundle.tell()
217 return m
217 return m
218
218
219 @util.propertycache
219 @util.propertycache
220 def manstart(self):
220 def manstart(self):
221 self.changelog
221 self.changelog
222 return self.manstart
222 return self.manstart
223
223
224 @util.propertycache
224 @util.propertycache
225 def filestart(self):
225 def filestart(self):
226 self.manifest
226 self.manifest
227 return self.filestart
227 return self.filestart
228
228
229 def url(self):
229 def url(self):
230 return self._url
230 return self._url
231
231
232 def file(self, f):
232 def file(self, f):
233 if not self.bundlefilespos:
233 if not self.bundlefilespos:
234 self.bundle.seek(self.filestart)
234 self.bundle.seek(self.filestart)
235 while 1:
235 while True:
236 chunkdata = self.bundle.filelogheader()
236 chunkdata = self.bundle.filelogheader()
237 if not chunkdata:
237 if not chunkdata:
238 break
238 break
239 fname = chunkdata['filename']
239 fname = chunkdata['filename']
240 self.bundlefilespos[fname] = self.bundle.tell()
240 self.bundlefilespos[fname] = self.bundle.tell()
241 while 1:
241 while True:
242 c = self.bundle.deltachunk(None)
242 c = self.bundle.deltachunk(None)
243 if not c:
243 if not c:
244 break
244 break
245
245
246 if f[0] == '/':
246 if f[0] == '/':
247 f = f[1:]
247 f = f[1:]
248 if f in self.bundlefilespos:
248 if f in self.bundlefilespos:
249 self.bundle.seek(self.bundlefilespos[f])
249 self.bundle.seek(self.bundlefilespos[f])
250 return bundlefilelog(self.sopener, f, self.bundle,
250 return bundlefilelog(self.sopener, f, self.bundle,
251 self.changelog.rev, self)
251 self.changelog.rev, self)
252 else:
252 else:
253 return filelog.filelog(self.sopener, f)
253 return filelog.filelog(self.sopener, f)
254
254
255 def close(self):
255 def close(self):
256 """Close assigned bundle file immediately."""
256 """Close assigned bundle file immediately."""
257 self.bundle.close()
257 self.bundle.close()
258 if self.tempfile is not None:
258 if self.tempfile is not None:
259 os.unlink(self.tempfile)
259 os.unlink(self.tempfile)
260 if self._tempparent:
260 if self._tempparent:
261 shutil.rmtree(self._tempparent, True)
261 shutil.rmtree(self._tempparent, True)
262
262
263 def cancopy(self):
263 def cancopy(self):
264 return False
264 return False
265
265
266 def getcwd(self):
266 def getcwd(self):
267 return os.getcwd() # always outside the repo
267 return os.getcwd() # always outside the repo
268
268
269 def instance(ui, path, create):
269 def instance(ui, path, create):
270 if create:
270 if create:
271 raise util.Abort(_('cannot create new bundle repository'))
271 raise util.Abort(_('cannot create new bundle repository'))
272 parentpath = ui.config("bundle", "mainreporoot", "")
272 parentpath = ui.config("bundle", "mainreporoot", "")
273 if parentpath:
273 if parentpath:
274 # Try to make the full path relative so we get a nice, short URL.
274 # Try to make the full path relative so we get a nice, short URL.
275 # In particular, we don't want temp dir names in test outputs.
275 # In particular, we don't want temp dir names in test outputs.
276 cwd = os.getcwd()
276 cwd = os.getcwd()
277 if parentpath == cwd:
277 if parentpath == cwd:
278 parentpath = ''
278 parentpath = ''
279 else:
279 else:
280 cwd = os.path.join(cwd,'')
280 cwd = os.path.join(cwd,'')
281 if parentpath.startswith(cwd):
281 if parentpath.startswith(cwd):
282 parentpath = parentpath[len(cwd):]
282 parentpath = parentpath[len(cwd):]
283 u = util.url(path)
283 u = util.url(path)
284 path = u.localpath()
284 path = u.localpath()
285 if u.scheme == 'bundle':
285 if u.scheme == 'bundle':
286 s = path.split("+", 1)
286 s = path.split("+", 1)
287 if len(s) == 1:
287 if len(s) == 1:
288 repopath, bundlename = parentpath, s[0]
288 repopath, bundlename = parentpath, s[0]
289 else:
289 else:
290 repopath, bundlename = s
290 repopath, bundlename = s
291 else:
291 else:
292 repopath, bundlename = parentpath, path
292 repopath, bundlename = parentpath, path
293 return bundlerepository(ui, repopath, bundlename)
293 return bundlerepository(ui, repopath, bundlename)
294
294
295 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
295 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
296 force=False):
296 force=False):
297 '''obtains a bundle of changes incoming from other
297 '''obtains a bundle of changes incoming from other
298
298
299 "onlyheads" restricts the returned changes to those reachable from the
299 "onlyheads" restricts the returned changes to those reachable from the
300 specified heads.
300 specified heads.
301 "bundlename", if given, stores the bundle to this file path permanently;
301 "bundlename", if given, stores the bundle to this file path permanently;
302 otherwise it's stored to a temp file and gets deleted again when you call
302 otherwise it's stored to a temp file and gets deleted again when you call
303 the returned "cleanupfn".
303 the returned "cleanupfn".
304 "force" indicates whether to proceed on unrelated repos.
304 "force" indicates whether to proceed on unrelated repos.
305
305
306 Returns a tuple (local, csets, cleanupfn):
306 Returns a tuple (local, csets, cleanupfn):
307
307
308 "local" is a local repo from which to obtain the actual incoming changesets; it
308 "local" is a local repo from which to obtain the actual incoming changesets; it
309 is a bundlerepo for the obtained bundle when the original "other" is remote.
309 is a bundlerepo for the obtained bundle when the original "other" is remote.
310 "csets" lists the incoming changeset node ids.
310 "csets" lists the incoming changeset node ids.
311 "cleanupfn" must be called without arguments when you're done processing the
311 "cleanupfn" must be called without arguments when you're done processing the
312 changes; it closes both the original "other" and the one returned here.
312 changes; it closes both the original "other" and the one returned here.
313 '''
313 '''
314 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads, force=force)
314 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads, force=force)
315 common, incoming, rheads = tmp
315 common, incoming, rheads = tmp
316 if not incoming:
316 if not incoming:
317 try:
317 try:
318 os.unlink(bundlename)
318 os.unlink(bundlename)
319 except OSError:
319 except OSError:
320 pass
320 pass
321 return other, [], other.close
321 return other, [], other.close
322
322
323 bundle = None
323 bundle = None
324 bundlerepo = None
324 bundlerepo = None
325 localrepo = other
325 localrepo = other
326 if bundlename or not other.local():
326 if bundlename or not other.local():
327 # create a bundle (uncompressed if other repo is not local)
327 # create a bundle (uncompressed if other repo is not local)
328
328
329 if other.capable('getbundle'):
329 if other.capable('getbundle'):
330 cg = other.getbundle('incoming', common=common, heads=rheads)
330 cg = other.getbundle('incoming', common=common, heads=rheads)
331 elif onlyheads is None and not other.capable('changegroupsubset'):
331 elif onlyheads is None and not other.capable('changegroupsubset'):
332 # compat with older servers when pulling all remote heads
332 # compat with older servers when pulling all remote heads
333 cg = other.changegroup(incoming, "incoming")
333 cg = other.changegroup(incoming, "incoming")
334 rheads = None
334 rheads = None
335 else:
335 else:
336 cg = other.changegroupsubset(incoming, rheads, 'incoming')
336 cg = other.changegroupsubset(incoming, rheads, 'incoming')
337 bundletype = other.local() and "HG10BZ" or "HG10UN"
337 bundletype = other.local() and "HG10BZ" or "HG10UN"
338 fname = bundle = changegroup.writebundle(cg, bundlename, bundletype)
338 fname = bundle = changegroup.writebundle(cg, bundlename, bundletype)
339 # keep written bundle?
339 # keep written bundle?
340 if bundlename:
340 if bundlename:
341 bundle = None
341 bundle = None
342 if not other.local():
342 if not other.local():
343 # use the created uncompressed bundlerepo
343 # use the created uncompressed bundlerepo
344 localrepo = bundlerepo = bundlerepository(ui, repo.root, fname)
344 localrepo = bundlerepo = bundlerepository(ui, repo.root, fname)
345 # this repo contains local and other now, so filter out local again
345 # this repo contains local and other now, so filter out local again
346 common = repo.heads()
346 common = repo.heads()
347
347
348 csets = localrepo.changelog.findmissing(common, rheads)
348 csets = localrepo.changelog.findmissing(common, rheads)
349
349
350 def cleanup():
350 def cleanup():
351 if bundlerepo:
351 if bundlerepo:
352 bundlerepo.close()
352 bundlerepo.close()
353 if bundle:
353 if bundle:
354 os.unlink(bundle)
354 os.unlink(bundle)
355 other.close()
355 other.close()
356
356
357 return (localrepo, csets, cleanup)
357 return (localrepo, csets, cleanup)
358
358
@@ -1,256 +1,256
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from node import nullrev
9 from node import nullrev
10 import mdiff, util
10 import mdiff, util
11 import struct, os, bz2, zlib, tempfile
11 import struct, os, bz2, zlib, tempfile
12
12
13 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
13 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
14
14
15 def readexactly(stream, n):
15 def readexactly(stream, n):
16 '''read n bytes from stream.read and abort if less was available'''
16 '''read n bytes from stream.read and abort if less was available'''
17 s = stream.read(n)
17 s = stream.read(n)
18 if len(s) < n:
18 if len(s) < n:
19 raise util.Abort(_("stream ended unexpectedly"
19 raise util.Abort(_("stream ended unexpectedly"
20 " (got %d bytes, expected %d)")
20 " (got %d bytes, expected %d)")
21 % (len(s), n))
21 % (len(s), n))
22 return s
22 return s
23
23
24 def getchunk(stream):
24 def getchunk(stream):
25 """return the next chunk from stream as a string"""
25 """return the next chunk from stream as a string"""
26 d = readexactly(stream, 4)
26 d = readexactly(stream, 4)
27 l = struct.unpack(">l", d)[0]
27 l = struct.unpack(">l", d)[0]
28 if l <= 4:
28 if l <= 4:
29 if l:
29 if l:
30 raise util.Abort(_("invalid chunk length %d") % l)
30 raise util.Abort(_("invalid chunk length %d") % l)
31 return ""
31 return ""
32 return readexactly(stream, l - 4)
32 return readexactly(stream, l - 4)
33
33
34 def chunkheader(length):
34 def chunkheader(length):
35 """return a changegroup chunk header (string)"""
35 """return a changegroup chunk header (string)"""
36 return struct.pack(">l", length + 4)
36 return struct.pack(">l", length + 4)
37
37
38 def closechunk():
38 def closechunk():
39 """return a changegroup chunk header (string) for a zero-length chunk"""
39 """return a changegroup chunk header (string) for a zero-length chunk"""
40 return struct.pack(">l", 0)
40 return struct.pack(">l", 0)
41
41
42 class nocompress(object):
42 class nocompress(object):
43 def compress(self, x):
43 def compress(self, x):
44 return x
44 return x
45 def flush(self):
45 def flush(self):
46 return ""
46 return ""
47
47
48 bundletypes = {
48 bundletypes = {
49 "": ("", nocompress), # only when using unbundle on ssh and old http servers
49 "": ("", nocompress), # only when using unbundle on ssh and old http servers
50 # since the unification ssh accepts a header but there
50 # since the unification ssh accepts a header but there
51 # is no capability signaling it.
51 # is no capability signaling it.
52 "HG10UN": ("HG10UN", nocompress),
52 "HG10UN": ("HG10UN", nocompress),
53 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
53 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
54 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
54 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
55 }
55 }
56
56
57 # hgweb uses this list to communicate its preferred type
57 # hgweb uses this list to communicate its preferred type
58 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
58 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
59
59
60 def writebundle(cg, filename, bundletype):
60 def writebundle(cg, filename, bundletype):
61 """Write a bundle file and return its filename.
61 """Write a bundle file and return its filename.
62
62
63 Existing files will not be overwritten.
63 Existing files will not be overwritten.
64 If no filename is specified, a temporary file is created.
64 If no filename is specified, a temporary file is created.
65 bz2 compression can be turned off.
65 bz2 compression can be turned off.
66 The bundle file will be deleted in case of errors.
66 The bundle file will be deleted in case of errors.
67 """
67 """
68
68
69 fh = None
69 fh = None
70 cleanup = None
70 cleanup = None
71 try:
71 try:
72 if filename:
72 if filename:
73 fh = open(filename, "wb")
73 fh = open(filename, "wb")
74 else:
74 else:
75 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
75 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
76 fh = os.fdopen(fd, "wb")
76 fh = os.fdopen(fd, "wb")
77 cleanup = filename
77 cleanup = filename
78
78
79 header, compressor = bundletypes[bundletype]
79 header, compressor = bundletypes[bundletype]
80 fh.write(header)
80 fh.write(header)
81 z = compressor()
81 z = compressor()
82
82
83 # parse the changegroup data, otherwise we will block
83 # parse the changegroup data, otherwise we will block
84 # in case of sshrepo because we don't know the end of the stream
84 # in case of sshrepo because we don't know the end of the stream
85
85
86 # an empty chunkgroup is the end of the changegroup
86 # an empty chunkgroup is the end of the changegroup
87 # a changegroup has at least 2 chunkgroups (changelog and manifest).
87 # a changegroup has at least 2 chunkgroups (changelog and manifest).
88 # after that, an empty chunkgroup is the end of the changegroup
88 # after that, an empty chunkgroup is the end of the changegroup
89 empty = False
89 empty = False
90 count = 0
90 count = 0
91 while not empty or count <= 2:
91 while not empty or count <= 2:
92 empty = True
92 empty = True
93 count += 1
93 count += 1
94 while 1:
94 while True:
95 chunk = getchunk(cg)
95 chunk = getchunk(cg)
96 if not chunk:
96 if not chunk:
97 break
97 break
98 empty = False
98 empty = False
99 fh.write(z.compress(chunkheader(len(chunk))))
99 fh.write(z.compress(chunkheader(len(chunk))))
100 pos = 0
100 pos = 0
101 while pos < len(chunk):
101 while pos < len(chunk):
102 next = pos + 2**20
102 next = pos + 2**20
103 fh.write(z.compress(chunk[pos:next]))
103 fh.write(z.compress(chunk[pos:next]))
104 pos = next
104 pos = next
105 fh.write(z.compress(closechunk()))
105 fh.write(z.compress(closechunk()))
106 fh.write(z.flush())
106 fh.write(z.flush())
107 cleanup = None
107 cleanup = None
108 return filename
108 return filename
109 finally:
109 finally:
110 if fh is not None:
110 if fh is not None:
111 fh.close()
111 fh.close()
112 if cleanup is not None:
112 if cleanup is not None:
113 os.unlink(cleanup)
113 os.unlink(cleanup)
114
114
115 def decompressor(fh, alg):
115 def decompressor(fh, alg):
116 if alg == 'UN':
116 if alg == 'UN':
117 return fh
117 return fh
118 elif alg == 'GZ':
118 elif alg == 'GZ':
119 def generator(f):
119 def generator(f):
120 zd = zlib.decompressobj()
120 zd = zlib.decompressobj()
121 for chunk in f:
121 for chunk in f:
122 yield zd.decompress(chunk)
122 yield zd.decompress(chunk)
123 elif alg == 'BZ':
123 elif alg == 'BZ':
124 def generator(f):
124 def generator(f):
125 zd = bz2.BZ2Decompressor()
125 zd = bz2.BZ2Decompressor()
126 zd.decompress("BZ")
126 zd.decompress("BZ")
127 for chunk in util.filechunkiter(f, 4096):
127 for chunk in util.filechunkiter(f, 4096):
128 yield zd.decompress(chunk)
128 yield zd.decompress(chunk)
129 else:
129 else:
130 raise util.Abort("unknown bundle compression '%s'" % alg)
130 raise util.Abort("unknown bundle compression '%s'" % alg)
131 return util.chunkbuffer(generator(fh))
131 return util.chunkbuffer(generator(fh))
132
132
133 class unbundle10(object):
133 class unbundle10(object):
134 deltaheader = _BUNDLE10_DELTA_HEADER
134 deltaheader = _BUNDLE10_DELTA_HEADER
135 deltaheadersize = struct.calcsize(deltaheader)
135 deltaheadersize = struct.calcsize(deltaheader)
136 def __init__(self, fh, alg):
136 def __init__(self, fh, alg):
137 self._stream = decompressor(fh, alg)
137 self._stream = decompressor(fh, alg)
138 self._type = alg
138 self._type = alg
139 self.callback = None
139 self.callback = None
140 def compressed(self):
140 def compressed(self):
141 return self._type != 'UN'
141 return self._type != 'UN'
142 def read(self, l):
142 def read(self, l):
143 return self._stream.read(l)
143 return self._stream.read(l)
144 def seek(self, pos):
144 def seek(self, pos):
145 return self._stream.seek(pos)
145 return self._stream.seek(pos)
146 def tell(self):
146 def tell(self):
147 return self._stream.tell()
147 return self._stream.tell()
148 def close(self):
148 def close(self):
149 return self._stream.close()
149 return self._stream.close()
150
150
151 def chunklength(self):
151 def chunklength(self):
152 d = readexactly(self._stream, 4)
152 d = readexactly(self._stream, 4)
153 l = struct.unpack(">l", d)[0]
153 l = struct.unpack(">l", d)[0]
154 if l <= 4:
154 if l <= 4:
155 if l:
155 if l:
156 raise util.Abort(_("invalid chunk length %d") % l)
156 raise util.Abort(_("invalid chunk length %d") % l)
157 return 0
157 return 0
158 if self.callback:
158 if self.callback:
159 self.callback()
159 self.callback()
160 return l - 4
160 return l - 4
161
161
162 def changelogheader(self):
162 def changelogheader(self):
163 """v10 does not have a changelog header chunk"""
163 """v10 does not have a changelog header chunk"""
164 return {}
164 return {}
165
165
166 def manifestheader(self):
166 def manifestheader(self):
167 """v10 does not have a manifest header chunk"""
167 """v10 does not have a manifest header chunk"""
168 return {}
168 return {}
169
169
170 def filelogheader(self):
170 def filelogheader(self):
171 """return the header of the filelogs chunk, v10 only has the filename"""
171 """return the header of the filelogs chunk, v10 only has the filename"""
172 l = self.chunklength()
172 l = self.chunklength()
173 if not l:
173 if not l:
174 return {}
174 return {}
175 fname = readexactly(self._stream, l)
175 fname = readexactly(self._stream, l)
176 return dict(filename=fname)
176 return dict(filename=fname)
177
177
178 def _deltaheader(self, headertuple, prevnode):
178 def _deltaheader(self, headertuple, prevnode):
179 node, p1, p2, cs = headertuple
179 node, p1, p2, cs = headertuple
180 if prevnode is None:
180 if prevnode is None:
181 deltabase = p1
181 deltabase = p1
182 else:
182 else:
183 deltabase = prevnode
183 deltabase = prevnode
184 return node, p1, p2, deltabase, cs
184 return node, p1, p2, deltabase, cs
185
185
186 def deltachunk(self, prevnode):
186 def deltachunk(self, prevnode):
187 l = self.chunklength()
187 l = self.chunklength()
188 if not l:
188 if not l:
189 return {}
189 return {}
190 headerdata = readexactly(self._stream, self.deltaheadersize)
190 headerdata = readexactly(self._stream, self.deltaheadersize)
191 header = struct.unpack(self.deltaheader, headerdata)
191 header = struct.unpack(self.deltaheader, headerdata)
192 delta = readexactly(self._stream, l - self.deltaheadersize)
192 delta = readexactly(self._stream, l - self.deltaheadersize)
193 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
193 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
194 return dict(node=node, p1=p1, p2=p2, cs=cs,
194 return dict(node=node, p1=p1, p2=p2, cs=cs,
195 deltabase=deltabase, delta=delta)
195 deltabase=deltabase, delta=delta)
196
196
197 class headerlessfixup(object):
197 class headerlessfixup(object):
198 def __init__(self, fh, h):
198 def __init__(self, fh, h):
199 self._h = h
199 self._h = h
200 self._fh = fh
200 self._fh = fh
201 def read(self, n):
201 def read(self, n):
202 if self._h:
202 if self._h:
203 d, self._h = self._h[:n], self._h[n:]
203 d, self._h = self._h[:n], self._h[n:]
204 if len(d) < n:
204 if len(d) < n:
205 d += readexactly(self._fh, n - len(d))
205 d += readexactly(self._fh, n - len(d))
206 return d
206 return d
207 return readexactly(self._fh, n)
207 return readexactly(self._fh, n)
208
208
209 def readbundle(fh, fname):
209 def readbundle(fh, fname):
210 header = readexactly(fh, 6)
210 header = readexactly(fh, 6)
211
211
212 if not fname:
212 if not fname:
213 fname = "stream"
213 fname = "stream"
214 if not header.startswith('HG') and header.startswith('\0'):
214 if not header.startswith('HG') and header.startswith('\0'):
215 fh = headerlessfixup(fh, header)
215 fh = headerlessfixup(fh, header)
216 header = "HG10UN"
216 header = "HG10UN"
217
217
218 magic, version, alg = header[0:2], header[2:4], header[4:6]
218 magic, version, alg = header[0:2], header[2:4], header[4:6]
219
219
220 if magic != 'HG':
220 if magic != 'HG':
221 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
221 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
222 if version != '10':
222 if version != '10':
223 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
223 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
224 return unbundle10(fh, alg)
224 return unbundle10(fh, alg)
225
225
226 class bundle10(object):
226 class bundle10(object):
227 deltaheader = _BUNDLE10_DELTA_HEADER
227 deltaheader = _BUNDLE10_DELTA_HEADER
228 def __init__(self, lookup):
228 def __init__(self, lookup):
229 self._lookup = lookup
229 self._lookup = lookup
230 def close(self):
230 def close(self):
231 return closechunk()
231 return closechunk()
232 def fileheader(self, fname):
232 def fileheader(self, fname):
233 return chunkheader(len(fname)) + fname
233 return chunkheader(len(fname)) + fname
234 def revchunk(self, revlog, rev, prev):
234 def revchunk(self, revlog, rev, prev):
235 node = revlog.node(rev)
235 node = revlog.node(rev)
236 p1, p2 = revlog.parentrevs(rev)
236 p1, p2 = revlog.parentrevs(rev)
237 base = prev
237 base = prev
238
238
239 prefix = ''
239 prefix = ''
240 if base == nullrev:
240 if base == nullrev:
241 delta = revlog.revision(node)
241 delta = revlog.revision(node)
242 prefix = mdiff.trivialdiffheader(len(delta))
242 prefix = mdiff.trivialdiffheader(len(delta))
243 else:
243 else:
244 delta = revlog.revdiff(base, rev)
244 delta = revlog.revdiff(base, rev)
245 linknode = self._lookup(revlog, node)
245 linknode = self._lookup(revlog, node)
246 p1n, p2n = revlog.parents(node)
246 p1n, p2n = revlog.parents(node)
247 basenode = revlog.node(base)
247 basenode = revlog.node(base)
248 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
248 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
249 meta += prefix
249 meta += prefix
250 l = len(meta) + len(delta)
250 l = len(meta) + len(delta)
251 yield chunkheader(l)
251 yield chunkheader(l)
252 yield meta
252 yield meta
253 yield delta
253 yield delta
254 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
254 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
255 # do nothing with basenode, it is implicitly the previous one in HG10
255 # do nothing with basenode, it is implicitly the previous one in HG10
256 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
256 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
@@ -1,5082 +1,5082
1 # commands.py - command processing for mercurial
1 # commands.py - command processing for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, bin, nullid, nullrev, short
8 from node import hex, bin, nullid, nullrev, short
9 from lock import release
9 from lock import release
10 from i18n import _, gettext
10 from i18n import _, gettext
11 import os, re, sys, difflib, time, tempfile, errno
11 import os, re, sys, difflib, time, tempfile, errno
12 import hg, scmutil, util, revlog, extensions, copies, error, bookmarks
12 import hg, scmutil, util, revlog, extensions, copies, error, bookmarks
13 import patch, help, url, encoding, templatekw, discovery
13 import patch, help, url, encoding, templatekw, discovery
14 import archival, changegroup, cmdutil, sshserver, hbisect, hgweb, hgweb.server
14 import archival, changegroup, cmdutil, sshserver, hbisect, hgweb, hgweb.server
15 import merge as mergemod
15 import merge as mergemod
16 import minirst, revset
16 import minirst, revset
17 import dagparser, context, simplemerge
17 import dagparser, context, simplemerge
18 import random, setdiscovery, treediscovery, dagutil
18 import random, setdiscovery, treediscovery, dagutil
19
19
20 table = {}
20 table = {}
21
21
22 command = cmdutil.command(table)
22 command = cmdutil.command(table)
23
23
24 # common command options
24 # common command options
25
25
26 globalopts = [
26 globalopts = [
27 ('R', 'repository', '',
27 ('R', 'repository', '',
28 _('repository root directory or name of overlay bundle file'),
28 _('repository root directory or name of overlay bundle file'),
29 _('REPO')),
29 _('REPO')),
30 ('', 'cwd', '',
30 ('', 'cwd', '',
31 _('change working directory'), _('DIR')),
31 _('change working directory'), _('DIR')),
32 ('y', 'noninteractive', None,
32 ('y', 'noninteractive', None,
33 _('do not prompt, assume \'yes\' for any required answers')),
33 _('do not prompt, assume \'yes\' for any required answers')),
34 ('q', 'quiet', None, _('suppress output')),
34 ('q', 'quiet', None, _('suppress output')),
35 ('v', 'verbose', None, _('enable additional output')),
35 ('v', 'verbose', None, _('enable additional output')),
36 ('', 'config', [],
36 ('', 'config', [],
37 _('set/override config option (use \'section.name=value\')'),
37 _('set/override config option (use \'section.name=value\')'),
38 _('CONFIG')),
38 _('CONFIG')),
39 ('', 'debug', None, _('enable debugging output')),
39 ('', 'debug', None, _('enable debugging output')),
40 ('', 'debugger', None, _('start debugger')),
40 ('', 'debugger', None, _('start debugger')),
41 ('', 'encoding', encoding.encoding, _('set the charset encoding'),
41 ('', 'encoding', encoding.encoding, _('set the charset encoding'),
42 _('ENCODE')),
42 _('ENCODE')),
43 ('', 'encodingmode', encoding.encodingmode,
43 ('', 'encodingmode', encoding.encodingmode,
44 _('set the charset encoding mode'), _('MODE')),
44 _('set the charset encoding mode'), _('MODE')),
45 ('', 'traceback', None, _('always print a traceback on exception')),
45 ('', 'traceback', None, _('always print a traceback on exception')),
46 ('', 'time', None, _('time how long the command takes')),
46 ('', 'time', None, _('time how long the command takes')),
47 ('', 'profile', None, _('print command execution profile')),
47 ('', 'profile', None, _('print command execution profile')),
48 ('', 'version', None, _('output version information and exit')),
48 ('', 'version', None, _('output version information and exit')),
49 ('h', 'help', None, _('display help and exit')),
49 ('h', 'help', None, _('display help and exit')),
50 ]
50 ]
51
51
52 dryrunopts = [('n', 'dry-run', None,
52 dryrunopts = [('n', 'dry-run', None,
53 _('do not perform actions, just print output'))]
53 _('do not perform actions, just print output'))]
54
54
55 remoteopts = [
55 remoteopts = [
56 ('e', 'ssh', '',
56 ('e', 'ssh', '',
57 _('specify ssh command to use'), _('CMD')),
57 _('specify ssh command to use'), _('CMD')),
58 ('', 'remotecmd', '',
58 ('', 'remotecmd', '',
59 _('specify hg command to run on the remote side'), _('CMD')),
59 _('specify hg command to run on the remote side'), _('CMD')),
60 ('', 'insecure', None,
60 ('', 'insecure', None,
61 _('do not verify server certificate (ignoring web.cacerts config)')),
61 _('do not verify server certificate (ignoring web.cacerts config)')),
62 ]
62 ]
63
63
64 walkopts = [
64 walkopts = [
65 ('I', 'include', [],
65 ('I', 'include', [],
66 _('include names matching the given patterns'), _('PATTERN')),
66 _('include names matching the given patterns'), _('PATTERN')),
67 ('X', 'exclude', [],
67 ('X', 'exclude', [],
68 _('exclude names matching the given patterns'), _('PATTERN')),
68 _('exclude names matching the given patterns'), _('PATTERN')),
69 ]
69 ]
70
70
71 commitopts = [
71 commitopts = [
72 ('m', 'message', '',
72 ('m', 'message', '',
73 _('use text as commit message'), _('TEXT')),
73 _('use text as commit message'), _('TEXT')),
74 ('l', 'logfile', '',
74 ('l', 'logfile', '',
75 _('read commit message from file'), _('FILE')),
75 _('read commit message from file'), _('FILE')),
76 ]
76 ]
77
77
78 commitopts2 = [
78 commitopts2 = [
79 ('d', 'date', '',
79 ('d', 'date', '',
80 _('record the specified date as commit date'), _('DATE')),
80 _('record the specified date as commit date'), _('DATE')),
81 ('u', 'user', '',
81 ('u', 'user', '',
82 _('record the specified user as committer'), _('USER')),
82 _('record the specified user as committer'), _('USER')),
83 ]
83 ]
84
84
85 templateopts = [
85 templateopts = [
86 ('', 'style', '',
86 ('', 'style', '',
87 _('display using template map file'), _('STYLE')),
87 _('display using template map file'), _('STYLE')),
88 ('', 'template', '',
88 ('', 'template', '',
89 _('display with template'), _('TEMPLATE')),
89 _('display with template'), _('TEMPLATE')),
90 ]
90 ]
91
91
92 logopts = [
92 logopts = [
93 ('p', 'patch', None, _('show patch')),
93 ('p', 'patch', None, _('show patch')),
94 ('g', 'git', None, _('use git extended diff format')),
94 ('g', 'git', None, _('use git extended diff format')),
95 ('l', 'limit', '',
95 ('l', 'limit', '',
96 _('limit number of changes displayed'), _('NUM')),
96 _('limit number of changes displayed'), _('NUM')),
97 ('M', 'no-merges', None, _('do not show merges')),
97 ('M', 'no-merges', None, _('do not show merges')),
98 ('', 'stat', None, _('output diffstat-style summary of changes')),
98 ('', 'stat', None, _('output diffstat-style summary of changes')),
99 ] + templateopts
99 ] + templateopts
100
100
101 diffopts = [
101 diffopts = [
102 ('a', 'text', None, _('treat all files as text')),
102 ('a', 'text', None, _('treat all files as text')),
103 ('g', 'git', None, _('use git extended diff format')),
103 ('g', 'git', None, _('use git extended diff format')),
104 ('', 'nodates', None, _('omit dates from diff headers'))
104 ('', 'nodates', None, _('omit dates from diff headers'))
105 ]
105 ]
106
106
107 diffopts2 = [
107 diffopts2 = [
108 ('p', 'show-function', None, _('show which function each change is in')),
108 ('p', 'show-function', None, _('show which function each change is in')),
109 ('', 'reverse', None, _('produce a diff that undoes the changes')),
109 ('', 'reverse', None, _('produce a diff that undoes the changes')),
110 ('w', 'ignore-all-space', None,
110 ('w', 'ignore-all-space', None,
111 _('ignore white space when comparing lines')),
111 _('ignore white space when comparing lines')),
112 ('b', 'ignore-space-change', None,
112 ('b', 'ignore-space-change', None,
113 _('ignore changes in the amount of white space')),
113 _('ignore changes in the amount of white space')),
114 ('B', 'ignore-blank-lines', None,
114 ('B', 'ignore-blank-lines', None,
115 _('ignore changes whose lines are all blank')),
115 _('ignore changes whose lines are all blank')),
116 ('U', 'unified', '',
116 ('U', 'unified', '',
117 _('number of lines of context to show'), _('NUM')),
117 _('number of lines of context to show'), _('NUM')),
118 ('', 'stat', None, _('output diffstat-style summary of changes')),
118 ('', 'stat', None, _('output diffstat-style summary of changes')),
119 ]
119 ]
120
120
121 similarityopts = [
121 similarityopts = [
122 ('s', 'similarity', '',
122 ('s', 'similarity', '',
123 _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
123 _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
124 ]
124 ]
125
125
126 subrepoopts = [
126 subrepoopts = [
127 ('S', 'subrepos', None,
127 ('S', 'subrepos', None,
128 _('recurse into subrepositories'))
128 _('recurse into subrepositories'))
129 ]
129 ]
130
130
131 # Commands start here, listed alphabetically
131 # Commands start here, listed alphabetically
132
132
133 @command('^add',
133 @command('^add',
134 walkopts + subrepoopts + dryrunopts,
134 walkopts + subrepoopts + dryrunopts,
135 _('[OPTION]... [FILE]...'))
135 _('[OPTION]... [FILE]...'))
136 def add(ui, repo, *pats, **opts):
136 def add(ui, repo, *pats, **opts):
137 """add the specified files on the next commit
137 """add the specified files on the next commit
138
138
139 Schedule files to be version controlled and added to the
139 Schedule files to be version controlled and added to the
140 repository.
140 repository.
141
141
142 The files will be added to the repository at the next commit. To
142 The files will be added to the repository at the next commit. To
143 undo an add before that, see :hg:`forget`.
143 undo an add before that, see :hg:`forget`.
144
144
145 If no names are given, add all files to the repository.
145 If no names are given, add all files to the repository.
146
146
147 .. container:: verbose
147 .. container:: verbose
148
148
149 An example showing how new (unknown) files are added
149 An example showing how new (unknown) files are added
150 automatically by :hg:`add`::
150 automatically by :hg:`add`::
151
151
152 $ ls
152 $ ls
153 foo.c
153 foo.c
154 $ hg status
154 $ hg status
155 ? foo.c
155 ? foo.c
156 $ hg add
156 $ hg add
157 adding foo.c
157 adding foo.c
158 $ hg status
158 $ hg status
159 A foo.c
159 A foo.c
160
160
161 Returns 0 if all files are successfully added.
161 Returns 0 if all files are successfully added.
162 """
162 """
163
163
164 m = scmutil.match(repo, pats, opts)
164 m = scmutil.match(repo, pats, opts)
165 rejected = cmdutil.add(ui, repo, m, opts.get('dry_run'),
165 rejected = cmdutil.add(ui, repo, m, opts.get('dry_run'),
166 opts.get('subrepos'), prefix="")
166 opts.get('subrepos'), prefix="")
167 return rejected and 1 or 0
167 return rejected and 1 or 0
168
168
169 @command('addremove',
169 @command('addremove',
170 similarityopts + walkopts + dryrunopts,
170 similarityopts + walkopts + dryrunopts,
171 _('[OPTION]... [FILE]...'))
171 _('[OPTION]... [FILE]...'))
172 def addremove(ui, repo, *pats, **opts):
172 def addremove(ui, repo, *pats, **opts):
173 """add all new files, delete all missing files
173 """add all new files, delete all missing files
174
174
175 Add all new files and remove all missing files from the
175 Add all new files and remove all missing files from the
176 repository.
176 repository.
177
177
178 New files are ignored if they match any of the patterns in
178 New files are ignored if they match any of the patterns in
179 ``.hgignore``. As with add, these changes take effect at the next
179 ``.hgignore``. As with add, these changes take effect at the next
180 commit.
180 commit.
181
181
182 Use the -s/--similarity option to detect renamed files. With a
182 Use the -s/--similarity option to detect renamed files. With a
183 parameter greater than 0, this compares every removed file with
183 parameter greater than 0, this compares every removed file with
184 every added file and records those similar enough as renames. This
184 every added file and records those similar enough as renames. This
185 option takes a percentage between 0 (disabled) and 100 (files must
185 option takes a percentage between 0 (disabled) and 100 (files must
186 be identical) as its parameter. Detecting renamed files this way
186 be identical) as its parameter. Detecting renamed files this way
187 can be expensive. After using this option, :hg:`status -C` can be
187 can be expensive. After using this option, :hg:`status -C` can be
188 used to check which files were identified as moved or renamed.
188 used to check which files were identified as moved or renamed.
189
189
190 Returns 0 if all files are successfully added.
190 Returns 0 if all files are successfully added.
191 """
191 """
192 try:
192 try:
193 sim = float(opts.get('similarity') or 100)
193 sim = float(opts.get('similarity') or 100)
194 except ValueError:
194 except ValueError:
195 raise util.Abort(_('similarity must be a number'))
195 raise util.Abort(_('similarity must be a number'))
196 if sim < 0 or sim > 100:
196 if sim < 0 or sim > 100:
197 raise util.Abort(_('similarity must be between 0 and 100'))
197 raise util.Abort(_('similarity must be between 0 and 100'))
198 return scmutil.addremove(repo, pats, opts, similarity=sim / 100.0)
198 return scmutil.addremove(repo, pats, opts, similarity=sim / 100.0)
199
199
200 @command('^annotate|blame',
200 @command('^annotate|blame',
201 [('r', 'rev', '', _('annotate the specified revision'), _('REV')),
201 [('r', 'rev', '', _('annotate the specified revision'), _('REV')),
202 ('', 'follow', None,
202 ('', 'follow', None,
203 _('follow copies/renames and list the filename (DEPRECATED)')),
203 _('follow copies/renames and list the filename (DEPRECATED)')),
204 ('', 'no-follow', None, _("don't follow copies and renames")),
204 ('', 'no-follow', None, _("don't follow copies and renames")),
205 ('a', 'text', None, _('treat all files as text')),
205 ('a', 'text', None, _('treat all files as text')),
206 ('u', 'user', None, _('list the author (long with -v)')),
206 ('u', 'user', None, _('list the author (long with -v)')),
207 ('f', 'file', None, _('list the filename')),
207 ('f', 'file', None, _('list the filename')),
208 ('d', 'date', None, _('list the date (short with -q)')),
208 ('d', 'date', None, _('list the date (short with -q)')),
209 ('n', 'number', None, _('list the revision number (default)')),
209 ('n', 'number', None, _('list the revision number (default)')),
210 ('c', 'changeset', None, _('list the changeset')),
210 ('c', 'changeset', None, _('list the changeset')),
211 ('l', 'line-number', None, _('show line number at the first appearance'))
211 ('l', 'line-number', None, _('show line number at the first appearance'))
212 ] + walkopts,
212 ] + walkopts,
213 _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'))
213 _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'))
214 def annotate(ui, repo, *pats, **opts):
214 def annotate(ui, repo, *pats, **opts):
215 """show changeset information by line for each file
215 """show changeset information by line for each file
216
216
217 List changes in files, showing the revision id responsible for
217 List changes in files, showing the revision id responsible for
218 each line
218 each line
219
219
220 This command is useful for discovering when a change was made and
220 This command is useful for discovering when a change was made and
221 by whom.
221 by whom.
222
222
223 Without the -a/--text option, annotate will avoid processing files
223 Without the -a/--text option, annotate will avoid processing files
224 it detects as binary. With -a, annotate will annotate the file
224 it detects as binary. With -a, annotate will annotate the file
225 anyway, although the results will probably be neither useful
225 anyway, although the results will probably be neither useful
226 nor desirable.
226 nor desirable.
227
227
228 Returns 0 on success.
228 Returns 0 on success.
229 """
229 """
230 if opts.get('follow'):
230 if opts.get('follow'):
231 # --follow is deprecated and now just an alias for -f/--file
231 # --follow is deprecated and now just an alias for -f/--file
232 # to mimic the behavior of Mercurial before version 1.5
232 # to mimic the behavior of Mercurial before version 1.5
233 opts['file'] = True
233 opts['file'] = True
234
234
235 datefunc = ui.quiet and util.shortdate or util.datestr
235 datefunc = ui.quiet and util.shortdate or util.datestr
236 getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
236 getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
237
237
238 if not pats:
238 if not pats:
239 raise util.Abort(_('at least one filename or pattern is required'))
239 raise util.Abort(_('at least one filename or pattern is required'))
240
240
241 opmap = [('user', ' ', lambda x: ui.shortuser(x[0].user())),
241 opmap = [('user', ' ', lambda x: ui.shortuser(x[0].user())),
242 ('number', ' ', lambda x: str(x[0].rev())),
242 ('number', ' ', lambda x: str(x[0].rev())),
243 ('changeset', ' ', lambda x: short(x[0].node())),
243 ('changeset', ' ', lambda x: short(x[0].node())),
244 ('date', ' ', getdate),
244 ('date', ' ', getdate),
245 ('file', ' ', lambda x: x[0].path()),
245 ('file', ' ', lambda x: x[0].path()),
246 ('line_number', ':', lambda x: str(x[1])),
246 ('line_number', ':', lambda x: str(x[1])),
247 ]
247 ]
248
248
249 if (not opts.get('user') and not opts.get('changeset')
249 if (not opts.get('user') and not opts.get('changeset')
250 and not opts.get('date') and not opts.get('file')):
250 and not opts.get('date') and not opts.get('file')):
251 opts['number'] = True
251 opts['number'] = True
252
252
253 linenumber = opts.get('line_number') is not None
253 linenumber = opts.get('line_number') is not None
254 if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
254 if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
255 raise util.Abort(_('at least one of -n/-c is required for -l'))
255 raise util.Abort(_('at least one of -n/-c is required for -l'))
256
256
257 funcmap = [(func, sep) for op, sep, func in opmap if opts.get(op)]
257 funcmap = [(func, sep) for op, sep, func in opmap if opts.get(op)]
258 funcmap[0] = (funcmap[0][0], '') # no separator in front of first column
258 funcmap[0] = (funcmap[0][0], '') # no separator in front of first column
259
259
260 def bad(x, y):
260 def bad(x, y):
261 raise util.Abort("%s: %s" % (x, y))
261 raise util.Abort("%s: %s" % (x, y))
262
262
263 ctx = scmutil.revsingle(repo, opts.get('rev'))
263 ctx = scmutil.revsingle(repo, opts.get('rev'))
264 m = scmutil.match(repo, pats, opts)
264 m = scmutil.match(repo, pats, opts)
265 m.bad = bad
265 m.bad = bad
266 follow = not opts.get('no_follow')
266 follow = not opts.get('no_follow')
267 for abs in ctx.walk(m):
267 for abs in ctx.walk(m):
268 fctx = ctx[abs]
268 fctx = ctx[abs]
269 if not opts.get('text') and util.binary(fctx.data()):
269 if not opts.get('text') and util.binary(fctx.data()):
270 ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
270 ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
271 continue
271 continue
272
272
273 lines = fctx.annotate(follow=follow, linenumber=linenumber)
273 lines = fctx.annotate(follow=follow, linenumber=linenumber)
274 pieces = []
274 pieces = []
275
275
276 for f, sep in funcmap:
276 for f, sep in funcmap:
277 l = [f(n) for n, dummy in lines]
277 l = [f(n) for n, dummy in lines]
278 if l:
278 if l:
279 sized = [(x, encoding.colwidth(x)) for x in l]
279 sized = [(x, encoding.colwidth(x)) for x in l]
280 ml = max([w for x, w in sized])
280 ml = max([w for x, w in sized])
281 pieces.append(["%s%s%s" % (sep, ' ' * (ml - w), x)
281 pieces.append(["%s%s%s" % (sep, ' ' * (ml - w), x)
282 for x, w in sized])
282 for x, w in sized])
283
283
284 if pieces:
284 if pieces:
285 for p, l in zip(zip(*pieces), lines):
285 for p, l in zip(zip(*pieces), lines):
286 ui.write("%s: %s" % ("".join(p), l[1]))
286 ui.write("%s: %s" % ("".join(p), l[1]))
287
287
288 @command('archive',
288 @command('archive',
289 [('', 'no-decode', None, _('do not pass files through decoders')),
289 [('', 'no-decode', None, _('do not pass files through decoders')),
290 ('p', 'prefix', '', _('directory prefix for files in archive'),
290 ('p', 'prefix', '', _('directory prefix for files in archive'),
291 _('PREFIX')),
291 _('PREFIX')),
292 ('r', 'rev', '', _('revision to distribute'), _('REV')),
292 ('r', 'rev', '', _('revision to distribute'), _('REV')),
293 ('t', 'type', '', _('type of distribution to create'), _('TYPE')),
293 ('t', 'type', '', _('type of distribution to create'), _('TYPE')),
294 ] + subrepoopts + walkopts,
294 ] + subrepoopts + walkopts,
295 _('[OPTION]... DEST'))
295 _('[OPTION]... DEST'))
296 def archive(ui, repo, dest, **opts):
296 def archive(ui, repo, dest, **opts):
297 '''create an unversioned archive of a repository revision
297 '''create an unversioned archive of a repository revision
298
298
299 By default, the revision used is the parent of the working
299 By default, the revision used is the parent of the working
300 directory; use -r/--rev to specify a different revision.
300 directory; use -r/--rev to specify a different revision.
301
301
302 The archive type is automatically detected based on file
302 The archive type is automatically detected based on file
303 extension (or override using -t/--type).
303 extension (or override using -t/--type).
304
304
305 Valid types are:
305 Valid types are:
306
306
307 :``files``: a directory full of files (default)
307 :``files``: a directory full of files (default)
308 :``tar``: tar archive, uncompressed
308 :``tar``: tar archive, uncompressed
309 :``tbz2``: tar archive, compressed using bzip2
309 :``tbz2``: tar archive, compressed using bzip2
310 :``tgz``: tar archive, compressed using gzip
310 :``tgz``: tar archive, compressed using gzip
311 :``uzip``: zip archive, uncompressed
311 :``uzip``: zip archive, uncompressed
312 :``zip``: zip archive, compressed using deflate
312 :``zip``: zip archive, compressed using deflate
313
313
314 The exact name of the destination archive or directory is given
314 The exact name of the destination archive or directory is given
315 using a format string; see :hg:`help export` for details.
315 using a format string; see :hg:`help export` for details.
316
316
317 Each member added to an archive file has a directory prefix
317 Each member added to an archive file has a directory prefix
318 prepended. Use -p/--prefix to specify a format string for the
318 prepended. Use -p/--prefix to specify a format string for the
319 prefix. The default is the basename of the archive, with suffixes
319 prefix. The default is the basename of the archive, with suffixes
320 removed.
320 removed.
321
321
322 Returns 0 on success.
322 Returns 0 on success.
323 '''
323 '''
324
324
325 ctx = scmutil.revsingle(repo, opts.get('rev'))
325 ctx = scmutil.revsingle(repo, opts.get('rev'))
326 if not ctx:
326 if not ctx:
327 raise util.Abort(_('no working directory: please specify a revision'))
327 raise util.Abort(_('no working directory: please specify a revision'))
328 node = ctx.node()
328 node = ctx.node()
329 dest = cmdutil.makefilename(repo, dest, node)
329 dest = cmdutil.makefilename(repo, dest, node)
330 if os.path.realpath(dest) == repo.root:
330 if os.path.realpath(dest) == repo.root:
331 raise util.Abort(_('repository root cannot be destination'))
331 raise util.Abort(_('repository root cannot be destination'))
332
332
333 kind = opts.get('type') or archival.guesskind(dest) or 'files'
333 kind = opts.get('type') or archival.guesskind(dest) or 'files'
334 prefix = opts.get('prefix')
334 prefix = opts.get('prefix')
335
335
336 if dest == '-':
336 if dest == '-':
337 if kind == 'files':
337 if kind == 'files':
338 raise util.Abort(_('cannot archive plain files to stdout'))
338 raise util.Abort(_('cannot archive plain files to stdout'))
339 dest = sys.stdout
339 dest = sys.stdout
340 if not prefix:
340 if not prefix:
341 prefix = os.path.basename(repo.root) + '-%h'
341 prefix = os.path.basename(repo.root) + '-%h'
342
342
343 prefix = cmdutil.makefilename(repo, prefix, node)
343 prefix = cmdutil.makefilename(repo, prefix, node)
344 matchfn = scmutil.match(repo, [], opts)
344 matchfn = scmutil.match(repo, [], opts)
345 archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
345 archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
346 matchfn, prefix, subrepos=opts.get('subrepos'))
346 matchfn, prefix, subrepos=opts.get('subrepos'))
347
347
348 @command('backout',
348 @command('backout',
349 [('', 'merge', None, _('merge with old dirstate parent after backout')),
349 [('', 'merge', None, _('merge with old dirstate parent after backout')),
350 ('', 'parent', '', _('parent to choose when backing out merge'), _('REV')),
350 ('', 'parent', '', _('parent to choose when backing out merge'), _('REV')),
351 ('t', 'tool', '', _('specify merge tool')),
351 ('t', 'tool', '', _('specify merge tool')),
352 ('r', 'rev', '', _('revision to backout'), _('REV')),
352 ('r', 'rev', '', _('revision to backout'), _('REV')),
353 ] + walkopts + commitopts + commitopts2,
353 ] + walkopts + commitopts + commitopts2,
354 _('[OPTION]... [-r] REV'))
354 _('[OPTION]... [-r] REV'))
355 def backout(ui, repo, node=None, rev=None, **opts):
355 def backout(ui, repo, node=None, rev=None, **opts):
356 '''reverse effect of earlier changeset
356 '''reverse effect of earlier changeset
357
357
358 Prepare a new changeset with the effect of REV undone in the
358 Prepare a new changeset with the effect of REV undone in the
359 current working directory.
359 current working directory.
360
360
361 If REV is the parent of the working directory, then this new changeset
361 If REV is the parent of the working directory, then this new changeset
362 is committed automatically. Otherwise, hg needs to merge the
362 is committed automatically. Otherwise, hg needs to merge the
363 changes and the merged result is left uncommitted.
363 changes and the merged result is left uncommitted.
364
364
365 By default, the pending changeset will have one parent,
365 By default, the pending changeset will have one parent,
366 maintaining a linear history. With --merge, the pending changeset
366 maintaining a linear history. With --merge, the pending changeset
367 will instead have two parents: the old parent of the working
367 will instead have two parents: the old parent of the working
368 directory and a new child of REV that simply undoes REV.
368 directory and a new child of REV that simply undoes REV.
369
369
370 Before version 1.7, the behavior without --merge was equivalent to
370 Before version 1.7, the behavior without --merge was equivalent to
371 specifying --merge followed by :hg:`update --clean .` to cancel
371 specifying --merge followed by :hg:`update --clean .` to cancel
372 the merge and leave the child of REV as a head to be merged
372 the merge and leave the child of REV as a head to be merged
373 separately.
373 separately.
374
374
375 See :hg:`help dates` for a list of formats valid for -d/--date.
375 See :hg:`help dates` for a list of formats valid for -d/--date.
376
376
377 Returns 0 on success.
377 Returns 0 on success.
378 '''
378 '''
379 if rev and node:
379 if rev and node:
380 raise util.Abort(_("please specify just one revision"))
380 raise util.Abort(_("please specify just one revision"))
381
381
382 if not rev:
382 if not rev:
383 rev = node
383 rev = node
384
384
385 if not rev:
385 if not rev:
386 raise util.Abort(_("please specify a revision to backout"))
386 raise util.Abort(_("please specify a revision to backout"))
387
387
388 date = opts.get('date')
388 date = opts.get('date')
389 if date:
389 if date:
390 opts['date'] = util.parsedate(date)
390 opts['date'] = util.parsedate(date)
391
391
392 cmdutil.bailifchanged(repo)
392 cmdutil.bailifchanged(repo)
393 node = scmutil.revsingle(repo, rev).node()
393 node = scmutil.revsingle(repo, rev).node()
394
394
395 op1, op2 = repo.dirstate.parents()
395 op1, op2 = repo.dirstate.parents()
396 a = repo.changelog.ancestor(op1, node)
396 a = repo.changelog.ancestor(op1, node)
397 if a != node:
397 if a != node:
398 raise util.Abort(_('cannot backout change on a different branch'))
398 raise util.Abort(_('cannot backout change on a different branch'))
399
399
400 p1, p2 = repo.changelog.parents(node)
400 p1, p2 = repo.changelog.parents(node)
401 if p1 == nullid:
401 if p1 == nullid:
402 raise util.Abort(_('cannot backout a change with no parents'))
402 raise util.Abort(_('cannot backout a change with no parents'))
403 if p2 != nullid:
403 if p2 != nullid:
404 if not opts.get('parent'):
404 if not opts.get('parent'):
405 raise util.Abort(_('cannot backout a merge changeset without '
405 raise util.Abort(_('cannot backout a merge changeset without '
406 '--parent'))
406 '--parent'))
407 p = repo.lookup(opts['parent'])
407 p = repo.lookup(opts['parent'])
408 if p not in (p1, p2):
408 if p not in (p1, p2):
409 raise util.Abort(_('%s is not a parent of %s') %
409 raise util.Abort(_('%s is not a parent of %s') %
410 (short(p), short(node)))
410 (short(p), short(node)))
411 parent = p
411 parent = p
412 else:
412 else:
413 if opts.get('parent'):
413 if opts.get('parent'):
414 raise util.Abort(_('cannot use --parent on non-merge changeset'))
414 raise util.Abort(_('cannot use --parent on non-merge changeset'))
415 parent = p1
415 parent = p1
416
416
417 # the backout should appear on the same branch
417 # the backout should appear on the same branch
418 branch = repo.dirstate.branch()
418 branch = repo.dirstate.branch()
419 hg.clean(repo, node, show_stats=False)
419 hg.clean(repo, node, show_stats=False)
420 repo.dirstate.setbranch(branch)
420 repo.dirstate.setbranch(branch)
421 revert_opts = opts.copy()
421 revert_opts = opts.copy()
422 revert_opts['date'] = None
422 revert_opts['date'] = None
423 revert_opts['all'] = True
423 revert_opts['all'] = True
424 revert_opts['rev'] = hex(parent)
424 revert_opts['rev'] = hex(parent)
425 revert_opts['no_backup'] = None
425 revert_opts['no_backup'] = None
426 revert(ui, repo, **revert_opts)
426 revert(ui, repo, **revert_opts)
427 if not opts.get('merge') and op1 != node:
427 if not opts.get('merge') and op1 != node:
428 try:
428 try:
429 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
429 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
430 return hg.update(repo, op1)
430 return hg.update(repo, op1)
431 finally:
431 finally:
432 ui.setconfig('ui', 'forcemerge', '')
432 ui.setconfig('ui', 'forcemerge', '')
433
433
434 commit_opts = opts.copy()
434 commit_opts = opts.copy()
435 commit_opts['addremove'] = False
435 commit_opts['addremove'] = False
436 if not commit_opts['message'] and not commit_opts['logfile']:
436 if not commit_opts['message'] and not commit_opts['logfile']:
437 # we don't translate commit messages
437 # we don't translate commit messages
438 commit_opts['message'] = "Backed out changeset %s" % short(node)
438 commit_opts['message'] = "Backed out changeset %s" % short(node)
439 commit_opts['force_editor'] = True
439 commit_opts['force_editor'] = True
440 commit(ui, repo, **commit_opts)
440 commit(ui, repo, **commit_opts)
441 def nice(node):
441 def nice(node):
442 return '%d:%s' % (repo.changelog.rev(node), short(node))
442 return '%d:%s' % (repo.changelog.rev(node), short(node))
443 ui.status(_('changeset %s backs out changeset %s\n') %
443 ui.status(_('changeset %s backs out changeset %s\n') %
444 (nice(repo.changelog.tip()), nice(node)))
444 (nice(repo.changelog.tip()), nice(node)))
445 if opts.get('merge') and op1 != node:
445 if opts.get('merge') and op1 != node:
446 hg.clean(repo, op1, show_stats=False)
446 hg.clean(repo, op1, show_stats=False)
447 ui.status(_('merging with changeset %s\n')
447 ui.status(_('merging with changeset %s\n')
448 % nice(repo.changelog.tip()))
448 % nice(repo.changelog.tip()))
449 try:
449 try:
450 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
450 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
451 return hg.merge(repo, hex(repo.changelog.tip()))
451 return hg.merge(repo, hex(repo.changelog.tip()))
452 finally:
452 finally:
453 ui.setconfig('ui', 'forcemerge', '')
453 ui.setconfig('ui', 'forcemerge', '')
454 return 0
454 return 0
455
455
@command('bisect',
    [('r', 'reset', False, _('reset bisect state')),
    ('g', 'good', False, _('mark changeset good')),
    ('b', 'bad', False, _('mark changeset bad')),
    ('s', 'skip', False, _('skip testing changeset')),
    ('e', 'extend', False, _('extend the bisect range')),
    ('c', 'command', '', _('use command to check changeset state'), _('CMD')),
    ('U', 'noupdate', False, _('do not update to target'))],
    _("[-gbsr] [-U] [-c CMD] [REV]"))
def bisect(ui, repo, rev=None, extra=None, command=None,
           reset=None, good=None, bad=None, skip=None, extend=None,
           noupdate=None):
    """subdivision search of changesets

    This command helps to find changesets which introduce problems. To
    use, mark the earliest changeset you know exhibits the problem as
    bad, then mark the latest changeset which is free from the problem
    as good. Bisect will update your working directory to a revision
    for testing (unless the -U/--noupdate option is specified). Once
    you have performed tests, mark the working directory as good or
    bad, and bisect will either update to another candidate changeset
    or announce that it has found the bad revision.

    As a shortcut, you can also use the revision argument to mark a
    revision as good or bad without checking it out first.

    If you supply a command, it will be used for automatic bisection.
    Its exit status will be used to mark revisions as good or bad:
    status 0 means good, 125 means to skip the revision, 127
    (command not found) will abort the bisection, and any other
    non-zero exit status means the revision is bad.

    Returns 0 on success.
    """
    def extendbisectrange(nodes, good):
        # bisect is incomplete when it ends on a merge node and
        # one of the parent was not checked.
        parents = repo[nodes[0]].parents()
        if len(parents) > 1:
            side = good and state['bad'] or state['good']
            num = len(set(i.node() for i in parents) & set(side))
            if num == 1:
                return parents[0].ancestor(parents[1])
        return None

    def print_result(nodes, good):
        # report the outcome of a finished (or skip-blocked) bisection
        displayer = cmdutil.show_changeset(ui, repo, {})
        if len(nodes) == 1:
            # narrowed it down to a single revision
            if good:
                ui.write(_("The first good revision is:\n"))
            else:
                ui.write(_("The first bad revision is:\n"))
            displayer.show(repo[nodes[0]])
            extendnode = extendbisectrange(nodes, good)
            if extendnode is not None:
                ui.write(_('Not all ancestors of this changeset have been'
                           ' checked.\nUse bisect --extend to continue the '
                           'bisection from\nthe common ancestor, %s.\n')
                         % extendnode)
        else:
            # multiple possible revisions
            if good:
                ui.write(_("Due to skipped revisions, the first "
                           "good revision could be any of:\n"))
            else:
                ui.write(_("Due to skipped revisions, the first "
                           "bad revision could be any of:\n"))
            for n in nodes:
                displayer.show(repo[n])
        displayer.close()

    def check_state(state, interactive=True):
        # abort unless at least one good and one bad revision are known
        # (or the user is still in the middle of marking revisions)
        if not state['good'] or not state['bad']:
            if (good or bad or skip or reset) and interactive:
                return
            if not state['good']:
                raise util.Abort(_('cannot bisect (no known good revisions)'))
            else:
                raise util.Abort(_('cannot bisect (no known bad revisions)'))
        return True

    # backward compatibility
    if rev in "good bad reset init".split():
        ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
        cmd, rev, extra = rev, extra, None
        if cmd == "good":
            good = True
        elif cmd == "bad":
            bad = True
        else:
            reset = True
    elif extra or good + bad + skip + reset + extend + bool(command) > 1:
        raise util.Abort(_('incompatible arguments'))

    if reset:
        p = repo.join("bisect.state")
        if os.path.exists(p):
            os.unlink(p)
        return

    state = hbisect.load_state(repo)

    if command:
        # automatic mode: run the command, translate its exit status into
        # a good/bad/skip mark, and iterate until the culprit is found
        changesets = 1
        try:
            while changesets:
                # update state
                status = util.system(command)
                if status == 125:
                    transition = "skip"
                elif status == 0:
                    transition = "good"
                # status < 0 means process was killed
                elif status == 127:
                    raise util.Abort(_("failed to execute %s") % command)
                elif status < 0:
                    raise util.Abort(_("%s killed") % command)
                else:
                    transition = "bad"
                ctx = scmutil.revsingle(repo, rev)
                rev = None # clear for future iterations
                state[transition].append(ctx.node())
                ui.status(_('Changeset %d:%s: %s\n') % (ctx, ctx, transition))
                check_state(state, interactive=False)
                # bisect
                nodes, changesets, good = hbisect.bisect(repo.changelog, state)
                # update to next check
                cmdutil.bailifchanged(repo)
                hg.clean(repo, nodes[0], show_stats=False)
        finally:
            hbisect.save_state(repo, state)
        print_result(nodes, good)
        return

    # update state

    if rev:
        nodes = [repo.lookup(i) for i in scmutil.revrange(repo, [rev])]
    else:
        nodes = [repo.lookup('.')]

    if good or bad or skip:
        if good:
            state['good'] += nodes
        elif bad:
            state['bad'] += nodes
        elif skip:
            state['skip'] += nodes
        hbisect.save_state(repo, state)

    if not check_state(state):
        return

    # actually bisect
    nodes, changesets, good = hbisect.bisect(repo.changelog, state)
    if extend:
        if not changesets:
            extendnode = extendbisectrange(nodes, good)
            if extendnode is not None:
                # fix: '%' formatting must happen OUTSIDE _() -- formatting
                # inside the call looked up the already-interpolated string
                # in the translation catalog, which can never match
                ui.write(_("Extending search to changeset %d:%s\n")
                         % (extendnode.rev(), extendnode))
                if noupdate:
                    return
                cmdutil.bailifchanged(repo)
                return hg.clean(repo, extendnode.node())
        raise util.Abort(_("nothing to extend"))

    if changesets == 0:
        print_result(nodes, good)
    else:
        assert len(nodes) == 1 # only a single node can be tested next
        node = nodes[0]
        # compute the approximate number of remaining tests
        tests, size = 0, 2
        while size <= changesets:
            tests, size = tests + 1, size * 2
        rev = repo.changelog.rev(node)
        ui.write(_("Testing changeset %d:%s "
                   "(%d changesets remaining, ~%d tests)\n")
                 % (rev, short(node), changesets, tests))
        if not noupdate:
            cmdutil.bailifchanged(repo)
            return hg.clean(repo, node)
640
640
@command('bookmarks',
    [('f', 'force', False, _('force')),
    ('r', 'rev', '', _('revision'), _('REV')),
    ('d', 'delete', False, _('delete a given bookmark')),
    ('m', 'rename', '', _('rename a given bookmark'), _('NAME')),
    ('i', 'inactive', False, _('do not mark a new bookmark active'))],
    _('hg bookmarks [-f] [-d] [-i] [-m NAME] [-r REV] [NAME]'))
def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False,
             rename=None, inactive=False):
    '''track a line of development with movable markers

    Bookmarks are pointers to certain commits that move when
    committing. Bookmarks are local. They can be renamed, copied and
    deleted. It is possible to use bookmark names in :hg:`merge` and
    :hg:`update` to merge and update respectively to a given bookmark.

    You can use :hg:`bookmark NAME` to set a bookmark on the working
    directory's parent revision with the given name. If you specify
    a revision using -r REV (where REV may be an existing bookmark),
    the bookmark is assigned to that revision.

    Bookmarks can be pushed and pulled between repositories (see :hg:`help
    push` and :hg:`help pull`). This requires both the local and remote
    repositories to support bookmarks. For versions prior to 1.8, this means
    the bookmarks extension must be enabled.
    '''
    hexfn = hex if ui.debugflag else short
    marks = repo._bookmarks
    curnode = repo.changectx('.').node()

    # -m: rename an existing bookmark, carrying the "current" flag along
    if rename:
        if rename not in marks:
            raise util.Abort(_("bookmark '%s' does not exist") % rename)
        if mark in marks and not force:
            raise util.Abort(_("bookmark '%s' already exists "
                               "(use -f to force)") % mark)
        if mark is None:
            raise util.Abort(_("new bookmark name required"))
        marks[mark] = marks[rename]
        if repo._bookmarkcurrent == rename and not inactive:
            bookmarks.setcurrent(repo, mark)
        del marks[rename]
        bookmarks.write(repo)
        return

    # -d: delete a bookmark, deactivating it first if it is current
    if delete:
        if mark is None:
            raise util.Abort(_("bookmark name required"))
        if mark not in marks:
            raise util.Abort(_("bookmark '%s' does not exist") % mark)
        if mark == repo._bookmarkcurrent:
            bookmarks.setcurrent(repo, None)
        del marks[mark]
        bookmarks.write(repo)
        return

    # NAME given: create or move a bookmark after validating the name
    if mark is not None:
        if "\n" in mark:
            raise util.Abort(_("bookmark name cannot contain newlines"))
        mark = mark.strip()
        if not mark:
            raise util.Abort(_("bookmark names cannot consist entirely of "
                               "whitespace"))
        if inactive and mark == repo._bookmarkcurrent:
            bookmarks.setcurrent(repo, None)
            return
        if mark in marks and not force:
            raise util.Abort(_("bookmark '%s' already exists "
                               "(use -f to force)") % mark)
        if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
            and not force):
            raise util.Abort(
                _("a bookmark cannot have the name of an existing branch"))
        marks[mark] = repo.lookup(rev) if rev else repo.changectx('.').node()
        if not inactive and repo.changectx('.').node() == marks[mark]:
            bookmarks.setcurrent(repo, mark)
        bookmarks.write(repo)
        return

    # no NAME and no action flag: list the bookmarks
    if rev:
        raise util.Abort(_("bookmark name required"))
    if not marks:
        ui.status(_("no bookmarks set\n"))
    else:
        current = repo._bookmarkcurrent
        for bmark, n in sorted(marks.iteritems()):
            # '*' marks the bookmark that will move with the next commit
            if bmark == current and n == curnode:
                prefix, label = '*', 'bookmarks.current'
            else:
                prefix, label = ' ', ''
            if ui.quiet:
                ui.write("%s\n" % bmark, label=label)
            else:
                ui.write(" %s %-25s %d:%s\n" % (
                    prefix, bmark, repo.changelog.rev(n), hexfn(n)),
                    label=label)
    return
743
743
@command('branch',
    [('f', 'force', None,
      _('set branch name even if it shadows an existing branch')),
    ('C', 'clean', None, _('reset branch name to parent branch name'))],
    _('[-fC] [NAME]'))
def branch(ui, repo, label=None, **opts):
    """set or show the current branch name

    With no argument, show the current branch name. With one argument,
    set the working directory branch name (the branch will not exist
    in the repository until the next commit). Standard practice
    recommends that primary development take place on the 'default'
    branch.

    Unless -f/--force is specified, branch will not let you set a
    branch name that already exists, even if it's inactive.

    Use -C/--clean to reset the working directory branch to that of
    the parent of the working directory, negating a previous branch
    change.

    Use the command :hg:`update` to switch to an existing branch. Use
    :hg:`commit --close-branch` to mark this branch as closed.

    Returns 0 on success.
    """

    if opts.get('clean'):
        # -C: fall back to the branch of the working directory's parent
        label = repo[None].p1().branch()
        repo.dirstate.setbranch(label)
        ui.status(_('reset working directory to branch %s\n') % label)
    elif label:
        # refuse to shadow an existing branch unless forced; reusing the
        # branch of a working-directory parent is always allowed
        shadows = (not opts.get('force')
                   and label in repo.branchtags()
                   and label not in [p.branch() for p in repo.parents()])
        if shadows:
            raise util.Abort(_('a branch of the same name already exists'),
                             # i18n: "it" refers to an existing branch
                             hint=_("use 'hg update' to switch to it"))
        repo.dirstate.setbranch(label)
        ui.status(_('marked working directory as branch %s\n') % label)
    else:
        # no argument: just print the current branch name
        ui.write("%s\n" % repo.dirstate.branch())
785
785
@command('branches',
    [('a', 'active', False, _('show only branches that have unmerged heads')),
    ('c', 'closed', False, _('show normal and closed branches'))],
    _('[-ac]'))
def branches(ui, repo, active=False, closed=False):
    """list repository named branches

    List the repository's named branches, indicating which ones are
    inactive. If -c/--closed is specified, also list branches which have
    been marked closed (see :hg:`commit --close-branch`).

    If -a/--active is specified, only show active branches. A branch
    is considered active if it contains repository heads.

    Returns 0.
    """

    hexfunc = hex if ui.debugflag else short
    activebranches = [repo[n].branch() for n in repo.heads()]

    def testactive(tag, node):
        # a branch is active iff its tip is an open repository head
        realhead = tag in activebranches
        open = node in repo.branchheads(tag, closed=False)
        return realhead and open

    # sort active first, then by descending revision
    branches = sorted([(testactive(tag, node), repo.changelog.rev(node), tag)
                       for tag, node in repo.branchtags().items()],
                      reverse=True)

    for isactive, node, tag in branches:
        if active and not isactive:
            continue
        if ui.quiet:
            ui.write("%s\n" % tag)
            continue
        hn = repo.lookup(node)
        if isactive:
            label = 'branches.active'
            notice = ''
        elif hn not in repo.branchheads(tag, closed=False):
            # branch head was closed; only shown with -c
            if not closed:
                continue
            label = 'branches.closed'
            notice = _(' (closed)')
        else:
            label = 'branches.inactive'
            notice = _(' (inactive)')
        if tag == repo.dirstate.branch():
            label = 'branches.current'
        # pad so the rev:hash column lines up regardless of name width
        rev = str(node).rjust(31 - encoding.colwidth(tag))
        rev = ui.label('%s:%s' % (rev, hexfunc(hn)), 'log.changeset')
        ui.write("%s %s%s\n" % (ui.label(tag, label), rev, notice))
838
838
@command('bundle',
    [('f', 'force', None, _('run even when the destination is unrelated')),
    ('r', 'rev', [], _('a changeset intended to be added to the destination'),
     _('REV')),
    ('b', 'branch', [], _('a specific branch you would like to bundle'),
     _('BRANCH')),
    ('', 'base', [],
     _('a base changeset assumed to be available at the destination'),
     _('REV')),
    ('a', 'all', None, _('bundle all changesets in the repository')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')),
    ] + remoteopts,
    _('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]'))
def bundle(ui, repo, fname, dest=None, **opts):
    """create a changegroup file

    Generate a compressed changegroup file collecting changesets not
    known to be in another repository.

    If you omit the destination repository, then hg assumes the
    destination will have all the nodes you specify with --base
    parameters. To create a bundle containing all changesets, use
    -a/--all (or --base null).

    You can change compression method with the -t/--type option.
    The available compression methods are: none, bzip2, and
    gzip (by default, bundles are compressed using bzip2).

    The bundle file can then be transferred using conventional means
    and applied to another repository with the unbundle or pull
    command. This is useful when direct push and pull are not
    available or when exporting an entire repository is undesirable.

    Applying bundles preserves all changeset contents including
    permissions, copy/rename information, and revision history.

    Returns 0 on success, 1 if no changes found.
    """
    revs = scmutil.revrange(repo, opts['rev']) if 'rev' in opts else None
    # -a is equivalent to --base null: everything is assumed missing remotely
    base = ['null'] if opts.get('all') else scmutil.revrange(repo, opts.get('base'))

    if base:
        # explicit base: the destination's contents are taken on faith
        if dest:
            raise util.Abort(_("--base is incompatible with specifying "
                               "a destination"))
        common = [repo.lookup(rev) for rev in base]
        heads = map(repo.lookup, revs) if revs else revs
    else:
        # no base: ask the destination what it already has
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), dest)
        revs, checkout = hg.addbranchrevs(repo, other, branches, revs)
        heads = map(repo.lookup, revs) if revs else revs
        common, outheads = discovery.findcommonoutgoing(repo, other,
                                                        onlyheads=heads,
                                                        force=opts.get('force'))

    cg = repo.getbundle('bundle', common=common, heads=heads)
    if not cg:
        ui.status(_("no changes found\n"))
        return 1

    # map user-facing compression names onto on-disk bundle headers
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    bundletype = btypes.get(opts.get('type', 'bzip2').lower())
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))

    changegroup.writebundle(cg, fname, bundletype)
913
913
914 @command('cat',
914 @command('cat',
915 [('o', 'output', '',
915 [('o', 'output', '',
916 _('print output to file with formatted name'), _('FORMAT')),
916 _('print output to file with formatted name'), _('FORMAT')),
917 ('r', 'rev', '', _('print the given revision'), _('REV')),
917 ('r', 'rev', '', _('print the given revision'), _('REV')),
918 ('', 'decode', None, _('apply any matching decode filter')),
918 ('', 'decode', None, _('apply any matching decode filter')),
919 ] + walkopts,
919 ] + walkopts,
920 _('[OPTION]... FILE...'))
920 _('[OPTION]... FILE...'))
921 def cat(ui, repo, file1, *pats, **opts):
921 def cat(ui, repo, file1, *pats, **opts):
922 """output the current or given revision of files
922 """output the current or given revision of files
923
923
924 Print the specified files as they were at the given revision. If
924 Print the specified files as they were at the given revision. If
925 no revision is given, the parent of the working directory is used,
925 no revision is given, the parent of the working directory is used,
926 or tip if no revision is checked out.
926 or tip if no revision is checked out.
927
927
928 Output may be to a file, in which case the name of the file is
928 Output may be to a file, in which case the name of the file is
929 given using a format string. The formatting rules are the same as
929 given using a format string. The formatting rules are the same as
930 for the export command, with the following additions:
930 for the export command, with the following additions:
931
931
932 :``%s``: basename of file being printed
932 :``%s``: basename of file being printed
933 :``%d``: dirname of file being printed, or '.' if in repository root
933 :``%d``: dirname of file being printed, or '.' if in repository root
934 :``%p``: root-relative path name of file being printed
934 :``%p``: root-relative path name of file being printed
935
935
936 Returns 0 on success.
936 Returns 0 on success.
937 """
937 """
938 ctx = scmutil.revsingle(repo, opts.get('rev'))
938 ctx = scmutil.revsingle(repo, opts.get('rev'))
939 err = 1
939 err = 1
940 m = scmutil.match(repo, (file1,) + pats, opts)
940 m = scmutil.match(repo, (file1,) + pats, opts)
941 for abs in ctx.walk(m):
941 for abs in ctx.walk(m):
942 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
942 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
943 pathname=abs)
943 pathname=abs)
944 data = ctx[abs].data()
944 data = ctx[abs].data()
945 if opts.get('decode'):
945 if opts.get('decode'):
946 data = repo.wwritedata(abs, data)
946 data = repo.wwritedata(abs, data)
947 fp.write(data)
947 fp.write(data)
948 fp.close()
948 fp.close()
949 err = 0
949 err = 0
950 return err
950 return err
951
951
952 @command('^clone',
952 @command('^clone',
953 [('U', 'noupdate', None,
953 [('U', 'noupdate', None,
954 _('the clone will include an empty working copy (only a repository)')),
954 _('the clone will include an empty working copy (only a repository)')),
955 ('u', 'updaterev', '', _('revision, tag or branch to check out'), _('REV')),
955 ('u', 'updaterev', '', _('revision, tag or branch to check out'), _('REV')),
956 ('r', 'rev', [], _('include the specified changeset'), _('REV')),
956 ('r', 'rev', [], _('include the specified changeset'), _('REV')),
957 ('b', 'branch', [], _('clone only the specified branch'), _('BRANCH')),
957 ('b', 'branch', [], _('clone only the specified branch'), _('BRANCH')),
958 ('', 'pull', None, _('use pull protocol to copy metadata')),
958 ('', 'pull', None, _('use pull protocol to copy metadata')),
959 ('', 'uncompressed', None, _('use uncompressed transfer (fast over LAN)')),
959 ('', 'uncompressed', None, _('use uncompressed transfer (fast over LAN)')),
960 ] + remoteopts,
960 ] + remoteopts,
961 _('[OPTION]... SOURCE [DEST]'))
961 _('[OPTION]... SOURCE [DEST]'))
962 def clone(ui, source, dest=None, **opts):
962 def clone(ui, source, dest=None, **opts):
963 """make a copy of an existing repository
963 """make a copy of an existing repository
964
964
965 Create a copy of an existing repository in a new directory.
965 Create a copy of an existing repository in a new directory.
966
966
967 If no destination directory name is specified, it defaults to the
967 If no destination directory name is specified, it defaults to the
968 basename of the source.
968 basename of the source.
969
969
970 The location of the source is added to the new repository's
970 The location of the source is added to the new repository's
971 ``.hg/hgrc`` file, as the default to be used for future pulls.
971 ``.hg/hgrc`` file, as the default to be used for future pulls.
972
972
973 See :hg:`help urls` for valid source format details.
973 See :hg:`help urls` for valid source format details.
974
974
975 It is possible to specify an ``ssh://`` URL as the destination, but no
975 It is possible to specify an ``ssh://`` URL as the destination, but no
976 ``.hg/hgrc`` and working directory will be created on the remote side.
976 ``.hg/hgrc`` and working directory will be created on the remote side.
977 Please see :hg:`help urls` for important details about ``ssh://`` URLs.
977 Please see :hg:`help urls` for important details about ``ssh://`` URLs.
978
978
979 A set of changesets (tags, or branch names) to pull may be specified
979 A set of changesets (tags, or branch names) to pull may be specified
980 by listing each changeset (tag, or branch name) with -r/--rev.
980 by listing each changeset (tag, or branch name) with -r/--rev.
981 If -r/--rev is used, the cloned repository will contain only a subset
981 If -r/--rev is used, the cloned repository will contain only a subset
982 of the changesets of the source repository. Only the set of changesets
982 of the changesets of the source repository. Only the set of changesets
983 defined by all -r/--rev options (including all their ancestors)
983 defined by all -r/--rev options (including all their ancestors)
984 will be pulled into the destination repository.
984 will be pulled into the destination repository.
985 No subsequent changesets (including subsequent tags) will be present
985 No subsequent changesets (including subsequent tags) will be present
986 in the destination.
986 in the destination.
987
987
988 Using -r/--rev (or 'clone src#rev dest') implies --pull, even for
988 Using -r/--rev (or 'clone src#rev dest') implies --pull, even for
989 local source repositories.
989 local source repositories.
990
990
991 For efficiency, hardlinks are used for cloning whenever the source
991 For efficiency, hardlinks are used for cloning whenever the source
992 and destination are on the same filesystem (note this applies only
992 and destination are on the same filesystem (note this applies only
993 to the repository data, not to the working directory). Some
993 to the repository data, not to the working directory). Some
994 filesystems, such as AFS, implement hardlinking incorrectly, but
994 filesystems, such as AFS, implement hardlinking incorrectly, but
995 do not report errors. In these cases, use the --pull option to
995 do not report errors. In these cases, use the --pull option to
996 avoid hardlinking.
996 avoid hardlinking.
997
997
998 In some cases, you can clone repositories and the working directory
998 In some cases, you can clone repositories and the working directory
999 using full hardlinks with ::
999 using full hardlinks with ::
1000
1000
1001 $ cp -al REPO REPOCLONE
1001 $ cp -al REPO REPOCLONE
1002
1002
1003 This is the fastest way to clone, but it is not always safe. The
1003 This is the fastest way to clone, but it is not always safe. The
1004 operation is not atomic (making sure REPO is not modified during
1004 operation is not atomic (making sure REPO is not modified during
1005 the operation is up to you) and you have to make sure your editor
1005 the operation is up to you) and you have to make sure your editor
1006 breaks hardlinks (Emacs and most Linux Kernel tools do so). Also,
1006 breaks hardlinks (Emacs and most Linux Kernel tools do so). Also,
1007 this is not compatible with certain extensions that place their
1007 this is not compatible with certain extensions that place their
1008 metadata under the .hg directory, such as mq.
1008 metadata under the .hg directory, such as mq.
1009
1009
1010 Mercurial will update the working directory to the first applicable
1010 Mercurial will update the working directory to the first applicable
1011 revision from this list:
1011 revision from this list:
1012
1012
1013 a) null if -U or the source repository has no changesets
1013 a) null if -U or the source repository has no changesets
1014 b) if -u . and the source repository is local, the first parent of
1014 b) if -u . and the source repository is local, the first parent of
1015 the source repository's working directory
1015 the source repository's working directory
1016 c) the changeset specified with -u (if a branch name, this means the
1016 c) the changeset specified with -u (if a branch name, this means the
1017 latest head of that branch)
1017 latest head of that branch)
1018 d) the changeset specified with -r
1018 d) the changeset specified with -r
1019 e) the tipmost head specified with -b
1019 e) the tipmost head specified with -b
1020 f) the tipmost head specified with the url#branch source syntax
1020 f) the tipmost head specified with the url#branch source syntax
1021 g) the tipmost head of the default branch
1021 g) the tipmost head of the default branch
1022 h) tip
1022 h) tip
1023
1023
1024 Returns 0 on success.
1024 Returns 0 on success.
1025 """
1025 """
1026 if opts.get('noupdate') and opts.get('updaterev'):
1026 if opts.get('noupdate') and opts.get('updaterev'):
1027 raise util.Abort(_("cannot specify both --noupdate and --updaterev"))
1027 raise util.Abort(_("cannot specify both --noupdate and --updaterev"))
1028
1028
1029 r = hg.clone(hg.remoteui(ui, opts), source, dest,
1029 r = hg.clone(hg.remoteui(ui, opts), source, dest,
1030 pull=opts.get('pull'),
1030 pull=opts.get('pull'),
1031 stream=opts.get('uncompressed'),
1031 stream=opts.get('uncompressed'),
1032 rev=opts.get('rev'),
1032 rev=opts.get('rev'),
1033 update=opts.get('updaterev') or not opts.get('noupdate'),
1033 update=opts.get('updaterev') or not opts.get('noupdate'),
1034 branch=opts.get('branch'))
1034 branch=opts.get('branch'))
1035
1035
1036 return r is None
1036 return r is None
1037
1037
1038 @command('^commit|ci',
1038 @command('^commit|ci',
1039 [('A', 'addremove', None,
1039 [('A', 'addremove', None,
1040 _('mark new/missing files as added/removed before committing')),
1040 _('mark new/missing files as added/removed before committing')),
1041 ('', 'close-branch', None,
1041 ('', 'close-branch', None,
1042 _('mark a branch as closed, hiding it from the branch list')),
1042 _('mark a branch as closed, hiding it from the branch list')),
1043 ] + walkopts + commitopts + commitopts2,
1043 ] + walkopts + commitopts + commitopts2,
1044 _('[OPTION]... [FILE]...'))
1044 _('[OPTION]... [FILE]...'))
1045 def commit(ui, repo, *pats, **opts):
1045 def commit(ui, repo, *pats, **opts):
1046 """commit the specified files or all outstanding changes
1046 """commit the specified files or all outstanding changes
1047
1047
1048 Commit changes to the given files into the repository. Unlike a
1048 Commit changes to the given files into the repository. Unlike a
1049 centralized SCM, this operation is a local operation. See
1049 centralized SCM, this operation is a local operation. See
1050 :hg:`push` for a way to actively distribute your changes.
1050 :hg:`push` for a way to actively distribute your changes.
1051
1051
1052 If a list of files is omitted, all changes reported by :hg:`status`
1052 If a list of files is omitted, all changes reported by :hg:`status`
1053 will be committed.
1053 will be committed.
1054
1054
1055 If you are committing the result of a merge, do not provide any
1055 If you are committing the result of a merge, do not provide any
1056 filenames or -I/-X filters.
1056 filenames or -I/-X filters.
1057
1057
1058 If no commit message is specified, Mercurial starts your
1058 If no commit message is specified, Mercurial starts your
1059 configured editor where you can enter a message. In case your
1059 configured editor where you can enter a message. In case your
1060 commit fails, you will find a backup of your message in
1060 commit fails, you will find a backup of your message in
1061 ``.hg/last-message.txt``.
1061 ``.hg/last-message.txt``.
1062
1062
1063 See :hg:`help dates` for a list of formats valid for -d/--date.
1063 See :hg:`help dates` for a list of formats valid for -d/--date.
1064
1064
1065 Returns 0 on success, 1 if nothing changed.
1065 Returns 0 on success, 1 if nothing changed.
1066 """
1066 """
1067 extra = {}
1067 extra = {}
1068 if opts.get('close_branch'):
1068 if opts.get('close_branch'):
1069 if repo['.'].node() not in repo.branchheads():
1069 if repo['.'].node() not in repo.branchheads():
1070 # The topo heads set is included in the branch heads set of the
1070 # The topo heads set is included in the branch heads set of the
1071 # current branch, so it's sufficient to test branchheads
1071 # current branch, so it's sufficient to test branchheads
1072 raise util.Abort(_('can only close branch heads'))
1072 raise util.Abort(_('can only close branch heads'))
1073 extra['close'] = 1
1073 extra['close'] = 1
1074 e = cmdutil.commiteditor
1074 e = cmdutil.commiteditor
1075 if opts.get('force_editor'):
1075 if opts.get('force_editor'):
1076 e = cmdutil.commitforceeditor
1076 e = cmdutil.commitforceeditor
1077
1077
1078 def commitfunc(ui, repo, message, match, opts):
1078 def commitfunc(ui, repo, message, match, opts):
1079 return repo.commit(message, opts.get('user'), opts.get('date'), match,
1079 return repo.commit(message, opts.get('user'), opts.get('date'), match,
1080 editor=e, extra=extra)
1080 editor=e, extra=extra)
1081
1081
1082 branch = repo[None].branch()
1082 branch = repo[None].branch()
1083 bheads = repo.branchheads(branch)
1083 bheads = repo.branchheads(branch)
1084
1084
1085 node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
1085 node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
1086 if not node:
1086 if not node:
1087 stat = repo.status(match=scmutil.match(repo, pats, opts))
1087 stat = repo.status(match=scmutil.match(repo, pats, opts))
1088 if stat[3]:
1088 if stat[3]:
1089 ui.status(_("nothing changed (%d missing files, see 'hg status')\n")
1089 ui.status(_("nothing changed (%d missing files, see 'hg status')\n")
1090 % len(stat[3]))
1090 % len(stat[3]))
1091 else:
1091 else:
1092 ui.status(_("nothing changed\n"))
1092 ui.status(_("nothing changed\n"))
1093 return 1
1093 return 1
1094
1094
1095 ctx = repo[node]
1095 ctx = repo[node]
1096 parents = ctx.parents()
1096 parents = ctx.parents()
1097
1097
1098 if bheads and not [x for x in parents
1098 if bheads and not [x for x in parents
1099 if x.node() in bheads and x.branch() == branch]:
1099 if x.node() in bheads and x.branch() == branch]:
1100 ui.status(_('created new head\n'))
1100 ui.status(_('created new head\n'))
1101 # The message is not printed for initial roots. For the other
1101 # The message is not printed for initial roots. For the other
1102 # changesets, it is printed in the following situations:
1102 # changesets, it is printed in the following situations:
1103 #
1103 #
1104 # Par column: for the 2 parents with ...
1104 # Par column: for the 2 parents with ...
1105 # N: null or no parent
1105 # N: null or no parent
1106 # B: parent is on another named branch
1106 # B: parent is on another named branch
1107 # C: parent is a regular non head changeset
1107 # C: parent is a regular non head changeset
1108 # H: parent was a branch head of the current branch
1108 # H: parent was a branch head of the current branch
1109 # Msg column: whether we print "created new head" message
1109 # Msg column: whether we print "created new head" message
1110 # In the following, it is assumed that there already exists some
1110 # In the following, it is assumed that there already exists some
1111 # initial branch heads of the current branch, otherwise nothing is
1111 # initial branch heads of the current branch, otherwise nothing is
1112 # printed anyway.
1112 # printed anyway.
1113 #
1113 #
1114 # Par Msg Comment
1114 # Par Msg Comment
1115 # NN y additional topo root
1115 # NN y additional topo root
1116 #
1116 #
1117 # BN y additional branch root
1117 # BN y additional branch root
1118 # CN y additional topo head
1118 # CN y additional topo head
1119 # HN n usual case
1119 # HN n usual case
1120 #
1120 #
1121 # BB y weird additional branch root
1121 # BB y weird additional branch root
1122 # CB y branch merge
1122 # CB y branch merge
1123 # HB n merge with named branch
1123 # HB n merge with named branch
1124 #
1124 #
1125 # CC y additional head from merge
1125 # CC y additional head from merge
1126 # CH n merge with a head
1126 # CH n merge with a head
1127 #
1127 #
1128 # HH n head merge: head count decreases
1128 # HH n head merge: head count decreases
1129
1129
1130 if not opts.get('close_branch'):
1130 if not opts.get('close_branch'):
1131 for r in parents:
1131 for r in parents:
1132 if r.extra().get('close') and r.branch() == branch:
1132 if r.extra().get('close') and r.branch() == branch:
1133 ui.status(_('reopening closed branch head %d\n') % r)
1133 ui.status(_('reopening closed branch head %d\n') % r)
1134
1134
1135 if ui.debugflag:
1135 if ui.debugflag:
1136 ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
1136 ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
1137 elif ui.verbose:
1137 elif ui.verbose:
1138 ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
1138 ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
1139
1139
1140 @command('copy|cp',
1140 @command('copy|cp',
1141 [('A', 'after', None, _('record a copy that has already occurred')),
1141 [('A', 'after', None, _('record a copy that has already occurred')),
1142 ('f', 'force', None, _('forcibly copy over an existing managed file')),
1142 ('f', 'force', None, _('forcibly copy over an existing managed file')),
1143 ] + walkopts + dryrunopts,
1143 ] + walkopts + dryrunopts,
1144 _('[OPTION]... [SOURCE]... DEST'))
1144 _('[OPTION]... [SOURCE]... DEST'))
1145 def copy(ui, repo, *pats, **opts):
1145 def copy(ui, repo, *pats, **opts):
1146 """mark files as copied for the next commit
1146 """mark files as copied for the next commit
1147
1147
1148 Mark dest as having copies of source files. If dest is a
1148 Mark dest as having copies of source files. If dest is a
1149 directory, copies are put in that directory. If dest is a file,
1149 directory, copies are put in that directory. If dest is a file,
1150 the source must be a single file.
1150 the source must be a single file.
1151
1151
1152 By default, this command copies the contents of files as they
1152 By default, this command copies the contents of files as they
1153 exist in the working directory. If invoked with -A/--after, the
1153 exist in the working directory. If invoked with -A/--after, the
1154 operation is recorded, but no copying is performed.
1154 operation is recorded, but no copying is performed.
1155
1155
1156 This command takes effect with the next commit. To undo a copy
1156 This command takes effect with the next commit. To undo a copy
1157 before that, see :hg:`revert`.
1157 before that, see :hg:`revert`.
1158
1158
1159 Returns 0 on success, 1 if errors are encountered.
1159 Returns 0 on success, 1 if errors are encountered.
1160 """
1160 """
1161 wlock = repo.wlock(False)
1161 wlock = repo.wlock(False)
1162 try:
1162 try:
1163 return cmdutil.copy(ui, repo, pats, opts)
1163 return cmdutil.copy(ui, repo, pats, opts)
1164 finally:
1164 finally:
1165 wlock.release()
1165 wlock.release()
1166
1166
1167 @command('debugancestor', [], _('[INDEX] REV1 REV2'))
1167 @command('debugancestor', [], _('[INDEX] REV1 REV2'))
1168 def debugancestor(ui, repo, *args):
1168 def debugancestor(ui, repo, *args):
1169 """find the ancestor revision of two revisions in a given index"""
1169 """find the ancestor revision of two revisions in a given index"""
1170 if len(args) == 3:
1170 if len(args) == 3:
1171 index, rev1, rev2 = args
1171 index, rev1, rev2 = args
1172 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), index)
1172 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), index)
1173 lookup = r.lookup
1173 lookup = r.lookup
1174 elif len(args) == 2:
1174 elif len(args) == 2:
1175 if not repo:
1175 if not repo:
1176 raise util.Abort(_("there is no Mercurial repository here "
1176 raise util.Abort(_("there is no Mercurial repository here "
1177 "(.hg not found)"))
1177 "(.hg not found)"))
1178 rev1, rev2 = args
1178 rev1, rev2 = args
1179 r = repo.changelog
1179 r = repo.changelog
1180 lookup = repo.lookup
1180 lookup = repo.lookup
1181 else:
1181 else:
1182 raise util.Abort(_('either two or three arguments required'))
1182 raise util.Abort(_('either two or three arguments required'))
1183 a = r.ancestor(lookup(rev1), lookup(rev2))
1183 a = r.ancestor(lookup(rev1), lookup(rev2))
1184 ui.write("%d:%s\n" % (r.rev(a), hex(a)))
1184 ui.write("%d:%s\n" % (r.rev(a), hex(a)))
1185
1185
1186 @command('debugbuilddag',
1186 @command('debugbuilddag',
1187 [('m', 'mergeable-file', None, _('add single file mergeable changes')),
1187 [('m', 'mergeable-file', None, _('add single file mergeable changes')),
1188 ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
1188 ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
1189 ('n', 'new-file', None, _('add new file at each rev'))],
1189 ('n', 'new-file', None, _('add new file at each rev'))],
1190 _('[OPTION]... [TEXT]'))
1190 _('[OPTION]... [TEXT]'))
1191 def debugbuilddag(ui, repo, text=None,
1191 def debugbuilddag(ui, repo, text=None,
1192 mergeable_file=False,
1192 mergeable_file=False,
1193 overwritten_file=False,
1193 overwritten_file=False,
1194 new_file=False):
1194 new_file=False):
1195 """builds a repo with a given DAG from scratch in the current empty repo
1195 """builds a repo with a given DAG from scratch in the current empty repo
1196
1196
1197 The description of the DAG is read from stdin if not given on the
1197 The description of the DAG is read from stdin if not given on the
1198 command line.
1198 command line.
1199
1199
1200 Elements:
1200 Elements:
1201
1201
1202 - "+n" is a linear run of n nodes based on the current default parent
1202 - "+n" is a linear run of n nodes based on the current default parent
1203 - "." is a single node based on the current default parent
1203 - "." is a single node based on the current default parent
1204 - "$" resets the default parent to null (implied at the start);
1204 - "$" resets the default parent to null (implied at the start);
1205 otherwise the default parent is always the last node created
1205 otherwise the default parent is always the last node created
1206 - "<p" sets the default parent to the backref p
1206 - "<p" sets the default parent to the backref p
1207 - "*p" is a fork at parent p, which is a backref
1207 - "*p" is a fork at parent p, which is a backref
1208 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
1208 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
1209 - "/p2" is a merge of the preceding node and p2
1209 - "/p2" is a merge of the preceding node and p2
1210 - ":tag" defines a local tag for the preceding node
1210 - ":tag" defines a local tag for the preceding node
1211 - "@branch" sets the named branch for subsequent nodes
1211 - "@branch" sets the named branch for subsequent nodes
1212 - "#...\\n" is a comment up to the end of the line
1212 - "#...\\n" is a comment up to the end of the line
1213
1213
1214 Whitespace between the above elements is ignored.
1214 Whitespace between the above elements is ignored.
1215
1215
1216 A backref is either
1216 A backref is either
1217
1217
1218 - a number n, which references the node curr-n, where curr is the current
1218 - a number n, which references the node curr-n, where curr is the current
1219 node, or
1219 node, or
1220 - the name of a local tag you placed earlier using ":tag", or
1220 - the name of a local tag you placed earlier using ":tag", or
1221 - empty to denote the default parent.
1221 - empty to denote the default parent.
1222
1222
1223 All string valued-elements are either strictly alphanumeric, or must
1223 All string valued-elements are either strictly alphanumeric, or must
1224 be enclosed in double quotes ("..."), with "\\" as escape character.
1224 be enclosed in double quotes ("..."), with "\\" as escape character.
1225 """
1225 """
1226
1226
1227 if text is None:
1227 if text is None:
1228 ui.status(_("reading DAG from stdin\n"))
1228 ui.status(_("reading DAG from stdin\n"))
1229 text = sys.stdin.read()
1229 text = sys.stdin.read()
1230
1230
1231 cl = repo.changelog
1231 cl = repo.changelog
1232 if len(cl) > 0:
1232 if len(cl) > 0:
1233 raise util.Abort(_('repository is not empty'))
1233 raise util.Abort(_('repository is not empty'))
1234
1234
1235 # determine number of revs in DAG
1235 # determine number of revs in DAG
1236 total = 0
1236 total = 0
1237 for type, data in dagparser.parsedag(text):
1237 for type, data in dagparser.parsedag(text):
1238 if type == 'n':
1238 if type == 'n':
1239 total += 1
1239 total += 1
1240
1240
1241 if mergeable_file:
1241 if mergeable_file:
1242 linesperrev = 2
1242 linesperrev = 2
1243 # make a file with k lines per rev
1243 # make a file with k lines per rev
1244 initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
1244 initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
1245 initialmergedlines.append("")
1245 initialmergedlines.append("")
1246
1246
1247 tags = []
1247 tags = []
1248
1248
1249 tr = repo.transaction("builddag")
1249 tr = repo.transaction("builddag")
1250 try:
1250 try:
1251
1251
1252 at = -1
1252 at = -1
1253 atbranch = 'default'
1253 atbranch = 'default'
1254 nodeids = []
1254 nodeids = []
1255 ui.progress(_('building'), 0, unit=_('revisions'), total=total)
1255 ui.progress(_('building'), 0, unit=_('revisions'), total=total)
1256 for type, data in dagparser.parsedag(text):
1256 for type, data in dagparser.parsedag(text):
1257 if type == 'n':
1257 if type == 'n':
1258 ui.note('node %s\n' % str(data))
1258 ui.note('node %s\n' % str(data))
1259 id, ps = data
1259 id, ps = data
1260
1260
1261 files = []
1261 files = []
1262 fctxs = {}
1262 fctxs = {}
1263
1263
1264 p2 = None
1264 p2 = None
1265 if mergeable_file:
1265 if mergeable_file:
1266 fn = "mf"
1266 fn = "mf"
1267 p1 = repo[ps[0]]
1267 p1 = repo[ps[0]]
1268 if len(ps) > 1:
1268 if len(ps) > 1:
1269 p2 = repo[ps[1]]
1269 p2 = repo[ps[1]]
1270 pa = p1.ancestor(p2)
1270 pa = p1.ancestor(p2)
1271 base, local, other = [x[fn].data() for x in pa, p1, p2]
1271 base, local, other = [x[fn].data() for x in pa, p1, p2]
1272 m3 = simplemerge.Merge3Text(base, local, other)
1272 m3 = simplemerge.Merge3Text(base, local, other)
1273 ml = [l.strip() for l in m3.merge_lines()]
1273 ml = [l.strip() for l in m3.merge_lines()]
1274 ml.append("")
1274 ml.append("")
1275 elif at > 0:
1275 elif at > 0:
1276 ml = p1[fn].data().split("\n")
1276 ml = p1[fn].data().split("\n")
1277 else:
1277 else:
1278 ml = initialmergedlines
1278 ml = initialmergedlines
1279 ml[id * linesperrev] += " r%i" % id
1279 ml[id * linesperrev] += " r%i" % id
1280 mergedtext = "\n".join(ml)
1280 mergedtext = "\n".join(ml)
1281 files.append(fn)
1281 files.append(fn)
1282 fctxs[fn] = context.memfilectx(fn, mergedtext)
1282 fctxs[fn] = context.memfilectx(fn, mergedtext)
1283
1283
1284 if overwritten_file:
1284 if overwritten_file:
1285 fn = "of"
1285 fn = "of"
1286 files.append(fn)
1286 files.append(fn)
1287 fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)
1287 fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)
1288
1288
1289 if new_file:
1289 if new_file:
1290 fn = "nf%i" % id
1290 fn = "nf%i" % id
1291 files.append(fn)
1291 files.append(fn)
1292 fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)
1292 fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)
1293 if len(ps) > 1:
1293 if len(ps) > 1:
1294 if not p2:
1294 if not p2:
1295 p2 = repo[ps[1]]
1295 p2 = repo[ps[1]]
1296 for fn in p2:
1296 for fn in p2:
1297 if fn.startswith("nf"):
1297 if fn.startswith("nf"):
1298 files.append(fn)
1298 files.append(fn)
1299 fctxs[fn] = p2[fn]
1299 fctxs[fn] = p2[fn]
1300
1300
1301 def fctxfn(repo, cx, path):
1301 def fctxfn(repo, cx, path):
1302 return fctxs.get(path)
1302 return fctxs.get(path)
1303
1303
1304 if len(ps) == 0 or ps[0] < 0:
1304 if len(ps) == 0 or ps[0] < 0:
1305 pars = [None, None]
1305 pars = [None, None]
1306 elif len(ps) == 1:
1306 elif len(ps) == 1:
1307 pars = [nodeids[ps[0]], None]
1307 pars = [nodeids[ps[0]], None]
1308 else:
1308 else:
1309 pars = [nodeids[p] for p in ps]
1309 pars = [nodeids[p] for p in ps]
1310 cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
1310 cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
1311 date=(id, 0),
1311 date=(id, 0),
1312 user="debugbuilddag",
1312 user="debugbuilddag",
1313 extra={'branch': atbranch})
1313 extra={'branch': atbranch})
1314 nodeid = repo.commitctx(cx)
1314 nodeid = repo.commitctx(cx)
1315 nodeids.append(nodeid)
1315 nodeids.append(nodeid)
1316 at = id
1316 at = id
1317 elif type == 'l':
1317 elif type == 'l':
1318 id, name = data
1318 id, name = data
1319 ui.note('tag %s\n' % name)
1319 ui.note('tag %s\n' % name)
1320 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
1320 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
1321 elif type == 'a':
1321 elif type == 'a':
1322 ui.note('branch %s\n' % data)
1322 ui.note('branch %s\n' % data)
1323 atbranch = data
1323 atbranch = data
1324 ui.progress(_('building'), id, unit=_('revisions'), total=total)
1324 ui.progress(_('building'), id, unit=_('revisions'), total=total)
1325 tr.close()
1325 tr.close()
1326 finally:
1326 finally:
1327 ui.progress(_('building'), None)
1327 ui.progress(_('building'), None)
1328 tr.release()
1328 tr.release()
1329
1329
1330 if tags:
1330 if tags:
1331 repo.opener.write("localtags", "".join(tags))
1331 repo.opener.write("localtags", "".join(tags))
1332
1332
@command('debugbundle', [('a', 'all', None, _('show all details'))], _('FILE'))
def debugbundle(ui, bundlepath, all=None, **opts):
    """lists the contents of a bundle"""
    f = url.open(ui, bundlepath)
    try:
        gen = changegroup.readbundle(f, bundlepath)
        if all:
            # Verbose mode: dump every delta chunk of every group in the
            # bundle (changelog, manifest, then one group per filelog).
            ui.write("format: id, p1, p2, cset, delta base, len(delta)\n")

            def showchunks(named):
                # Print one line per delta chunk in the current group.
                # Each delta chains against the previously emitted node.
                ui.write("\n%s\n" % named)
                chain = None
                while True:
                    chunkdata = gen.deltachunk(chain)
                    if not chunkdata:
                        break
                    node = chunkdata['node']
                    p1 = chunkdata['p1']
                    p2 = chunkdata['p2']
                    cs = chunkdata['cs']
                    deltabase = chunkdata['deltabase']
                    delta = chunkdata['delta']
                    ui.write("%s %s %s %s %s %s\n" %
                             (hex(node), hex(p1), hex(p2),
                              hex(cs), hex(deltabase), len(delta)))
                    chain = node

            chunkdata = gen.changelogheader()
            showchunks("changelog")
            chunkdata = gen.manifestheader()
            showchunks("manifest")
            while True:
                chunkdata = gen.filelogheader()
                if not chunkdata:
                    break
                fname = chunkdata['filename']
                showchunks(fname)
        else:
            # Terse mode: list only the changelog node ids.
            chunkdata = gen.changelogheader()
            chain = None
            while True:
                chunkdata = gen.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                ui.write("%s\n" % hex(node))
                chain = node
    finally:
        f.close()
1382
1382
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    # Pass 1: every dirstate entry must be consistent with the parent
    # manifests for its state ('n'ormal, 'r'emoved, 'a'dded, 'm'erged).
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    # Pass 2: every file in the first-parent manifest must be tracked.
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        error = _(".hg/dirstate inconsistent with current parent's manifest")
        raise util.Abort(error)
1409 raise util.Abort(error)
1410
1410
@command('debugcommands', [], _('[COMMAND]'))
def debugcommands(ui, cmd='', *args):
    """list all available commands and options"""
    for cmd, vals in sorted(table.iteritems()):
        # Strip aliases ("foo|f") and the "^" marker used for the short list.
        cmd = cmd.split('|')[0].strip('^')
        opts = ', '.join(i[1] for i in vals[1])
        ui.write('%s: %s\n' % (cmd, opts))
1418
1418
@command('debugcomplete',
    [('o', 'options', None, _('show the command options'))],
    _('[-o] CMD'))
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""

    if opts.get('options'):
        # -o: complete option names instead of command names — the global
        # options plus, if a command was named, that command's own options.
        options = []
        otables = [globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, table, False)
            otables.append(entry[1])
        for t in otables:
            for o in t:
                if "(DEPRECATED)" in o[3]:
                    continue
                if o[0]:
                    options.append('-%s' % o[0])
                options.append('--%s' % o[1])
        ui.write("%s\n" % "\n".join(options))
        return

    # Default: complete command names matching the given prefix.
    cmdlist = cmdutil.findpossible(cmd, table)
    if ui.verbose:
        # Verbose output includes every alias of each matching command.
        cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
    ui.write("%s\n" % "\n".join(sorted(cmdlist)))
1445
1445
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'))
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labelled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # Describe an arbitrary revlog index file; requested revisions are
        # labelled "rN" in the output.
        rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
        revs = set((int(r) for r in revs))
        def events():
            for r in rlog:
                yield 'n', (r, list(set(p for p in rlog.parentrevs(r)
                                        if p != -1)))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # Describe the changelog of the current repository, optionally
        # annotated with branch names and/or tag labels.
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    # Emit an annotation event whenever the branch changes.
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(set(p for p in cl.parentrevs(r)
                                        if p != -1)))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise util.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
        ui.write("\n")
1504
1504
@command('debugdata',
    [('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest'))],
    _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    if opts.get('changelog') or opts.get('manifest'):
        # With -c/-m the single positional argument is the revision,
        # not a file name.
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
    try:
        ui.write(r.revision(r.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s') % rev)
1520
1520
@command('debugdate',
    [('e', 'extended', None, _('try extended date formats'))],
    _('[-e] DATE [RANGE]'))
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    if opts["extended"]:
        d = util.parsedate(date, util.extendeddateformats)
    else:
        d = util.parsedate(date)
    # d is a (unixtime, tz-offset) pair.
    ui.write("internal: %s %s\n" % d)
    ui.write("standard: %s\n" % util.datestr(d))
    if range:
        # Optionally test the parsed date against a date-range expression.
        m = util.matchdate(range)
        ui.write("match: %s\n" % m(d[0]))
1535
1535
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
    ('', 'nonheads', None,
     _('use old-style discovery with non-heads included')),
    ] + remoteopts,
    _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
                                      opts.get('branch'))
    remote = hg.repository(hg.remoteui(repo, opts), remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    def doit(localheads, remoteheads):
        # Run one discovery round and report the common heads found.
        if opts.get('old'):
            if localheads:
                raise util.Abort('cannot use localheads with old style '
                                 'discovery')
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write("unpruned common: %s\n" % " ".join([short(n)
                                                            for n in common]))
                # Prune the common set down to its heads.
                dag = dagutil.revlogdag(repo.changelog)
                all = dag.ancestorset(dag.internalizeall(common))
                common = dag.externalizeall(dag.headsetofconnecteds(all))
        else:
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write("common heads: %s\n" % " ".join([short(n) for n in common]))
        if lheads <= common:
            ui.write("local is subset\n")
        elif rheads <= common:
            ui.write("remote is subset\n")

    serverlogs = opts.get('serverlog')
    if serverlogs:
        # Replay discovery requests recorded in server log files; each line
        # is semicolon-separated, with the operation code in field 1.
        for filename in serverlogs:
            logfile = open(filename, 'r')
            try:
                line = logfile.readline()
                while line:
                    parts = line.strip().split(';')
                    op = parts[1]
                    if op == 'cg':
                        pass
                    elif op == 'cgss':
                        doit(parts[2].split(' '), parts[3].split(' '))
                    elif op == 'unb':
                        doit(parts[3].split(' '), parts[2].split(' '))
                    line = logfile.readline()
            finally:
                logfile.close()

    else:
        remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
                                                 opts.get('remote_head'))
        localrevs = opts.get('local_head')
        doit(localrevs, remoterevs)
1599
1599
@command('debugfsinfo', [], _('[PATH]'))
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    # Create a scratch file so the case-sensitivity probe has something
    # real on disk to test against.
    util.writefile('.debugfsinfo', '')
    try:
        ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
        ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
        ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
                                           and 'yes' or 'no'))
    finally:
        # Always remove the scratch file, even if one of the probes raises;
        # previously a failing probe leaked .debugfsinfo in the cwd.
        os.unlink('.debugfsinfo')
1609
1609
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
    ('C', 'common', [], _('id of common node'), _('ID')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'))
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    repo = hg.repository(ui, repopath)
    if not repo.capable('getbundle'):
        raise util.Abort("getbundle() not supported by target repository")
    # Translate the hex command-line ids into binary nodes for the wire call.
    args = {}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    bundle = repo.getbundle('debug', **args)

    # Map the user-visible compression name to the on-disk bundle header.
    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    bundletype = btypes.get(bundletype)
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))
    changegroup.writebundle(bundle, bundlepath, bundletype)
1637
1637
@command('debugignore', [], '')
def debugignore(ui, repo, *values, **opts):
    """display the combined ignore pattern"""
    ignore = repo.dirstate._ignore
    # Use getattr with a sentinel rather than hasattr: under Python 2,
    # hasattr swallows *all* exceptions raised by the attribute lookup,
    # which can hide real errors from the lazily-built ignore matcher.
    includepat = getattr(ignore, 'includepat', None)
    if includepat is not None:
        ui.write("%s\n" % includepat)
    else:
        raise util.Abort(_("no ignore patterns found"))
1646
1646
@command('debugindex',
    [('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise util.Abort(_("unknown format %d") % format)

    # Generaldelta revlogs store the delta parent explicitly; classic
    # revlogs store a chain base instead — label the column accordingly.
    generaldelta = r.version & revlog.REVLOGGENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = ' base'

    # NOTE(review): the exact run-length of spaces in these header strings
    # aligns the columns with the %-padded rows below — verify the padding
    # against the rendered output.
    if format == 0:
        ui.write(" rev offset length " + basehdr + " linkrev"
                 " nodeid p1 p2\n")
    elif format == 1:
        ui.write(" rev flag offset length"
                 " size " + basehdr + " link p1 p2 nodeid\n")

    for i in r:
        node = r.node(i)
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except:
                # Fall back to null parents if the index entry is unreadable.
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), base, r.linkrev(i),
                    short(node), short(pp[0]), short(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    base, r.linkrev(i), pr[0], pr[1], short(node)))
1691
1691
@command('debugindexdot', [], _('FILE'))
def debugindexdot(ui, repo, file_):
    """dump an index DAG as a graphviz dot file"""
    # Prefer the repository's own filelog; fall back to opening the path
    # as a raw revlog (e.g. when run outside a repo or on an empty log).
    r = None
    if repo:
        filelog = repo.file(file_)
        if len(filelog):
            r = filelog
    if not r:
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
    ui.write("digraph G {\n")
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        # One edge per parent; the null second parent is omitted.
        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write("}\n")
1710
1710
1711 @command('debuginstall', [], '')
1711 @command('debuginstall', [], '')
1712 def debuginstall(ui):
1712 def debuginstall(ui):
1713 '''test Mercurial installation
1713 '''test Mercurial installation
1714
1714
1715 Returns 0 on success.
1715 Returns 0 on success.
1716 '''
1716 '''
1717
1717
1718 def writetemp(contents):
1718 def writetemp(contents):
1719 (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
1719 (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
1720 f = os.fdopen(fd, "wb")
1720 f = os.fdopen(fd, "wb")
1721 f.write(contents)
1721 f.write(contents)
1722 f.close()
1722 f.close()
1723 return name
1723 return name
1724
1724
1725 problems = 0
1725 problems = 0
1726
1726
1727 # encoding
1727 # encoding
1728 ui.status(_("Checking encoding (%s)...\n") % encoding.encoding)
1728 ui.status(_("Checking encoding (%s)...\n") % encoding.encoding)
1729 try:
1729 try:
1730 encoding.fromlocal("test")
1730 encoding.fromlocal("test")
1731 except util.Abort, inst:
1731 except util.Abort, inst:
1732 ui.write(" %s\n" % inst)
1732 ui.write(" %s\n" % inst)
1733 ui.write(_(" (check that your locale is properly set)\n"))
1733 ui.write(_(" (check that your locale is properly set)\n"))
1734 problems += 1
1734 problems += 1
1735
1735
1736 # compiled modules
1736 # compiled modules
1737 ui.status(_("Checking installed modules (%s)...\n")
1737 ui.status(_("Checking installed modules (%s)...\n")
1738 % os.path.dirname(__file__))
1738 % os.path.dirname(__file__))
1739 try:
1739 try:
1740 import bdiff, mpatch, base85, osutil
1740 import bdiff, mpatch, base85, osutil
1741 except Exception, inst:
1741 except Exception, inst:
1742 ui.write(" %s\n" % inst)
1742 ui.write(" %s\n" % inst)
1743 ui.write(_(" One or more extensions could not be found"))
1743 ui.write(_(" One or more extensions could not be found"))
1744 ui.write(_(" (check that you compiled the extensions)\n"))
1744 ui.write(_(" (check that you compiled the extensions)\n"))
1745 problems += 1
1745 problems += 1
1746
1746
1747 # templates
1747 # templates
1748 ui.status(_("Checking templates...\n"))
1748 ui.status(_("Checking templates...\n"))
1749 try:
1749 try:
1750 import templater
1750 import templater
1751 templater.templater(templater.templatepath("map-cmdline.default"))
1751 templater.templater(templater.templatepath("map-cmdline.default"))
1752 except Exception, inst:
1752 except Exception, inst:
1753 ui.write(" %s\n" % inst)
1753 ui.write(" %s\n" % inst)
1754 ui.write(_(" (templates seem to have been installed incorrectly)\n"))
1754 ui.write(_(" (templates seem to have been installed incorrectly)\n"))
1755 problems += 1
1755 problems += 1
1756
1756
1757 # editor
1757 # editor
1758 ui.status(_("Checking commit editor...\n"))
1758 ui.status(_("Checking commit editor...\n"))
1759 editor = ui.geteditor()
1759 editor = ui.geteditor()
1760 cmdpath = util.findexe(editor) or util.findexe(editor.split()[0])
1760 cmdpath = util.findexe(editor) or util.findexe(editor.split()[0])
1761 if not cmdpath:
1761 if not cmdpath:
1762 if editor == 'vi':
1762 if editor == 'vi':
1763 ui.write(_(" No commit editor set and can't find vi in PATH\n"))
1763 ui.write(_(" No commit editor set and can't find vi in PATH\n"))
1764 ui.write(_(" (specify a commit editor in your configuration"
1764 ui.write(_(" (specify a commit editor in your configuration"
1765 " file)\n"))
1765 " file)\n"))
1766 else:
1766 else:
1767 ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
1767 ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
1768 ui.write(_(" (specify a commit editor in your configuration"
1768 ui.write(_(" (specify a commit editor in your configuration"
1769 " file)\n"))
1769 " file)\n"))
1770 problems += 1
1770 problems += 1
1771
1771
1772 # check username
1772 # check username
1773 ui.status(_("Checking username...\n"))
1773 ui.status(_("Checking username...\n"))
1774 try:
1774 try:
1775 ui.username()
1775 ui.username()
1776 except util.Abort, e:
1776 except util.Abort, e:
1777 ui.write(" %s\n" % e)
1777 ui.write(" %s\n" % e)
1778 ui.write(_(" (specify a username in your configuration file)\n"))
1778 ui.write(_(" (specify a username in your configuration file)\n"))
1779 problems += 1
1779 problems += 1
1780
1780
1781 if not problems:
1781 if not problems:
1782 ui.status(_("No problems detected\n"))
1782 ui.status(_("No problems detected\n"))
1783 else:
1783 else:
1784 ui.write(_("%s problems detected,"
1784 ui.write(_("%s problems detected,"
1785 " please check your install!\n") % problems)
1785 " please check your install!\n") % problems)
1786
1786
1787 return problems
1787 return problems
1788
1788
@command('debugknown', [], _('REPO ID...'))
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s and 1s
    indicating unknown/known.
    """
    repo = hg.repository(ui, repopath)
    if not repo.capable('known'):
        raise util.Abort("known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    # One digit per queried id, in input order: "1" known, "0" unknown.
    ui.write("%s\n" % "".join("1" if f else "0" for f in flags))
1801
1801
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'))
def debugpushkey(ui, repopath, namespace, *keyinfo):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.repository(ui, repopath)
    if keyinfo:
        # Conditional update: set key to new iff its current value is old.
        key, old, new = keyinfo
        r = target.pushkey(namespace, key, old, new)
        ui.status(str(r) + '\n')
        return not r
    else:
        # Listing mode: dump every key/value pair, escaped for safe display.
        for k, v in target.listkeys(namespace).iteritems():
            ui.write("%s\t%s\n" % (k.encode('string-escape'),
                                   v.encode('string-escape')))
1822
1822
@command('debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV'))],
    _('[-r REV] [REV]'))
def debugrebuildstate(ui, repo, rev="tip"):
    """rebuild the dirstate as it would look like for the given revision"""
    target = scmutil.revsingle(repo, rev)
    # Hold the working-directory lock while rewriting the dirstate.
    lock = repo.wlock()
    try:
        repo.dirstate.rebuild(target.node(), target.manifest())
    finally:
        lock.release()
1834
1834
@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(repo, (file1,) + pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        # renamed() returns (source path, source filenode) or False/None.
        origin = fctx.filelog().renamed(fctx.filenode())
        relpath = matcher.rel(path)
        if origin:
            ui.write(_("%s renamed from %s:%s\n")
                     % (relpath, origin[0], hex(origin[1])))
        else:
            ui.write(_("%s not renamed\n") % relpath)
1851
1851
@command('debugrevlog',
    [('c', 'changelog', False, _('open changelog')),
     ('m', 'manifest', False, _('open manifest')),
     ('d', 'dump', False, _('dump index data'))],
     _('-c|-m|FILE'))
def debugrevlog(ui, repo, file_ = None, **opts):
    """show data and statistics about a revlog"""
    # Select the revlog to inspect: changelog (-c), manifest (-m), or the
    # filelog for FILE; cmdutil.openrevlog resolves the flags.
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    if opts.get("dump"):
        # --dump: raw per-revision index dump, one line per revision.
        # NOTE(review): column-label spacing looks collapsed in this copy —
        # confirm alignment against upstream before relying on it.
        numrevs = len(r)
        ui.write("# rev p1rev p2rev start end deltastart base p1 p2"
                 " rawsize totalsize compression heads\n")
        ts = 0          # running total of raw (uncompressed) sizes
        heads = set()   # revisions with no children seen so far
        for rev in xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # -1 marks a full snapshot; report the revision itself
                # as its own delta base.
                dbase = rev
            cbase = r.chainbase(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # A revision's parents stop being heads once it appears.
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            # "compression" column is cumulative: raw-so-far / stored-so-far
            # (integer division under Python 2).
            ui.write("%d %d %d %d %d %d %d %d %d %d %d %d %d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, ts / r.end(rev), len(heads)))
        return 0

    # Decode the revlog version word: low 16 bits are the format number,
    # the high bits carry feature flags.
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.REVLOGNGINLINEDATA:
        flags.append('inline')
    if v & revlog.REVLOGGENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    # Counters for the statistics report below.
    nummerges = 0
    numfull = 0      # revisions stored as full snapshots
    numprev = 0      # deltas computed against the previous revision
    nump1 = 0
    nump2 = 0
    numother = 0
    nump1prev = 0
    nump2prev = 0
    chainlengths = []

    # [min, max, total] accumulators (0L: Python 2 long literals so the
    # totals cannot overflow a machine int).
    datasize = [None, 0, 0L]
    fullsize = [None, 0, 0L]
    deltasize = [None, 0, 0L]

    def addsize(size, l):
        # Fold one sample into a [min, max, total] accumulator list.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # Full snapshot: delta chain starts over here.
            chainlengths.append(0)
            numfull += 1
            addsize(size, fullsize)
        else:
            chainlengths.append(chainlengths[delta] + 1)
            addsize(size, deltasize)
            # Classify which revision the delta was computed against.
            if delta == rev - 1:
                numprev += 1
                if delta == p1:
                    nump1prev += 1
                elif delta == p2:
                    nump2prev += 1
            elif delta == p1:
                nump1 += 1
            elif delta == p2:
                nump2 += 1
            elif delta != nullrev:
                numother += 1

    numdeltas = numrevs - numfull
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # Convert totals to averages in place (integer division on Python 2).
    # NOTE(review): divides by numrevs/numfull without a zero guard —
    # presumably an empty revlog is never reached here; confirm.
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    deltatotal = deltasize[2]
    deltasize[2] /= numrevs - numfull
    totalsize = fulltotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    compratio = totalrawsize / totalsize

    # Width-parameterized output templates, filled in by the helpers below.
    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # Decimal format sized to the widest expected value.
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        # Decimal-plus-percentage format, optionally left-padded.
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # (value, percentage-of-total) pair matching pcfmtstr templates.
        return (value, 100 * float(value) / total)

    # NOTE(review): label padding in the strings below appears collapsed
    # to single spaces in this copy — confirm against upstream.
    ui.write('format : %d\n' % format)
    ui.write('flags : %s\n' % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write('revisions : ' + fmt2 % numrevs)
    ui.write(' merges : ' + fmt % pcfmt(nummerges, numrevs))
    ui.write(' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write('revisions : ' + fmt2 % numrevs)
    ui.write(' full : ' + fmt % pcfmt(numfull, numrevs))
    ui.write(' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
    ui.write('revision size : ' + fmt2 % totalsize)
    ui.write(' full : ' + fmt % pcfmt(fulltotal, totalsize))
    ui.write(' deltas : ' + fmt % pcfmt(deltatotal, totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, compratio))
    ui.write('avg chain length : ' + fmt % avgchainlen)
    ui.write('compression ratio : ' + fmt % compratio)

    if format > 0:
        # datasize is only populated for format > 0 (see loop above).
        ui.write('\n')
        ui.write('uncompressed data size (min/max/avg) : %d / %d / %d\n'
                 % tuple(datasize))
        ui.write('full revision size (min/max/avg) : %d / %d / %d\n'
                 % tuple(fullsize))
        ui.write('delta size (min/max/avg) : %d / %d / %d\n'
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write('deltas against prev : ' + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write(' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev))
            ui.write(' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev))
            ui.write(' other : ' + fmt2 % pcfmt(numoprev, numprev))
        if gdelta:
            # These buckets are only meaningful with generaldelta enabled.
            ui.write('deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas))
            ui.write('deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas))
            ui.write('deltas against other : ' + fmt % pcfmt(numother, numdeltas))
2012
2012
# Fix: the synopsis was written as ('REVSPEC') — plain parentheses, not a
# gettext call — so it was the only command in this table whose usage string
# was not marked for translation. Wrap it in _() like every sibling command.
@command('debugrevspec', [], _('REVSPEC'))
def debugrevspec(ui, repo, expr):
    '''parse and apply a revision specification'''
    if ui.verbose:
        # In verbose mode, show the parse tree before and (if different)
        # after revset alias expansion.
        parsed = revset.parse(expr)[0]
        ui.note(parsed, "\n")
        expanded = revset.findaliases(ui, parsed)
        if expanded != parsed:
            ui.note(expanded, "\n")
    # Evaluate the revset over every revision in the repository and print
    # each matching revision number.
    matchfn = revset.match(ui, expr)
    for rev in matchfn(repo, range(len(repo))):
        ui.write("%s\n" % rev)
2025
2025
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.

    Returns 0 on success.
    """

    # Resolve both revisions up front; a missing REV2 defaults to null.
    p1node = scmutil.revsingle(repo, rev1).node()
    p2node = scmutil.revsingle(repo, rev2, 'null').node()

    lock = repo.wlock()
    try:
        repo.dirstate.setparents(p1node, p2node)
    finally:
        lock.release()
2044
2044
@command('debugstate',
    [('', 'nodates', None, _('do not display the saved mtime')),
     ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, nodates=None, datesort=None):
    """show the contents of the current dirstate"""
    timestr = ""
    showdate = not nodates
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # Each dirstate entry is indexed as: ent[0] state char, ent[1] mode,
    # ent[2] size, ent[3] mtime — inferred from the uses below; confirm
    # against the dirstate implementation.
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if showdate:
            if ent[3] == -1:
                # -1 means the mtime is unknown; print 'unset' padded to
                # the width of a locale-formatted timestamp.
                # Pad or slice to locale representation
                locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ",
                                               time.localtime(0)))
                timestr = 'unset'
                timestr = (timestr[:locale_len] +
                           ' ' * (locale_len - len(timestr)))
            else:
                timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
                                        time.localtime(ent[3]))
        # 020000 (octal) is the symlink bit of the stored st_mode;
        # otherwise show the permission bits (0777 mask) in octal.
        if ent[1] & 020000:
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0777)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    # Finally list recorded copy/rename sources.
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
2076
2076
@command('debugsub',
    [('r', 'rev', '',
     _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    # Dump the subrepo state (source URL and pinned revision) recorded in
    # the given changeset, sorted by subrepo path.
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.write('path %s\n' % path)
        ui.write(' source %s\n' % state[0])
        ui.write(' revision %s\n' % state[1])
2087
2087
@command('debugwalk', walkopts, _('[OPTION]... [FILE]...'))
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    matcher = scmutil.match(repo, pats, opts)
    matched = list(repo.walk(matcher))
    if not matched:
        return
    # Size the two columns to the widest absolute and relative paths.
    abswidth = max([len(path) for path in matched])
    relwidth = max([len(matcher.rel(path)) for path in matched])
    template = 'f %%-%ds %%-%ds %%s' % (abswidth, relwidth)
    for path in matched:
        if matcher.exact(path):
            tag = 'exact'
        else:
            tag = ''
        line = template % (path, matcher.rel(path), tag)
        ui.write("%s\n" % line.rstrip())
2101
2101
@command('debugwireargs',
    [('', 'three', '', 'three'),
    ('', 'four', '', 'four'),
    ('', 'five', '', 'five'),
    ] + remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'))
def debugwireargs(ui, repopath, *vals, **opts):
    # Exercise wire-protocol argument passing against a (possibly remote)
    # repository; only options with truthy values are forwarded.
    repo = hg.repository(hg.remoteui(ui, opts), repopath)
    for opt in remoteopts:
        del opts[opt[1]]
    args = dict((name, value) for name, value in opts.iteritems() if value)
    # run twice to check that we don't mess up the stream for the next command
    res1 = repo.debugwireargs(*vals, **args)
    res2 = repo.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
2122
2122
@command('^diff',
    [('r', 'rev', [], _('revision'), _('REV')),
    ('c', 'change', '', _('change made by revision'), _('REV'))
    ] + diffopts + diffopts2 + walkopts + subrepoopts,
    _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'))
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    .. note::
    diff may generate unexpected results for merges, as it will
    default to comparing against the working directory's first
    parent changeset if no revisions are specified.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.

    Alternatively you can specify -c/--change with a revision to see
    the changes in that changeset relative to its first parent.

    Without the -a/--text option, diff will avoid generating diffs of
    files it detects as binary. With -a, diff will generate a diff
    anyway, probably with undesirable results.

    Use the -g/--git option to generate diffs in the git extended diff
    format. For more information, read :hg:`help diffs`.

    Returns 0 on success.
    """

    revs = opts.get('rev')
    change = opts.get('change')

    # --rev and --change are mutually exclusive ways to pick endpoints.
    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    if change:
        # -c REV: compare REV against its first parent.
        node2 = scmutil.revsingle(repo, change, None).node()
        node1 = repo[node2].p1().node()
    else:
        node1, node2 = scmutil.revpair(repo, revs)

    if opts.get('reverse'):
        node1, node2 = node2, node1

    matcher = scmutil.match(repo, pats, opts)
    cmdutil.diffordiffstat(ui, repo, patch.diffopts(ui, opts),
                           node1, node2, matcher,
                           stat=opts.get('stat'),
                           listsubrepos=opts.get('subrepos'))
2180
2180
@command('^export',
    [('o', 'output', '',
     _('print output to file with formatted name'), _('FORMAT')),
    ('', 'switch-parent', None, _('diff against the second parent')),
    ('r', 'rev', [], _('revisions to export'), _('REV')),
    ] + diffopts,
    _('[OPTION]... [-o OUTFILESPEC] REV...'))
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.

    The information shown in the changeset header is: author, date,
    branch name (if non-default), changeset hash, parent(s) and commit
    comment.

    .. note::
    export may generate unexpected diff output for merge
    changesets, as it will compare the merge changeset against its
    first parent only.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:

    :``%%``: literal "%" character
    :``%H``: changeset hash (40 hexadecimal digits)
    :``%N``: number of patches being generated
    :``%R``: changeset revision number
    :``%b``: basename of the exporting repository
    :``%h``: short-form changeset hash (12 hexadecimal digits)
    :``%n``: zero-padded sequence number, starting at 1
    :``%r``: zero-padded changeset revision number

    Without the -a/--text option, export will avoid generating diffs
    of files it detects as binary. With -a, export will generate a
    diff anyway, probably with undesirable results.

    Use the -g/--git option to generate diffs in the git extended diff
    format. See :hg:`help diffs` for more information.

    With the --switch-parent option, the diff will be against the
    second parent. It can be useful to review a merge.

    Returns 0 on success.
    """
    # Positional arguments and -r/--rev values are treated the same way.
    changesets += tuple(opts.get('rev', []))
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    revs = scmutil.revrange(repo, changesets)
    if len(revs) > 1:
        note = _('exporting patches:\n')
    else:
        note = _('exporting patch:\n')
    ui.note(note)
    cmdutil.export(repo, revs, template=opts.get('output'),
                   switch_parent=opts.get('switch_parent'),
                   opts=patch.diffopts(ui, opts))
2237
2237
@command('^forget', walkopts, _('[OPTION]... FILE...'))
def forget(ui, repo, *pats, **opts):
    """forget the specified files on the next commit

    Mark the specified files so they will no longer be tracked
    after the next commit.

    This only removes files from the current branch, not from the
    entire project history, and it does not delete them from the
    working directory.

    To undo a forget before the next commit, see :hg:`add`.

    Returns 0 on success.
    """

    if not pats:
        raise util.Abort(_('no files specified'))

    matcher = scmutil.match(repo, pats, opts)
    st = repo.status(match=matcher, clean=True)
    # Forget everything matched that the dirstate knows about; the status
    # buckets indexed here are st[0], st[1], st[3] and st[6] — presumably
    # modified/added/deleted/clean; confirm against repo.status().
    forgotten = sorted(st[0] + st[1] + st[3] + st[6])

    errs = 0
    for name in matcher.files():
        # Explicitly named files that are not tracked get a warning and a
        # non-zero exit status.
        if name not in repo.dirstate and not os.path.isdir(matcher.rel(name)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % matcher.rel(name))
            errs = 1

    for name in forgotten:
        if ui.verbose or not matcher.exact(name):
            ui.status(_('removing %s\n') % matcher.rel(name))

    repo[None].forget(forgotten)
    return errs
2274
2274
2275 @command('grep',
2275 @command('grep',
2276 [('0', 'print0', None, _('end fields with NUL')),
2276 [('0', 'print0', None, _('end fields with NUL')),
2277 ('', 'all', None, _('print all revisions that match')),
2277 ('', 'all', None, _('print all revisions that match')),
2278 ('a', 'text', None, _('treat all files as text')),
2278 ('a', 'text', None, _('treat all files as text')),
2279 ('f', 'follow', None,
2279 ('f', 'follow', None,
2280 _('follow changeset history,'
2280 _('follow changeset history,'
2281 ' or file history across copies and renames')),
2281 ' or file history across copies and renames')),
2282 ('i', 'ignore-case', None, _('ignore case when matching')),
2282 ('i', 'ignore-case', None, _('ignore case when matching')),
2283 ('l', 'files-with-matches', None,
2283 ('l', 'files-with-matches', None,
2284 _('print only filenames and revisions that match')),
2284 _('print only filenames and revisions that match')),
2285 ('n', 'line-number', None, _('print matching line numbers')),
2285 ('n', 'line-number', None, _('print matching line numbers')),
2286 ('r', 'rev', [],
2286 ('r', 'rev', [],
2287 _('only search files changed within revision range'), _('REV')),
2287 _('only search files changed within revision range'), _('REV')),
2288 ('u', 'user', None, _('list the author (long with -v)')),
2288 ('u', 'user', None, _('list the author (long with -v)')),
2289 ('d', 'date', None, _('list the date (short with -q)')),
2289 ('d', 'date', None, _('list the date (short with -q)')),
2290 ] + walkopts,
2290 ] + walkopts,
2291 _('[OPTION]... PATTERN [FILE]...'))
2291 _('[OPTION]... PATTERN [FILE]...'))
2292 def grep(ui, repo, pattern, *pats, **opts):
2292 def grep(ui, repo, pattern, *pats, **opts):
2293 """search for a pattern in specified files and revisions
2293 """search for a pattern in specified files and revisions
2294
2294
2295 Search revisions of files for a regular expression.
2295 Search revisions of files for a regular expression.
2296
2296
2297 This command behaves differently than Unix grep. It only accepts
2297 This command behaves differently than Unix grep. It only accepts
2298 Python/Perl regexps. It searches repository history, not the
2298 Python/Perl regexps. It searches repository history, not the
2299 working directory. It always prints the revision number in which a
2299 working directory. It always prints the revision number in which a
2300 match appears.
2300 match appears.
2301
2301
2302 By default, grep only prints output for the first revision of a
2302 By default, grep only prints output for the first revision of a
2303 file in which it finds a match. To get it to print every revision
2303 file in which it finds a match. To get it to print every revision
2304 that contains a change in match status ("-" for a match that
2304 that contains a change in match status ("-" for a match that
2305 becomes a non-match, or "+" for a non-match that becomes a match),
2305 becomes a non-match, or "+" for a non-match that becomes a match),
2306 use the --all flag.
2306 use the --all flag.
2307
2307
2308 Returns 0 if a match is found, 1 otherwise.
2308 Returns 0 if a match is found, 1 otherwise.
2309 """
2309 """
2310 reflags = 0
2310 reflags = 0
2311 if opts.get('ignore_case'):
2311 if opts.get('ignore_case'):
2312 reflags |= re.I
2312 reflags |= re.I
2313 try:
2313 try:
2314 regexp = re.compile(pattern, reflags)
2314 regexp = re.compile(pattern, reflags)
2315 except re.error, inst:
2315 except re.error, inst:
2316 ui.warn(_("grep: invalid match pattern: %s\n") % inst)
2316 ui.warn(_("grep: invalid match pattern: %s\n") % inst)
2317 return 1
2317 return 1
2318 sep, eol = ':', '\n'
2318 sep, eol = ':', '\n'
2319 if opts.get('print0'):
2319 if opts.get('print0'):
2320 sep = eol = '\0'
2320 sep = eol = '\0'
2321
2321
2322 getfile = util.lrucachefunc(repo.file)
2322 getfile = util.lrucachefunc(repo.file)
2323
2323
2324 def matchlines(body):
2324 def matchlines(body):
2325 begin = 0
2325 begin = 0
2326 linenum = 0
2326 linenum = 0
2327 while True:
2327 while True:
2328 match = regexp.search(body, begin)
2328 match = regexp.search(body, begin)
2329 if not match:
2329 if not match:
2330 break
2330 break
2331 mstart, mend = match.span()
2331 mstart, mend = match.span()
2332 linenum += body.count('\n', begin, mstart) + 1
2332 linenum += body.count('\n', begin, mstart) + 1
2333 lstart = body.rfind('\n', begin, mstart) + 1 or begin
2333 lstart = body.rfind('\n', begin, mstart) + 1 or begin
2334 begin = body.find('\n', mend) + 1 or len(body)
2334 begin = body.find('\n', mend) + 1 or len(body)
2335 lend = begin - 1
2335 lend = begin - 1
2336 yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
2336 yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
2337
2337
2338 class linestate(object):
2338 class linestate(object):
2339 def __init__(self, line, linenum, colstart, colend):
2339 def __init__(self, line, linenum, colstart, colend):
2340 self.line = line
2340 self.line = line
2341 self.linenum = linenum
2341 self.linenum = linenum
2342 self.colstart = colstart
2342 self.colstart = colstart
2343 self.colend = colend
2343 self.colend = colend
2344
2344
2345 def __hash__(self):
2345 def __hash__(self):
2346 return hash((self.linenum, self.line))
2346 return hash((self.linenum, self.line))
2347
2347
2348 def __eq__(self, other):
2348 def __eq__(self, other):
2349 return self.line == other.line
2349 return self.line == other.line
2350
2350
2351 matches = {}
2351 matches = {}
2352 copies = {}
2352 copies = {}
2353 def grepbody(fn, rev, body):
2353 def grepbody(fn, rev, body):
2354 matches[rev].setdefault(fn, [])
2354 matches[rev].setdefault(fn, [])
2355 m = matches[rev][fn]
2355 m = matches[rev][fn]
2356 for lnum, cstart, cend, line in matchlines(body):
2356 for lnum, cstart, cend, line in matchlines(body):
2357 s = linestate(line, lnum, cstart, cend)
2357 s = linestate(line, lnum, cstart, cend)
2358 m.append(s)
2358 m.append(s)
2359
2359
2360 def difflinestates(a, b):
2360 def difflinestates(a, b):
2361 sm = difflib.SequenceMatcher(None, a, b)
2361 sm = difflib.SequenceMatcher(None, a, b)
2362 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
2362 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
2363 if tag == 'insert':
2363 if tag == 'insert':
2364 for i in xrange(blo, bhi):
2364 for i in xrange(blo, bhi):
2365 yield ('+', b[i])
2365 yield ('+', b[i])
2366 elif tag == 'delete':
2366 elif tag == 'delete':
2367 for i in xrange(alo, ahi):
2367 for i in xrange(alo, ahi):
2368 yield ('-', a[i])
2368 yield ('-', a[i])
2369 elif tag == 'replace':
2369 elif tag == 'replace':
2370 for i in xrange(alo, ahi):
2370 for i in xrange(alo, ahi):
2371 yield ('-', a[i])
2371 yield ('-', a[i])
2372 for i in xrange(blo, bhi):
2372 for i in xrange(blo, bhi):
2373 yield ('+', b[i])
2373 yield ('+', b[i])
2374
2374
2375 def display(fn, ctx, pstates, states):
2375 def display(fn, ctx, pstates, states):
2376 rev = ctx.rev()
2376 rev = ctx.rev()
2377 datefunc = ui.quiet and util.shortdate or util.datestr
2377 datefunc = ui.quiet and util.shortdate or util.datestr
2378 found = False
2378 found = False
2379 filerevmatches = {}
2379 filerevmatches = {}
2380 def binary():
2380 def binary():
2381 flog = getfile(fn)
2381 flog = getfile(fn)
2382 return util.binary(flog.read(ctx.filenode(fn)))
2382 return util.binary(flog.read(ctx.filenode(fn)))
2383
2383
2384 if opts.get('all'):
2384 if opts.get('all'):
2385 iter = difflinestates(pstates, states)
2385 iter = difflinestates(pstates, states)
2386 else:
2386 else:
2387 iter = [('', l) for l in states]
2387 iter = [('', l) for l in states]
2388 for change, l in iter:
2388 for change, l in iter:
2389 cols = [fn, str(rev)]
2389 cols = [fn, str(rev)]
2390 before, match, after = None, None, None
2390 before, match, after = None, None, None
2391 if opts.get('line_number'):
2391 if opts.get('line_number'):
2392 cols.append(str(l.linenum))
2392 cols.append(str(l.linenum))
2393 if opts.get('all'):
2393 if opts.get('all'):
2394 cols.append(change)
2394 cols.append(change)
2395 if opts.get('user'):
2395 if opts.get('user'):
2396 cols.append(ui.shortuser(ctx.user()))
2396 cols.append(ui.shortuser(ctx.user()))
2397 if opts.get('date'):
2397 if opts.get('date'):
2398 cols.append(datefunc(ctx.date()))
2398 cols.append(datefunc(ctx.date()))
2399 if opts.get('files_with_matches'):
2399 if opts.get('files_with_matches'):
2400 c = (fn, rev)
2400 c = (fn, rev)
2401 if c in filerevmatches:
2401 if c in filerevmatches:
2402 continue
2402 continue
2403 filerevmatches[c] = 1
2403 filerevmatches[c] = 1
2404 else:
2404 else:
2405 before = l.line[:l.colstart]
2405 before = l.line[:l.colstart]
2406 match = l.line[l.colstart:l.colend]
2406 match = l.line[l.colstart:l.colend]
2407 after = l.line[l.colend:]
2407 after = l.line[l.colend:]
2408 ui.write(sep.join(cols))
2408 ui.write(sep.join(cols))
2409 if before is not None:
2409 if before is not None:
2410 if not opts.get('text') and binary():
2410 if not opts.get('text') and binary():
2411 ui.write(sep + " Binary file matches")
2411 ui.write(sep + " Binary file matches")
2412 else:
2412 else:
2413 ui.write(sep + before)
2413 ui.write(sep + before)
2414 ui.write(match, label='grep.match')
2414 ui.write(match, label='grep.match')
2415 ui.write(after)
2415 ui.write(after)
2416 ui.write(eol)
2416 ui.write(eol)
2417 found = True
2417 found = True
2418 return found
2418 return found
2419
2419
2420 skip = {}
2420 skip = {}
2421 revfiles = {}
2421 revfiles = {}
2422 matchfn = scmutil.match(repo, pats, opts)
2422 matchfn = scmutil.match(repo, pats, opts)
2423 found = False
2423 found = False
2424 follow = opts.get('follow')
2424 follow = opts.get('follow')
2425
2425
2426 def prep(ctx, fns):
2426 def prep(ctx, fns):
2427 rev = ctx.rev()
2427 rev = ctx.rev()
2428 pctx = ctx.p1()
2428 pctx = ctx.p1()
2429 parent = pctx.rev()
2429 parent = pctx.rev()
2430 matches.setdefault(rev, {})
2430 matches.setdefault(rev, {})
2431 matches.setdefault(parent, {})
2431 matches.setdefault(parent, {})
2432 files = revfiles.setdefault(rev, [])
2432 files = revfiles.setdefault(rev, [])
2433 for fn in fns:
2433 for fn in fns:
2434 flog = getfile(fn)
2434 flog = getfile(fn)
2435 try:
2435 try:
2436 fnode = ctx.filenode(fn)
2436 fnode = ctx.filenode(fn)
2437 except error.LookupError:
2437 except error.LookupError:
2438 continue
2438 continue
2439
2439
2440 copied = flog.renamed(fnode)
2440 copied = flog.renamed(fnode)
2441 copy = follow and copied and copied[0]
2441 copy = follow and copied and copied[0]
2442 if copy:
2442 if copy:
2443 copies.setdefault(rev, {})[fn] = copy
2443 copies.setdefault(rev, {})[fn] = copy
2444 if fn in skip:
2444 if fn in skip:
2445 if copy:
2445 if copy:
2446 skip[copy] = True
2446 skip[copy] = True
2447 continue
2447 continue
2448 files.append(fn)
2448 files.append(fn)
2449
2449
2450 if fn not in matches[rev]:
2450 if fn not in matches[rev]:
2451 grepbody(fn, rev, flog.read(fnode))
2451 grepbody(fn, rev, flog.read(fnode))
2452
2452
2453 pfn = copy or fn
2453 pfn = copy or fn
2454 if pfn not in matches[parent]:
2454 if pfn not in matches[parent]:
2455 try:
2455 try:
2456 fnode = pctx.filenode(pfn)
2456 fnode = pctx.filenode(pfn)
2457 grepbody(pfn, parent, flog.read(fnode))
2457 grepbody(pfn, parent, flog.read(fnode))
2458 except error.LookupError:
2458 except error.LookupError:
2459 pass
2459 pass
2460
2460
2461 for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
2461 for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
2462 rev = ctx.rev()
2462 rev = ctx.rev()
2463 parent = ctx.p1().rev()
2463 parent = ctx.p1().rev()
2464 for fn in sorted(revfiles.get(rev, [])):
2464 for fn in sorted(revfiles.get(rev, [])):
2465 states = matches[rev][fn]
2465 states = matches[rev][fn]
2466 copy = copies.get(rev, {}).get(fn)
2466 copy = copies.get(rev, {}).get(fn)
2467 if fn in skip:
2467 if fn in skip:
2468 if copy:
2468 if copy:
2469 skip[copy] = True
2469 skip[copy] = True
2470 continue
2470 continue
2471 pstates = matches.get(parent, {}).get(copy or fn, [])
2471 pstates = matches.get(parent, {}).get(copy or fn, [])
2472 if pstates or states:
2472 if pstates or states:
2473 r = display(fn, ctx, pstates, states)
2473 r = display(fn, ctx, pstates, states)
2474 found = found or r
2474 found = found or r
2475 if r and not opts.get('all'):
2475 if r and not opts.get('all'):
2476 skip[fn] = True
2476 skip[fn] = True
2477 if copy:
2477 if copy:
2478 skip[copy] = True
2478 skip[copy] = True
2479 del matches[rev]
2479 del matches[rev]
2480 del revfiles[rev]
2480 del revfiles[rev]
2481
2481
2482 return not found
2482 return not found
2483
2483
@command('heads',
    [('r', 'rev', '',
     _('show only heads which are descendants of STARTREV'), _('STARTREV')),
    ('t', 'topo', False, _('show topological heads only')),
    ('a', 'active', False, _('show active branchheads only (DEPRECATED)')),
    ('c', 'closed', False, _('show normal and closed branch heads')),
    ] + templateopts,
    _('[-ac] [-r STARTREV] [REV]...'))
def heads(ui, repo, *branchrevs, **opts):
    """show current repository heads or show branch heads

    With no arguments, show all repository branch heads.

    Repository "heads" are changesets with no child changesets. They are
    where development generally takes place and are the usual targets
    for update and merge operations. Branch heads are changesets that have
    no child changeset on the same branch.

    If one or more REVs are given, only branch heads on the branches
    associated with the specified changesets are shown.

    If -c/--closed is specified, also show branch heads marked closed
    (see :hg:`commit --close-branch`).

    If STARTREV is specified, only those heads that are descendants of
    STARTREV will be displayed.

    If -t/--topo is specified, named branch mechanics will be ignored and only
    changesets without children will be shown.

    Returns 0 if matching heads are found, 1 if not.
    """

    start = None
    # 'rev' is always present in opts ('' by default), so testing
    # membership was always true; only resolve a user-supplied revision.
    if opts.get('rev'):
        start = scmutil.revsingle(repo, opts['rev'], None).node()

    if opts.get('topo'):
        heads = [repo[h] for h in repo.heads(start)]
    else:
        heads = []
        for branch in repo.branchmap():
            heads += repo.branchheads(branch, start, opts.get('closed'))
        heads = [repo[h] for h in heads]

    if branchrevs:
        branches = set(repo[br].branch() for br in branchrevs)
        heads = [h for h in heads if h.branch() in branches]

    if opts.get('active') and branchrevs:
        dagheads = repo.heads(start)
        heads = [h for h in heads if h.node() in dagheads]

    if branchrevs:
        haveheads = set(h.branch() for h in heads)
        if branches - haveheads:
            headless = ', '.join(b for b in branches - haveheads)
            msg = _('no open branch heads found on branches %s')
            if opts.get('rev'):
                # apply % outside _() so the catalog is queried with the
                # untranslated msgid, not the already-formatted string
                msg += _(' (started at %s)') % opts['rev']
            ui.warn((msg + '\n') % headless)

    if not heads:
        return 1

    # newest first
    heads = sorted(heads, key=lambda x: -x.rev())
    displayer = cmdutil.show_changeset(ui, repo, opts)
    for ctx in heads:
        displayer.show(ctx)
    displayer.close()
2554
2554
2555 @command('help',
2555 @command('help',
2556 [('e', 'extension', None, _('show only help for extensions')),
2556 [('e', 'extension', None, _('show only help for extensions')),
2557 ('c', 'command', None, _('show only help for commands'))],
2557 ('c', 'command', None, _('show only help for commands'))],
2558 _('[-ec] [TOPIC]'))
2558 _('[-ec] [TOPIC]'))
2559 def help_(ui, name=None, with_version=False, unknowncmd=False, full=True, **opts):
2559 def help_(ui, name=None, with_version=False, unknowncmd=False, full=True, **opts):
2560 """show help for a given topic or a help overview
2560 """show help for a given topic or a help overview
2561
2561
2562 With no arguments, print a list of commands with short help messages.
2562 With no arguments, print a list of commands with short help messages.
2563
2563
2564 Given a topic, extension, or command name, print help for that
2564 Given a topic, extension, or command name, print help for that
2565 topic.
2565 topic.
2566
2566
2567 Returns 0 if successful.
2567 Returns 0 if successful.
2568 """
2568 """
2569 option_lists = []
2569 option_lists = []
2570 textwidth = min(ui.termwidth(), 80) - 2
2570 textwidth = min(ui.termwidth(), 80) - 2
2571
2571
2572 def addglobalopts(aliases):
2572 def addglobalopts(aliases):
2573 if ui.verbose:
2573 if ui.verbose:
2574 option_lists.append((_("global options:"), globalopts))
2574 option_lists.append((_("global options:"), globalopts))
2575 if name == 'shortlist':
2575 if name == 'shortlist':
2576 option_lists.append((_('use "hg help" for the full list '
2576 option_lists.append((_('use "hg help" for the full list '
2577 'of commands'), ()))
2577 'of commands'), ()))
2578 else:
2578 else:
2579 if name == 'shortlist':
2579 if name == 'shortlist':
2580 msg = _('use "hg help" for the full list of commands '
2580 msg = _('use "hg help" for the full list of commands '
2581 'or "hg -v" for details')
2581 'or "hg -v" for details')
2582 elif name and not full:
2582 elif name and not full:
2583 msg = _('use "hg help %s" to show the full help text' % name)
2583 msg = _('use "hg help %s" to show the full help text' % name)
2584 elif aliases:
2584 elif aliases:
2585 msg = _('use "hg -v help%s" to show builtin aliases and '
2585 msg = _('use "hg -v help%s" to show builtin aliases and '
2586 'global options') % (name and " " + name or "")
2586 'global options') % (name and " " + name or "")
2587 else:
2587 else:
2588 msg = _('use "hg -v help %s" to show global options') % name
2588 msg = _('use "hg -v help %s" to show global options') % name
2589 option_lists.append((msg, ()))
2589 option_lists.append((msg, ()))
2590
2590
2591 def helpcmd(name):
2591 def helpcmd(name):
2592 if with_version:
2592 if with_version:
2593 version_(ui)
2593 version_(ui)
2594 ui.write('\n')
2594 ui.write('\n')
2595
2595
2596 try:
2596 try:
2597 aliases, entry = cmdutil.findcmd(name, table, strict=unknowncmd)
2597 aliases, entry = cmdutil.findcmd(name, table, strict=unknowncmd)
2598 except error.AmbiguousCommand, inst:
2598 except error.AmbiguousCommand, inst:
2599 # py3k fix: except vars can't be used outside the scope of the
2599 # py3k fix: except vars can't be used outside the scope of the
2600 # except block, nor can be used inside a lambda. python issue4617
2600 # except block, nor can be used inside a lambda. python issue4617
2601 prefix = inst.args[0]
2601 prefix = inst.args[0]
2602 select = lambda c: c.lstrip('^').startswith(prefix)
2602 select = lambda c: c.lstrip('^').startswith(prefix)
2603 helplist(_('list of commands:\n\n'), select)
2603 helplist(_('list of commands:\n\n'), select)
2604 return
2604 return
2605
2605
2606 # check if it's an invalid alias and display its error if it is
2606 # check if it's an invalid alias and display its error if it is
2607 if getattr(entry[0], 'badalias', False):
2607 if getattr(entry[0], 'badalias', False):
2608 if not unknowncmd:
2608 if not unknowncmd:
2609 entry[0](ui)
2609 entry[0](ui)
2610 return
2610 return
2611
2611
2612 # synopsis
2612 # synopsis
2613 if len(entry) > 2:
2613 if len(entry) > 2:
2614 if entry[2].startswith('hg'):
2614 if entry[2].startswith('hg'):
2615 ui.write("%s\n" % entry[2])
2615 ui.write("%s\n" % entry[2])
2616 else:
2616 else:
2617 ui.write('hg %s %s\n' % (aliases[0], entry[2]))
2617 ui.write('hg %s %s\n' % (aliases[0], entry[2]))
2618 else:
2618 else:
2619 ui.write('hg %s\n' % aliases[0])
2619 ui.write('hg %s\n' % aliases[0])
2620
2620
2621 # aliases
2621 # aliases
2622 if full and not ui.quiet and len(aliases) > 1:
2622 if full and not ui.quiet and len(aliases) > 1:
2623 ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))
2623 ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))
2624
2624
2625 # description
2625 # description
2626 doc = gettext(entry[0].__doc__)
2626 doc = gettext(entry[0].__doc__)
2627 if not doc:
2627 if not doc:
2628 doc = _("(no help text available)")
2628 doc = _("(no help text available)")
2629 if hasattr(entry[0], 'definition'): # aliased command
2629 if hasattr(entry[0], 'definition'): # aliased command
2630 if entry[0].definition.startswith('!'): # shell alias
2630 if entry[0].definition.startswith('!'): # shell alias
2631 doc = _('shell alias for::\n\n %s') % entry[0].definition[1:]
2631 doc = _('shell alias for::\n\n %s') % entry[0].definition[1:]
2632 else:
2632 else:
2633 doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc)
2633 doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc)
2634 if ui.quiet or not full:
2634 if ui.quiet or not full:
2635 doc = doc.splitlines()[0]
2635 doc = doc.splitlines()[0]
2636 keep = ui.verbose and ['verbose'] or []
2636 keep = ui.verbose and ['verbose'] or []
2637 formatted, pruned = minirst.format(doc, textwidth, keep=keep)
2637 formatted, pruned = minirst.format(doc, textwidth, keep=keep)
2638 ui.write("\n%s\n" % formatted)
2638 ui.write("\n%s\n" % formatted)
2639 if pruned:
2639 if pruned:
2640 ui.write(_('\nuse "hg -v help %s" to show verbose help\n') % name)
2640 ui.write(_('\nuse "hg -v help %s" to show verbose help\n') % name)
2641
2641
2642 if not ui.quiet:
2642 if not ui.quiet:
2643 # options
2643 # options
2644 if entry[1]:
2644 if entry[1]:
2645 option_lists.append((_("options:\n"), entry[1]))
2645 option_lists.append((_("options:\n"), entry[1]))
2646
2646
2647 addglobalopts(False)
2647 addglobalopts(False)
2648
2648
2649 # check if this command shadows a non-trivial (multi-line)
2649 # check if this command shadows a non-trivial (multi-line)
2650 # extension help text
2650 # extension help text
2651 try:
2651 try:
2652 mod = extensions.find(name)
2652 mod = extensions.find(name)
2653 doc = gettext(mod.__doc__) or ''
2653 doc = gettext(mod.__doc__) or ''
2654 if '\n' in doc.strip():
2654 if '\n' in doc.strip():
2655 msg = _('use "hg help -e %s" to show help for '
2655 msg = _('use "hg help -e %s" to show help for '
2656 'the %s extension') % (name, name)
2656 'the %s extension') % (name, name)
2657 ui.write('\n%s\n' % msg)
2657 ui.write('\n%s\n' % msg)
2658 except KeyError:
2658 except KeyError:
2659 pass
2659 pass
2660
2660
2661 def helplist(header, select=None):
2661 def helplist(header, select=None):
2662 h = {}
2662 h = {}
2663 cmds = {}
2663 cmds = {}
2664 for c, e in table.iteritems():
2664 for c, e in table.iteritems():
2665 f = c.split("|", 1)[0]
2665 f = c.split("|", 1)[0]
2666 if select and not select(f):
2666 if select and not select(f):
2667 continue
2667 continue
2668 if (not select and name != 'shortlist' and
2668 if (not select and name != 'shortlist' and
2669 e[0].__module__ != __name__):
2669 e[0].__module__ != __name__):
2670 continue
2670 continue
2671 if name == "shortlist" and not f.startswith("^"):
2671 if name == "shortlist" and not f.startswith("^"):
2672 continue
2672 continue
2673 f = f.lstrip("^")
2673 f = f.lstrip("^")
2674 if not ui.debugflag and f.startswith("debug"):
2674 if not ui.debugflag and f.startswith("debug"):
2675 continue
2675 continue
2676 doc = e[0].__doc__
2676 doc = e[0].__doc__
2677 if doc and 'DEPRECATED' in doc and not ui.verbose:
2677 if doc and 'DEPRECATED' in doc and not ui.verbose:
2678 continue
2678 continue
2679 doc = gettext(doc)
2679 doc = gettext(doc)
2680 if not doc:
2680 if not doc:
2681 doc = _("(no help text available)")
2681 doc = _("(no help text available)")
2682 h[f] = doc.splitlines()[0].rstrip()
2682 h[f] = doc.splitlines()[0].rstrip()
2683 cmds[f] = c.lstrip("^")
2683 cmds[f] = c.lstrip("^")
2684
2684
2685 if not h:
2685 if not h:
2686 ui.status(_('no commands defined\n'))
2686 ui.status(_('no commands defined\n'))
2687 return
2687 return
2688
2688
2689 ui.status(header)
2689 ui.status(header)
2690 fns = sorted(h)
2690 fns = sorted(h)
2691 m = max(map(len, fns))
2691 m = max(map(len, fns))
2692 for f in fns:
2692 for f in fns:
2693 if ui.verbose:
2693 if ui.verbose:
2694 commands = cmds[f].replace("|",", ")
2694 commands = cmds[f].replace("|",", ")
2695 ui.write(" %s:\n %s\n"%(commands, h[f]))
2695 ui.write(" %s:\n %s\n"%(commands, h[f]))
2696 else:
2696 else:
2697 ui.write('%s\n' % (util.wrap(h[f], textwidth,
2697 ui.write('%s\n' % (util.wrap(h[f], textwidth,
2698 initindent=' %-*s ' % (m, f),
2698 initindent=' %-*s ' % (m, f),
2699 hangindent=' ' * (m + 4))))
2699 hangindent=' ' * (m + 4))))
2700
2700
2701 if not ui.quiet:
2701 if not ui.quiet:
2702 addglobalopts(True)
2702 addglobalopts(True)
2703
2703
2704 def helptopic(name):
2704 def helptopic(name):
2705 for names, header, doc in help.helptable:
2705 for names, header, doc in help.helptable:
2706 if name in names:
2706 if name in names:
2707 break
2707 break
2708 else:
2708 else:
2709 raise error.UnknownCommand(name)
2709 raise error.UnknownCommand(name)
2710
2710
2711 # description
2711 # description
2712 if not doc:
2712 if not doc:
2713 doc = _("(no help text available)")
2713 doc = _("(no help text available)")
2714 if hasattr(doc, '__call__'):
2714 if hasattr(doc, '__call__'):
2715 doc = doc()
2715 doc = doc()
2716
2716
2717 ui.write("%s\n\n" % header)
2717 ui.write("%s\n\n" % header)
2718 ui.write("%s\n" % minirst.format(doc, textwidth, indent=4))
2718 ui.write("%s\n" % minirst.format(doc, textwidth, indent=4))
2719 try:
2719 try:
2720 cmdutil.findcmd(name, table)
2720 cmdutil.findcmd(name, table)
2721 ui.write(_('\nuse "hg help -c %s" to see help for '
2721 ui.write(_('\nuse "hg help -c %s" to see help for '
2722 'the %s command\n') % (name, name))
2722 'the %s command\n') % (name, name))
2723 except error.UnknownCommand:
2723 except error.UnknownCommand:
2724 pass
2724 pass
2725
2725
2726 def helpext(name):
2726 def helpext(name):
2727 try:
2727 try:
2728 mod = extensions.find(name)
2728 mod = extensions.find(name)
2729 doc = gettext(mod.__doc__) or _('no help text available')
2729 doc = gettext(mod.__doc__) or _('no help text available')
2730 except KeyError:
2730 except KeyError:
2731 mod = None
2731 mod = None
2732 doc = extensions.disabledext(name)
2732 doc = extensions.disabledext(name)
2733 if not doc:
2733 if not doc:
2734 raise error.UnknownCommand(name)
2734 raise error.UnknownCommand(name)
2735
2735
2736 if '\n' not in doc:
2736 if '\n' not in doc:
2737 head, tail = doc, ""
2737 head, tail = doc, ""
2738 else:
2738 else:
2739 head, tail = doc.split('\n', 1)
2739 head, tail = doc.split('\n', 1)
2740 ui.write(_('%s extension - %s\n\n') % (name.split('.')[-1], head))
2740 ui.write(_('%s extension - %s\n\n') % (name.split('.')[-1], head))
2741 if tail:
2741 if tail:
2742 ui.write(minirst.format(tail, textwidth))
2742 ui.write(minirst.format(tail, textwidth))
2743 ui.status('\n\n')
2743 ui.status('\n\n')
2744
2744
2745 if mod:
2745 if mod:
2746 try:
2746 try:
2747 ct = mod.cmdtable
2747 ct = mod.cmdtable
2748 except AttributeError:
2748 except AttributeError:
2749 ct = {}
2749 ct = {}
2750 modcmds = set([c.split('|', 1)[0] for c in ct])
2750 modcmds = set([c.split('|', 1)[0] for c in ct])
2751 helplist(_('list of commands:\n\n'), modcmds.__contains__)
2751 helplist(_('list of commands:\n\n'), modcmds.__contains__)
2752 else:
2752 else:
2753 ui.write(_('use "hg help extensions" for information on enabling '
2753 ui.write(_('use "hg help extensions" for information on enabling '
2754 'extensions\n'))
2754 'extensions\n'))
2755
2755
2756 def helpextcmd(name):
2756 def helpextcmd(name):
2757 cmd, ext, mod = extensions.disabledcmd(ui, name, ui.config('ui', 'strict'))
2757 cmd, ext, mod = extensions.disabledcmd(ui, name, ui.config('ui', 'strict'))
2758 doc = gettext(mod.__doc__).splitlines()[0]
2758 doc = gettext(mod.__doc__).splitlines()[0]
2759
2759
2760 msg = help.listexts(_("'%s' is provided by the following "
2760 msg = help.listexts(_("'%s' is provided by the following "
2761 "extension:") % cmd, {ext: doc}, indent=4)
2761 "extension:") % cmd, {ext: doc}, indent=4)
2762 ui.write(minirst.format(msg, textwidth))
2762 ui.write(minirst.format(msg, textwidth))
2763 ui.write('\n\n')
2763 ui.write('\n\n')
2764 ui.write(_('use "hg help extensions" for information on enabling '
2764 ui.write(_('use "hg help extensions" for information on enabling '
2765 'extensions\n'))
2765 'extensions\n'))
2766
2766
2767 if name and name != 'shortlist':
2767 if name and name != 'shortlist':
2768 i = None
2768 i = None
2769 if unknowncmd:
2769 if unknowncmd:
2770 queries = (helpextcmd,)
2770 queries = (helpextcmd,)
2771 elif opts.get('extension'):
2771 elif opts.get('extension'):
2772 queries = (helpext,)
2772 queries = (helpext,)
2773 elif opts.get('command'):
2773 elif opts.get('command'):
2774 queries = (helpcmd,)
2774 queries = (helpcmd,)
2775 else:
2775 else:
2776 queries = (helptopic, helpcmd, helpext, helpextcmd)
2776 queries = (helptopic, helpcmd, helpext, helpextcmd)
2777 for f in queries:
2777 for f in queries:
2778 try:
2778 try:
2779 f(name)
2779 f(name)
2780 i = None
2780 i = None
2781 break
2781 break
2782 except error.UnknownCommand, inst:
2782 except error.UnknownCommand, inst:
2783 i = inst
2783 i = inst
2784 if i:
2784 if i:
2785 raise i
2785 raise i
2786
2786
2787 else:
2787 else:
2788 # program name
2788 # program name
2789 if ui.verbose or with_version:
2789 if ui.verbose or with_version:
2790 version_(ui)
2790 version_(ui)
2791 else:
2791 else:
2792 ui.status(_("Mercurial Distributed SCM\n"))
2792 ui.status(_("Mercurial Distributed SCM\n"))
2793 ui.status('\n')
2793 ui.status('\n')
2794
2794
2795 # list of commands
2795 # list of commands
2796 if name == "shortlist":
2796 if name == "shortlist":
2797 header = _('basic commands:\n\n')
2797 header = _('basic commands:\n\n')
2798 else:
2798 else:
2799 header = _('list of commands:\n\n')
2799 header = _('list of commands:\n\n')
2800
2800
2801 helplist(header)
2801 helplist(header)
2802 if name != 'shortlist':
2802 if name != 'shortlist':
2803 text = help.listexts(_('enabled extensions:'), extensions.enabled())
2803 text = help.listexts(_('enabled extensions:'), extensions.enabled())
2804 if text:
2804 if text:
2805 ui.write("\n%s\n" % minirst.format(text, textwidth))
2805 ui.write("\n%s\n" % minirst.format(text, textwidth))
2806
2806
2807 # list all option lists
2807 # list all option lists
2808 opt_output = []
2808 opt_output = []
2809 multioccur = False
2809 multioccur = False
2810 for title, options in option_lists:
2810 for title, options in option_lists:
2811 opt_output.append(("\n%s" % title, None))
2811 opt_output.append(("\n%s" % title, None))
2812 for option in options:
2812 for option in options:
2813 if len(option) == 5:
2813 if len(option) == 5:
2814 shortopt, longopt, default, desc, optlabel = option
2814 shortopt, longopt, default, desc, optlabel = option
2815 else:
2815 else:
2816 shortopt, longopt, default, desc = option
2816 shortopt, longopt, default, desc = option
2817 optlabel = _("VALUE") # default label
2817 optlabel = _("VALUE") # default label
2818
2818
2819 if _("DEPRECATED") in desc and not ui.verbose:
2819 if _("DEPRECATED") in desc and not ui.verbose:
2820 continue
2820 continue
2821 if isinstance(default, list):
2821 if isinstance(default, list):
2822 numqualifier = " %s [+]" % optlabel
2822 numqualifier = " %s [+]" % optlabel
2823 multioccur = True
2823 multioccur = True
2824 elif (default is not None) and not isinstance(default, bool):
2824 elif (default is not None) and not isinstance(default, bool):
2825 numqualifier = " %s" % optlabel
2825 numqualifier = " %s" % optlabel
2826 else:
2826 else:
2827 numqualifier = ""
2827 numqualifier = ""
2828 opt_output.append(("%2s%s" %
2828 opt_output.append(("%2s%s" %
2829 (shortopt and "-%s" % shortopt,
2829 (shortopt and "-%s" % shortopt,
2830 longopt and " --%s%s" %
2830 longopt and " --%s%s" %
2831 (longopt, numqualifier)),
2831 (longopt, numqualifier)),
2832 "%s%s" % (desc,
2832 "%s%s" % (desc,
2833 default
2833 default
2834 and _(" (default: %s)") % default
2834 and _(" (default: %s)") % default
2835 or "")))
2835 or "")))
2836 if multioccur:
2836 if multioccur:
2837 msg = _("\n[+] marked option can be specified multiple times")
2837 msg = _("\n[+] marked option can be specified multiple times")
2838 if ui.verbose and name != 'shortlist':
2838 if ui.verbose and name != 'shortlist':
2839 opt_output.append((msg, None))
2839 opt_output.append((msg, None))
2840 else:
2840 else:
2841 opt_output.insert(-1, (msg, None))
2841 opt_output.insert(-1, (msg, None))
2842
2842
2843 if not name:
2843 if not name:
2844 ui.write(_("\nadditional help topics:\n\n"))
2844 ui.write(_("\nadditional help topics:\n\n"))
2845 topics = []
2845 topics = []
2846 for names, header, doc in help.helptable:
2846 for names, header, doc in help.helptable:
2847 topics.append((sorted(names, key=len, reverse=True)[0], header))
2847 topics.append((sorted(names, key=len, reverse=True)[0], header))
2848 topics_len = max([len(s[0]) for s in topics])
2848 topics_len = max([len(s[0]) for s in topics])
2849 for t, desc in topics:
2849 for t, desc in topics:
2850 ui.write(" %-*s %s\n" % (topics_len, t, desc))
2850 ui.write(" %-*s %s\n" % (topics_len, t, desc))
2851
2851
2852 if opt_output:
2852 if opt_output:
2853 colwidth = encoding.colwidth
2853 colwidth = encoding.colwidth
2854 # normalize: (opt or message, desc or None, width of opt)
2854 # normalize: (opt or message, desc or None, width of opt)
2855 entries = [desc and (opt, desc, colwidth(opt)) or (opt, None, 0)
2855 entries = [desc and (opt, desc, colwidth(opt)) or (opt, None, 0)
2856 for opt, desc in opt_output]
2856 for opt, desc in opt_output]
2857 hanging = max([e[2] for e in entries])
2857 hanging = max([e[2] for e in entries])
2858 for opt, desc, width in entries:
2858 for opt, desc, width in entries:
2859 if desc:
2859 if desc:
2860 initindent = ' %s%s ' % (opt, ' ' * (hanging - width))
2860 initindent = ' %s%s ' % (opt, ' ' * (hanging - width))
2861 hangindent = ' ' * (hanging + 3)
2861 hangindent = ' ' * (hanging + 3)
2862 ui.write('%s\n' % (util.wrap(desc, textwidth,
2862 ui.write('%s\n' % (util.wrap(desc, textwidth,
2863 initindent=initindent,
2863 initindent=initindent,
2864 hangindent=hangindent)))
2864 hangindent=hangindent)))
2865 else:
2865 else:
2866 ui.write("%s\n" % opt)
2866 ui.write("%s\n" % opt)
2867
2867
@command('identify|id',
    [('r', 'rev', '',
     _('identify the specified revision'), _('REV')),
    ('n', 'num', None, _('show local revision number')),
    ('i', 'id', None, _('show global revision id')),
    ('b', 'branch', None, _('show branch')),
    ('t', 'tags', None, _('show tags')),
    ('B', 'bookmarks', None, _('show bookmarks'))],
    _('[-nibtB] [-r REV] [SOURCE]'))
def identify(ui, repo, source=None, rev=None,
             num=None, id=None, branch=None, tags=None, bookmarks=None):
    """identify the working copy or specified revision

    Print a summary identifying the repository state at REV using one or
    two parent hash identifiers, followed by a "+" if the working
    directory has uncommitted changes, the branch name (if not default),
    a list of tags, and a list of bookmarks.

    When REV is not given, print a summary of the current state of the
    repository.

    Specifying a path to a repository root or Mercurial bundle will
    cause lookup to operate on that repository/bundle.

    Returns 0 if successful.
    """

    if not repo and not source:
        raise util.Abort(_("there is no Mercurial repository here "
                           "(.hg not found)"))

    # Long hashes with --debug, short ones otherwise.
    hexfunc = ui.debugflag and hex or short
    # No field flag given at all: produce the combined default summary.
    default = not (num or id or branch or tags or bookmarks)
    output = []
    revs = []

    if source:
        # Operate on an explicit repository/bundle path instead of the
        # current repository.
        source, branches = hg.parseurl(ui.expandpath(source))
        repo = hg.repository(ui, source)
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)

    if not repo.local():
        # Remote repository: only the global id and bookmarks can be
        # resolved over the wire, per the abort below.
        if num or branch or tags:
            raise util.Abort(
                _("can't query remote revision number, branch, or tags"))
        if not rev and revs:
            rev = revs[0]
        if not rev:
            rev = "tip"

        remoterev = repo.lookup(rev)
        if default or id:
            output = [hexfunc(remoterev)]

        def getbms():
            # Bookmarks on the remote that point at remoterev; empty when
            # the remote does not advertise the bookmarks namespace.
            bms = []

            if 'bookmarks' in repo.listkeys('namespaces'):
                hexremoterev = hex(remoterev)
                bms = [bm for bm, bmr in repo.listkeys('bookmarks').iteritems()
                       if bmr == hexremoterev]

            return bms

        if bookmarks:
            output.extend(getbms())
        elif default and not ui.quiet:
            # multiple bookmarks for a single parent separated by '/'
            bm = '/'.join(getbms())
            if bm:
                output.append(bm)
    else:
        if not rev:
            # Working directory: identify by its parent(s).
            ctx = repo[None]
            parents = ctx.parents()
            changed = ""
            if default or id or num:
                # "+" suffix when any file status change is pending.
                changed = util.any(repo.status()) and "+" or ""
            if default or id:
                output = ["%s%s" %
                  ('+'.join([hexfunc(p.node()) for p in parents]), changed)]
            if num:
                output.append("%s%s" %
                  ('+'.join([str(p.rev()) for p in parents]), changed))
        else:
            ctx = scmutil.revsingle(repo, rev)
            if default or id:
                output = [hexfunc(ctx.node())]
            if num:
                output.append(str(ctx.rev()))

        if default and not ui.quiet:
            # Default summary omits the branch name when it is 'default'.
            b = ctx.branch()
            if b != 'default':
                output.append("(%s)" % b)

            # multiple tags for a single parent separated by '/'
            t = '/'.join(ctx.tags())
            if t:
                output.append(t)

            # multiple bookmarks for a single parent separated by '/'
            bm = '/'.join(ctx.bookmarks())
            if bm:
                output.append(bm)
        else:
            if branch:
                output.append(ctx.branch())

            if tags:
                output.extend(ctx.tags())

            if bookmarks:
                output.extend(ctx.bookmarks())

    ui.write("%s\n" % ' '.join(output))
2984
2984
@command('import|patch',
    [('p', 'strip', 1,
     _('directory strip option for patch. This has the same '
       'meaning as the corresponding patch option'), _('NUM')),
    ('b', 'base', '', _('base path'), _('PATH')),
    ('f', 'force', None, _('skip check for outstanding uncommitted changes')),
    ('', 'no-commit', None,
     _("don't commit, just update the working directory")),
    ('', 'exact', None,
     _('apply patch to the nodes from which it was generated')),
    ('', 'import-branch', None,
     _('use any branch information in patch (implied by --exact)'))] +
    commitopts + commitopts2 + similarityopts,
    _('[OPTION]... PATCH...'))
def import_(ui, repo, patch1, *patches, **opts):
    """import an ordered set of patches

    Import a list of patches and commit them individually (unless
    --no-commit is specified).

    If there are outstanding changes in the working directory, import
    will abort unless given the -f/--force flag.

    You can import a patch straight from a mail message. Even patches
    as attachments work (to use the body part, it must have type
    text/plain or text/x-patch). From and Subject headers of email
    message are used as default committer and commit message. All
    text/plain body parts before first diff are added to commit
    message.

    If the imported patch was generated by :hg:`export`, user and
    description from patch override values from message headers and
    body. Values given on command line with -m/--message and -u/--user
    override these.

    If --exact is specified, import will set the working directory to
    the parent of each patch before applying it, and will abort if the
    resulting changeset has a different ID than the one recorded in
    the patch. This may happen due to character set problems or other
    deficiencies in the text patch format.

    With -s/--similarity, hg will attempt to discover renames and
    copies in the patch in the same way as 'addremove'.

    To read a patch from standard input, use "-" as the patch name. If
    a URL is specified, the patch will be downloaded from it.
    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success.
    """
    patches = (patch1,) + patches

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    # Validate --similarity up front, before touching the repository.
    try:
        sim = float(opts.get('similarity') or 0)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))

    # --exact always refuses a dirty working directory; otherwise only
    # when --force was not given.
    if opts.get('exact') or not opts.get('force'):
        cmdutil.bailifchanged(repo)

    d = opts["base"]
    strip = opts["strip"]
    wlock = lock = None
    msgs = []  # collected commit messages when --no-commit is in effect

    def tryone(ui, hunk):
        # Apply a single extracted patch; returns a human-readable id of
        # what was produced (a short node, or 'to working directory'),
        # or None when the hunk contained no usable patch.
        tmpname, message, user, date, branch, nodeid, p1, p2 = \
            patch.extract(ui, hunk)

        if not tmpname:
            return None
        commitid = _('to working directory')

        try:
            # Message precedence: command line > patch body > editor.
            cmdline_message = cmdutil.logmessage(opts)
            if cmdline_message:
                # pickup the cmdline msg
                message = cmdline_message
            elif message:
                # pickup the patch msg
                message = message.strip()
            else:
                # launch the editor
                message = None
            ui.debug('message:\n%s\n' % message)

            wp = repo.parents()
            if opts.get('exact'):
                # Recreate the exact parent setup recorded in the patch.
                if not nodeid or not p1:
                    raise util.Abort(_('not a Mercurial patch'))
                p1 = repo.lookup(p1)
                p2 = repo.lookup(p2 or hex(nullid))

                if p1 != wp[0].node():
                    hg.clean(repo, p1)
                repo.dirstate.setparents(p1, p2)
            elif p2:
                # Merge patch: adopt its second parent when the first
                # parent matches the working directory; unknown nodes
                # are silently ignored (best effort).
                try:
                    p1 = repo.lookup(p1)
                    p2 = repo.lookup(p2)
                    if p1 == wp[0].node():
                        repo.dirstate.setparents(p1, p2)
                except error.RepoError:
                    pass
            if opts.get('exact') or opts.get('import_branch'):
                repo.dirstate.setbranch(branch or 'default')

            files = {}
            patch.patch(ui, repo, tmpname, strip=strip, files=files,
                        eolmode=None, similarity=sim / 100.0)
            files = list(files)
            if opts.get('no_commit'):
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact'):
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                n = repo.commit(message, opts.get('user') or user,
                                opts.get('date') or date, match=m,
                                editor=cmdutil.commiteditor)
                if opts.get('exact'):
                    # --exact guarantees the recreated changeset id; roll
                    # back and abort on any mismatch.
                    if hex(n) != nodeid:
                        repo.rollback()
                        raise util.Abort(_('patch is damaged'
                                           ' or loses information'))
                # Force a dirstate write so that the next transaction
                # backups an up-to-date file.
                repo.dirstate.write()
                if n:
                    commitid = short(n)

            return commitid
        finally:
            # patch.extract wrote the payload to a temp file; always
            # remove it, even on abort.
            os.unlink(tmpname)

    try:
        wlock = repo.wlock()
        lock = repo.lock()
        lastcommit = None
        for p in patches:
            pf = os.path.join(d, p)

            if pf == '-':
                ui.status(_("applying patch from stdin\n"))
                pf = sys.stdin
            else:
                ui.status(_("applying %s\n") % p)
                pf = url.open(ui, pf)

            haspatch = False
            for hunk in patch.split(pf):
                commitid = tryone(ui, hunk)
                if commitid:
                    haspatch = True
                    # Report each commit one step late so the final one
                    # is left implicit in the command output.
                    if lastcommit:
                        ui.status(_('applied %s\n') % lastcommit)
                    lastcommit = commitid

            if not haspatch:
                raise util.Abort(_('no diffs found'))

        if msgs:
            # --no-commit: stash the collected messages for a later
            # commit to pick up.
            repo.opener.write('last-message.txt', '\n* * *\n'.join(msgs))
    finally:
        release(lock, wlock)
3158
3158
@command('incoming|in',
    [('f', 'force', None,
      _('run even if remote repository is unrelated')),
    ('n', 'newest-first', None, _('show newest record first')),
    ('', 'bundle', '',
     _('file to store the bundles into'), _('FILE')),
    ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
    ('B', 'bookmarks', False, _("compare bookmarks")),
    ('b', 'branch', [],
     _('a specific branch you would like to pull'), _('BRANCH')),
    ] + logopts + remoteopts + subrepoopts,
    _('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'))
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would have been pulled
    if a pull at the time you issued this command.

    For remote repository, using --bundle avoids downloading the
    changesets twice if the incoming is followed by a pull.

    See pull for valid source format details.

    Returns 0 if there are incoming changes, 1 otherwise.
    """
    if opts.get('bundle') and opts.get('subrepos'):
        raise util.Abort(_('cannot combine --bundle and --subrepos'))

    if opts.get('bookmarks'):
        # -B/--bookmarks: compare bookmark state instead of changesets
        # and return early.
        source, branches = hg.parseurl(ui.expandpath(source),
                                       opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), source)
        if 'bookmarks' not in other.listkeys('namespaces'):
            ui.warn(_("remote doesn't support bookmarks\n"))
            return 0
        # hidepassword keeps credentials embedded in the URL out of the
        # status output.
        ui.status(_('comparing with %s\n') % util.hidepassword(source))
        return bookmarks.diff(ui, repo, other)

    # _subtoppath lets subrepositories resolve their relative source
    # paths; it must not outlive this call, hence the try/finally.
    repo._subtoppath = ui.expandpath(source)
    try:
        return hg.incoming(ui, repo, source, opts)
    finally:
        del repo._subtoppath
3203
3203
3204
3204
@command('^init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]'))
def init(ui, dest=".", **opts):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it will be created.

    If no directory is given, the current directory is used.

    It is possible to specify an ``ssh://`` URL as the destination.
    See :hg:`help urls` for more information.

    Returns 0 on success.
    """
    # Expand path aliases first, then create the repository there
    # (possibly over ssh, via the remote ui built from opts).
    destpath = ui.expandpath(dest)
    remote = hg.remoteui(ui, opts)
    hg.repository(remote, destpath, create=True)
3220
3220
@command('locate',
    [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
    ('f', 'fullpath', None, _('print complete paths from the filesystem root')),
    ] + walkopts,
    _('[OPTION]... [PATTERN]...'))
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns

    Print files under Mercurial control in the working directory whose
    names match the given patterns.

    By default, this command searches all directories in the working
    directory. To search just the current directory and its
    subdirectories, use "--include .".

    If no patterns are given to match, this command prints the names
    of all files under Mercurial control in the working directory.

    If you want to feed the output of this command into the "xargs"
    command, use the -0 option to both this command and "xargs". This
    will avoid the problem of "xargs" treating single filenames that
    contain whitespace as multiple filenames.

    Returns 0 if a match is found, 1 otherwise.
    """
    # NUL terminator with -0 (for xargs), newline otherwise.
    if opts.get('print0'):
        terminator = '\0'
    else:
        terminator = '\n'
    node = scmutil.revsingle(repo, opts.get('rev'), None).node()

    exitcode = 1  # flips to 0 on the first match
    matcher = scmutil.match(repo, pats, opts, default='relglob')
    matcher.bad = lambda x, y: False  # silence complaints about bad patterns
    for abspath in repo[node].walk(matcher):
        # Without an explicit rev, skip files not tracked in the
        # working directory.
        if not node and abspath not in repo.dirstate:
            continue
        if opts.get('fullpath'):
            ui.write(repo.wjoin(abspath), terminator)
        else:
            # Relative form when patterns were given; fall back to the
            # repo-relative path otherwise (and when rel() is empty).
            ui.write(((pats and matcher.rel(abspath)) or abspath), terminator)
        exitcode = 0

    return exitcode
3263
3263
@command('^log|history',
    [('f', 'follow', None,
     _('follow changeset history, or file history across copies and renames')),
    ('', 'follow-first', None,
     _('only follow the first parent of merge changesets')),
    ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
    ('C', 'copies', None, _('show copied files')),
    ('k', 'keyword', [],
     _('do case-insensitive search for a given text'), _('TEXT')),
    ('r', 'rev', [], _('show the specified revision or range'), _('REV')),
    ('', 'removed', None, _('include revisions where files were removed')),
    ('m', 'only-merges', None, _('show only merges')),
    ('u', 'user', [], _('revisions committed by user'), _('USER')),
    ('', 'only-branch', [],
     _('show only changesets within the given named branch (DEPRECATED)'),
     _('BRANCH')),
    ('b', 'branch', [],
     _('show changesets within the given named branch'), _('BRANCH')),
    ('P', 'prune', [],
     _('do not display revision or any of its ancestors'), _('REV')),
    ] + logopts + walkopts,
    _('[OPTION]... [FILE]'))
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire
    project.

    File history is shown without following rename or copy history of
    files. Use -f/--follow with a filename to follow history across
    renames and copies. --follow without a filename will only show
    ancestors or descendants of the starting revision. --follow-first
    only follows the first parent of merge revisions.

    If no revision range is specified, the default is ``tip:0`` unless
    --follow is set, in which case the working directory parent is
    used as the starting revision. You can specify a revision set for
    log, see :hg:`help revsets` for more information.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    By default this command prints revision number and changeset id,
    tags, non-trivial parents, user, date and time, and a summary for
    each commit. When the -v/--verbose switch is used, the list of
    changed files and full commit message are shown.

    .. note::
    log -p/--patch may generate unexpected diff output for merge
    changesets, as it will only compare the merge changeset against
    its first parent. Also, only files different from BOTH parents
    will appear in files:.

    Returns 0 on success.
    """

    matchfn = scmutil.match(repo, pats, opts)
    limit = cmdutil.loglimit(opts)
    count = 0

    # Exclusive upper bound on revisions for rename lookups; only
    # computed when both --copies and --rev are given.
    endrev = None
    if opts.get('copies') and opts.get('rev'):
        endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1

    # Date filter: predicate over commit timestamps, or False when unused.
    df = False
    if opts["date"]:
        df = util.matchdate(opts["date"])

    # Fold the deprecated --only-branch values into --branch, running
    # each name through lookupbranch for normalization.
    branches = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in branches]

    displayer = cmdutil.show_changeset(ui, repo, opts, True)
    def prep(ctx, fns):
        # Callback invoked by walkchangerevs for each candidate changeset.
        # Returning early (without calling displayer.show) filters the
        # revision out of the log output.
        rev = ctx.rev()
        parents = [p for p in repo.changelog.parentrevs(rev)
                   if p != nullrev]
        if opts.get('no_merges') and len(parents) == 2:
            return
        if opts.get('only_merges') and len(parents) != 2:
            return
        if opts.get('branch') and ctx.branch() not in opts['branch']:
            return
        if df and not df(ctx.date()[0]):
            return
        if opts['user'] and not [k for k in opts['user']
                                 if k.lower() in ctx.user().lower()]:
            return
        if opts.get('keyword'):
            # Keep the changeset if ANY keyword matches the user, the
            # description or the file list; the for/else skips it when
            # no keyword matched.
            for k in [kw.lower() for kw in opts['keyword']]:
                if (k in ctx.user().lower() or
                    k in ctx.description().lower() or
                    k in " ".join(ctx.files()).lower()):
                    break
            else:
                return

        copies = None
        if opts.get('copies') and rev:
            # Note: the truthiness test on rev means rename lookup is
            # skipped for revision 0.
            copies = []
            getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
            for fn in ctx.files():
                rename = getrenamed(fn, rev)
                if rename:
                    copies.append((fn, rename[0]))

        revmatchfn = None
        if opts.get('patch') or opts.get('stat'):
            if opts.get('follow') or opts.get('follow_first'):
                # note: this might be wrong when following through merges
                revmatchfn = scmutil.match(repo, fns, default='path')
            else:
                revmatchfn = matchfn

        displayer.show(ctx, copies=copies, matchfn=revmatchfn)

    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        if count == limit:
            break
        # flush() reports whether the revision was actually displayed;
        # only displayed revisions count against --limit.
        if displayer.flush(ctx.rev()):
            count += 1
    displayer.close()
3384
3384
@command('manifest',
    [('r', 'rev', '', _('revision to display'), _('REV')),
     ('', 'all', False, _("list files from all revisions"))],
    _('[-r REV]'))
def manifest(ui, repo, node=None, rev=None, **opts):
    """output the current or given revision of the project manifest

    Print a list of version controlled files for the given revision.
    If no revision is given, the first parent of the working directory
    is used, or the null revision if no revision is checked out.

    With -v, print file permissions, symlink and executable bits.
    With --debug, print file revision hashes.

    If option --all is specified, the list of all files from all revisions
    is printed. This includes deleted and renamed files.

    Returns 0 on success.
    """
    if opts.get('all'):
        if rev or node:
            raise util.Abort(_("can't specify a revision with --all"))

        # Scan the store directly: every filelog "data/<name>.i" with a
        # non-empty index corresponds to a file known in some revision.
        prefix, suffix = "data/", ".i"
        plen, slen = len(prefix), len(suffix)
        names = []
        lock = repo.lock()
        try:
            for fn, b, size in repo.store.datafiles():
                if size != 0 and fn.startswith(prefix) and fn.endswith(suffix):
                    names.append(fn[plen:-slen])
        finally:
            lock.release()
        for name in sorted(names):
            ui.write("%s\n" % name)
        return

    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    # The positional argument and -r/--rev are interchangeable here.
    node = node or rev

    # Flag character -> human-readable mode prefix used with -v.
    decor = {'l':'644 @ ', 'x':'755 * ', '':'644 '}
    ctx = scmutil.revsingle(repo, node)
    for filename in ctx:
        if ui.debugflag:
            ui.write("%40s " % hex(ctx.manifest()[filename]))
        if ui.verbose:
            ui.write(decor[ctx.flags(filename)])
        ui.write("%s\n" % filename)
3438
3438
@command('^merge',
    [('f', 'force', None, _('force a merge with outstanding changes')),
    ('t', 'tool', '', _('specify merge tool')),
    ('r', 'rev', '', _('revision to merge'), _('REV')),
    ('P', 'preview', None,
     _('review revisions to merge (no merge is performed)'))],
    _('[-P] [-f] [[-r] REV]'))
def merge(ui, repo, node=None, **opts):
    """merge working directory with another revision

    The current working directory is updated with all changes made in
    the requested revision since the last common predecessor revision.

    Files that changed between either parent are marked as changed for
    the next commit and a commit must be performed before any further
    updates to the repository are allowed. The next commit will have
    two parents.

    ``--tool`` can be used to specify the merge tool used for file
    merges. It overrides the HGMERGE environment variable and your
    configuration files. See :hg:`help merge-tools` for options.

    If no revision is specified, the working directory's parent is a
    head revision, and the current branch contains exactly one other
    head, the other head is merged with by default. Otherwise, an
    explicit revision with which to merge with must be provided.

    :hg:`resolve` must be used to resolve unresolved files.

    To undo an uncommitted merge, use :hg:`update --clean .` which
    will check out a clean copy of the original merge parent, losing
    all changes.

    Returns 0 on success, 1 if there are unresolved files.
    """

    # The revision may be given positionally or via -r/--rev, not both.
    if opts.get('rev') and node:
        raise util.Abort(_("please specify just one revision"))
    if not node:
        node = opts.get('rev')

    if not node:
        # No revision given: pick the other head of the current branch,
        # but only when that choice is unambiguous.
        branch = repo[None].branch()
        bheads = repo.branchheads(branch)
        if len(bheads) > 2:
            raise util.Abort(_("branch '%s' has %d heads - "
                               "please merge with an explicit rev")
                             % (branch, len(bheads)),
                             hint=_("run 'hg heads .' to see heads"))

        parent = repo.dirstate.p1()
        if len(bheads) == 1:
            # A single branch head means there is nothing on this branch
            # to merge with; distinguish the multi-branch case in the hint.
            if len(repo.heads()) > 1:
                raise util.Abort(_("branch '%s' has one head - "
                                   "please merge with an explicit rev")
                                 % branch,
                                 hint=_("run 'hg heads' to see all heads"))
            msg = _('there is nothing to merge')
            if parent != repo.lookup(repo[None].branch()):
                msg = _('%s - use "hg update" instead') % msg
            raise util.Abort(msg)

        if parent not in bheads:
            raise util.Abort(_('working directory not at a head revision'),
                             hint=_("use 'hg update' or merge with an "
                                    "explicit revision"))
        # Merge with whichever of the two heads is not the working parent.
        node = parent == bheads[0] and bheads[-1] or bheads[0]
    else:
        node = scmutil.revsingle(repo, node).node()

    if opts.get('preview'):
        # find nodes that are ancestors of p2 but not of p1
        p1 = repo.lookup('.')
        p2 = repo.lookup(node)
        nodes = repo.changelog.findmissing(common=[p1], heads=[p2])

        displayer = cmdutil.show_changeset(ui, repo, opts)
        for node in nodes:
            displayer.show(repo[node])
        displayer.close()
        return 0

    try:
        # ui.forcemerge is an internal variable, do not document
        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
        return hg.merge(repo, node, force=opts.get('force'))
    finally:
        # Always clear the override so later operations in this process
        # are not affected by --tool.
        ui.setconfig('ui', 'forcemerge', '')
3527
3527
@command('outgoing|out',
    [('f', 'force', None, _('run even when the destination is unrelated')),
    ('r', 'rev', [],
     _('a changeset intended to be included in the destination'), _('REV')),
    ('n', 'newest-first', None, _('show newest record first')),
    ('B', 'bookmarks', False, _('compare bookmarks')),
    ('b', 'branch', [], _('a specific branch you would like to push'),
     _('BRANCH')),
    ] + logopts + remoteopts + subrepoopts,
    _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]'))
def outgoing(ui, repo, dest=None, **opts):
    """show changesets not found in the destination

    Show changesets not found in the specified destination repository
    or the default push location. These are the changesets that would
    be pushed if a push was requested.

    See pull for details of valid destination formats.

    Returns 0 if there are outgoing changes, 1 otherwise.
    """

    # Resolve the destination alias once; used by both code paths below.
    path = ui.expandpath(dest or 'default-push', dest or 'default')

    if opts.get('bookmarks'):
        # Bookmark-only comparison: contact the remote and diff bookmarks
        # instead of changesets.
        url, branches = hg.parseurl(path, opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), url)
        if 'bookmarks' not in other.listkeys('namespaces'):
            ui.warn(_("remote doesn't support bookmarks\n"))
            return 0
        ui.status(_('comparing with %s\n') % util.hidepassword(url))
        return bookmarks.diff(ui, other, repo)

    # Publish the resolved destination for subrepo lookups while the
    # outgoing computation runs, then always remove it again.
    repo._subtoppath = path
    try:
        return hg.outgoing(ui, repo, dest, opts)
    finally:
        del repo._subtoppath
3565
3565
@command('parents',
    [('r', 'rev', '', _('show parents of the specified revision'), _('REV')),
    ] + templateopts,
    _('[-r REV] [FILE]'))
def parents(ui, repo, file_=None, **opts):
    """show the parents of the working directory or revision

    Print the working directory's parent revisions. If a revision is
    given via -r/--rev, the parent of that revision will be printed.
    If a file argument is given, the revision in which the file was
    last changed (before the working directory revision or the
    argument to --rev if given) is printed.

    Returns 0 on success.
    """

    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    if file_:
        # A file argument must resolve to exactly one explicit filename;
        # patterns and multiple matches are rejected.
        m = scmutil.match(repo, (file_,), opts)
        if m.anypats() or len(m.files()) != 1:
            raise util.Abort(_('can only specify an explicit filename'))
        file_ = m.files()[0]
        # Collect the file's node in each parent changeset where it exists.
        filenodes = []
        for cp in ctx.parents():
            if not cp:
                continue
            try:
                filenodes.append(cp.filenode(file_))
            except error.LookupError:
                # file not present in this parent; skip it
                pass
        if not filenodes:
            raise util.Abort(_("'%s' not found in manifest!") % file_)
        fl = repo.file(file_)
        # Map each file node back (via the filelog's linkrev) to the
        # changeset that introduced that file revision.
        p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]
    else:
        p = [cp.node() for cp in ctx.parents()]

    displayer = cmdutil.show_changeset(ui, repo, opts)
    for n in p:
        # nullid parents are not shown.
        if n != nullid:
            displayer.show(repo[n])
    displayer.close()
3609
3609
@command('paths', [], _('[NAME]'))
def paths(ui, repo, search=None):
    """show aliases for remote repositories

    Show definition of symbolic path name NAME. If no name is given,
    show definition of all available names.

    Option -q/--quiet suppresses all output when searching for NAME
    and shows only the path names when listing all definitions.

    Path names are defined in the [paths] section of your
    configuration file and in ``/etc/mercurial/hgrc``. If run inside a
    repository, ``.hg/hgrc`` is used, too.

    The path names ``default`` and ``default-push`` have a special
    meaning. When performing a push or pull operation, they are used
    as fallbacks if no location is specified on the command-line.
    When ``default-push`` is set, it will be used for push and
    ``default`` will be used for pull; otherwise ``default`` is used
    as the fallback for both. When cloning a repository, the clone
    source is written as ``default`` in ``.hg/hgrc``. Note that
    ``default`` and ``default-push`` apply to all inbound (e.g.
    :hg:`incoming`) and outbound (e.g. :hg:`outgoing`, :hg:`email` and
    :hg:`bundle`) operations.

    See :hg:`help urls` for more information.

    Returns 0 on success.
    """
    pathitems = ui.configitems("paths")

    if not search:
        # No NAME given: list every configured path alias.
        for name, path in pathitems:
            if ui.quiet:
                ui.write("%s\n" % name)
            else:
                ui.write("%s = %s\n" % (name, util.hidepassword(path)))
        return

    # Look up a single alias by name.
    for name, path in pathitems:
        if name == search:
            ui.status("%s\n" % util.hidepassword(path))
            return
    if not ui.quiet:
        ui.warn(_("not found!\n"))
    return 1
3653
3653
3654 def postincoming(ui, repo, modheads, optupdate, checkout):
3654 def postincoming(ui, repo, modheads, optupdate, checkout):
3655 if modheads == 0:
3655 if modheads == 0:
3656 return
3656 return
3657 if optupdate:
3657 if optupdate:
3658 try:
3658 try:
3659 return hg.update(repo, checkout)
3659 return hg.update(repo, checkout)
3660 except util.Abort, inst:
3660 except util.Abort, inst:
3661 ui.warn(_("not updating: %s\n" % str(inst)))
3661 ui.warn(_("not updating: %s\n" % str(inst)))
3662 return 0
3662 return 0
3663 if modheads > 1:
3663 if modheads > 1:
3664 currentbranchheads = len(repo.branchheads())
3664 currentbranchheads = len(repo.branchheads())
3665 if currentbranchheads == modheads:
3665 if currentbranchheads == modheads:
3666 ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
3666 ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
3667 elif currentbranchheads > 1:
3667 elif currentbranchheads > 1:
3668 ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to merge)\n"))
3668 ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to merge)\n"))
3669 else:
3669 else:
3670 ui.status(_("(run 'hg heads' to see heads)\n"))
3670 ui.status(_("(run 'hg heads' to see heads)\n"))
3671 else:
3671 else:
3672 ui.status(_("(run 'hg update' to get a working copy)\n"))
3672 ui.status(_("(run 'hg update' to get a working copy)\n"))
3673
3673
3674 @command('^pull',
3674 @command('^pull',
3675 [('u', 'update', None,
3675 [('u', 'update', None,
3676 _('update to new branch head if changesets were pulled')),
3676 _('update to new branch head if changesets were pulled')),
3677 ('f', 'force', None, _('run even when remote repository is unrelated')),
3677 ('f', 'force', None, _('run even when remote repository is unrelated')),
3678 ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
3678 ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
3679 ('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')),
3679 ('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')),
3680 ('b', 'branch', [], _('a specific branch you would like to pull'),
3680 ('b', 'branch', [], _('a specific branch you would like to pull'),
3681 _('BRANCH')),
3681 _('BRANCH')),
3682 ] + remoteopts,
3682 ] + remoteopts,
3683 _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'))
3683 _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'))
3684 def pull(ui, repo, source="default", **opts):
3684 def pull(ui, repo, source="default", **opts):
3685 """pull changes from the specified source
3685 """pull changes from the specified source
3686
3686
3687 Pull changes from a remote repository to a local one.
3687 Pull changes from a remote repository to a local one.
3688
3688
3689 This finds all changes from the repository at the specified path
3689 This finds all changes from the repository at the specified path
3690 or URL and adds them to a local repository (the current one unless
3690 or URL and adds them to a local repository (the current one unless
3691 -R is specified). By default, this does not update the copy of the
3691 -R is specified). By default, this does not update the copy of the
3692 project in the working directory.
3692 project in the working directory.
3693
3693
3694 Use :hg:`incoming` if you want to see what would have been added
3694 Use :hg:`incoming` if you want to see what would have been added
3695 by a pull at the time you issued this command. If you then decide
3695 by a pull at the time you issued this command. If you then decide
3696 to add those changes to the repository, you should use :hg:`pull
3696 to add those changes to the repository, you should use :hg:`pull
3697 -r X` where ``X`` is the last changeset listed by :hg:`incoming`.
3697 -r X` where ``X`` is the last changeset listed by :hg:`incoming`.
3698
3698
3699 If SOURCE is omitted, the 'default' path will be used.
3699 If SOURCE is omitted, the 'default' path will be used.
3700 See :hg:`help urls` for more information.
3700 See :hg:`help urls` for more information.
3701
3701
3702 Returns 0 on success, 1 if an update had unresolved files.
3702 Returns 0 on success, 1 if an update had unresolved files.
3703 """
3703 """
3704 source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
3704 source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
3705 other = hg.repository(hg.remoteui(repo, opts), source)
3705 other = hg.repository(hg.remoteui(repo, opts), source)
3706 ui.status(_('pulling from %s\n') % util.hidepassword(source))
3706 ui.status(_('pulling from %s\n') % util.hidepassword(source))
3707 revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
3707 revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
3708
3708
3709 if opts.get('bookmark'):
3709 if opts.get('bookmark'):
3710 if not revs:
3710 if not revs:
3711 revs = []
3711 revs = []
3712 rb = other.listkeys('bookmarks')
3712 rb = other.listkeys('bookmarks')
3713 for b in opts['bookmark']:
3713 for b in opts['bookmark']:
3714 if b not in rb:
3714 if b not in rb:
3715 raise util.Abort(_('remote bookmark %s not found!') % b)
3715 raise util.Abort(_('remote bookmark %s not found!') % b)
3716 revs.append(rb[b])
3716 revs.append(rb[b])
3717
3717
3718 if revs:
3718 if revs:
3719 try:
3719 try:
3720 revs = [other.lookup(rev) for rev in revs]
3720 revs = [other.lookup(rev) for rev in revs]
3721 except error.CapabilityError:
3721 except error.CapabilityError:
3722 err = _("other repository doesn't support revision lookup, "
3722 err = _("other repository doesn't support revision lookup, "
3723 "so a rev cannot be specified.")
3723 "so a rev cannot be specified.")
3724 raise util.Abort(err)
3724 raise util.Abort(err)
3725
3725
3726 modheads = repo.pull(other, heads=revs, force=opts.get('force'))
3726 modheads = repo.pull(other, heads=revs, force=opts.get('force'))
3727 bookmarks.updatefromremote(ui, repo, other)
3727 bookmarks.updatefromremote(ui, repo, other)
3728 if checkout:
3728 if checkout:
3729 checkout = str(repo.changelog.rev(other.lookup(checkout)))
3729 checkout = str(repo.changelog.rev(other.lookup(checkout)))
3730 repo._subtoppath = source
3730 repo._subtoppath = source
3731 try:
3731 try:
3732 ret = postincoming(ui, repo, modheads, opts.get('update'), checkout)
3732 ret = postincoming(ui, repo, modheads, opts.get('update'), checkout)
3733
3733
3734 finally:
3734 finally:
3735 del repo._subtoppath
3735 del repo._subtoppath
3736
3736
3737 # update specified bookmarks
3737 # update specified bookmarks
3738 if opts.get('bookmark'):
3738 if opts.get('bookmark'):
3739 for b in opts['bookmark']:
3739 for b in opts['bookmark']:
3740 # explicit pull overrides local bookmark if any
3740 # explicit pull overrides local bookmark if any
3741 ui.status(_("importing bookmark %s\n") % b)
3741 ui.status(_("importing bookmark %s\n") % b)
3742 repo._bookmarks[b] = repo[rb[b]].node()
3742 repo._bookmarks[b] = repo[rb[b]].node()
3743 bookmarks.write(repo)
3743 bookmarks.write(repo)
3744
3744
3745 return ret
3745 return ret
3746
3746
@command('^push',
    [('f', 'force', None, _('force push')),
    ('r', 'rev', [],
     _('a changeset intended to be included in the destination'),
     _('REV')),
    ('B', 'bookmark', [], _("bookmark to push"), _('BOOKMARK')),
    ('b', 'branch', [],
     _('a specific branch you would like to push'), _('BRANCH')),
    ('', 'new-branch', False, _('allow pushing a new branch')),
    ] + remoteopts,
    _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'))
def push(ui, repo, dest=None, **opts):
    """push changes to the specified destination

    Push changesets from the local repository to the specified
    destination.

    This operation is symmetrical to pull: it is identical to a pull
    in the destination repository from the current one.

    By default, push will not allow creation of new heads at the
    destination, since multiple heads would make it unclear which head
    to use. In this situation, it is recommended to pull and merge
    before pushing.

    Use --new-branch if you want to allow push to create a new named
    branch that is not present at the destination. This allows you to
    only create a new branch without forcing other changes.

    Use -f/--force to override the default behavior and push all
    changesets on all branches.

    If -r/--rev is used, the specified revision and all its ancestors
    will be pushed to the remote repository.

    Please see :hg:`help urls` for important details about ``ssh://``
    URLs. If DESTINATION is omitted, a default path will be used.

    Returns 0 if push was successful, 1 if nothing to push.
    """

    if opts.get('bookmark'):
        for b in opts['bookmark']:
            # translate -B options to -r so changesets get pushed
            if b in repo._bookmarks:
                opts.setdefault('rev', []).append(b)
            else:
                # if we try to push a deleted bookmark, translate it to null
                # this lets simultaneous -r, -b options continue working
                opts.setdefault('rev', []).append("null")

    # 'default-push' is consulted first, falling back to 'default'
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    ui.status(_('pushing to %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    other = hg.repository(hg.remoteui(repo, opts), dest)
    if revs:
        # resolve symbolic revisions (including the -B translations above)
        # to binary nodes before handing them to repo.push
        revs = [repo.lookup(rev) for rev in revs]

    # _subtoppath lets subrepos resolve their push destination relative
    # to the top-level destination; always cleared in the finally below
    repo._subtoppath = dest
    try:
        # push subrepos depth-first for coherent ordering
        c = repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if not c.sub(s).push(opts.get('force')):
                # a failed subrepo push aborts the whole operation
                return False
    finally:
        del repo._subtoppath
    result = repo.push(other, opts.get('force'), revs=revs,
                       newbranch=opts.get('new_branch'))

    # repo.push returns an integer status; 0 appears to mean "nothing
    # pushed" here, so result becomes True (nonzero exit) in that case
    # -- NOTE(review): confirm against localrepo.push semantics
    result = (result == 0)

    if opts.get('bookmark'):
        rb = other.listkeys('bookmarks')
        for b in opts['bookmark']:
            # explicit push overrides remote bookmark if any
            if b in repo._bookmarks:
                ui.status(_("exporting bookmark %s\n") % b)
                new = repo[b].hex()
            elif b in rb:
                # bookmark deleted locally but still on remote: pushing
                # an empty value deletes it there as well
                ui.status(_("deleting remote bookmark %s\n") % b)
                new = '' # delete
            else:
                ui.warn(_('bookmark %s does not exist on the local '
                          'or remote repository!\n') % b)
                return 2
            # old value guards against races: pushkey is compare-and-set
            old = rb.get(b, '')
            r = other.pushkey('bookmarks', b, old, new)
            if not r:
                ui.warn(_('updating bookmark %s failed!\n') % b)
                if not result:
                    result = 2

    return result
3843
3843
@command('recover', [])
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an
    interrupted operation. It should only be necessary when Mercurial
    suggests it.

    Returns 0 if successful, 1 if nothing to recover or verify fails.
    """
    # guard clause: nothing to recover means immediate failure status
    recovered = repo.recover()
    if not recovered:
        return 1
    # a transaction was rolled back; verify the repository and report
    # its result as our exit status
    return hg.verify(repo)
3859
3859
3860 @command('^remove|rm',
3860 @command('^remove|rm',
3861 [('A', 'after', None, _('record delete for missing files')),
3861 [('A', 'after', None, _('record delete for missing files')),
3862 ('f', 'force', None,
3862 ('f', 'force', None,
3863 _('remove (and delete) file even if added or modified')),
3863 _('remove (and delete) file even if added or modified')),
3864 ] + walkopts,
3864 ] + walkopts,
3865 _('[OPTION]... FILE...'))
3865 _('[OPTION]... FILE...'))
3866 def remove(ui, repo, *pats, **opts):
3866 def remove(ui, repo, *pats, **opts):
3867 """remove the specified files on the next commit
3867 """remove the specified files on the next commit
3868
3868
3869 Schedule the indicated files for removal from the repository.
3869 Schedule the indicated files for removal from the repository.
3870
3870
3871 This only removes files from the current branch, not from the
3871 This only removes files from the current branch, not from the
3872 entire project history. -A/--after can be used to remove only
3872 entire project history. -A/--after can be used to remove only
3873 files that have already been deleted, -f/--force can be used to
3873 files that have already been deleted, -f/--force can be used to
3874 force deletion, and -Af can be used to remove files from the next
3874 force deletion, and -Af can be used to remove files from the next
3875 revision without deleting them from the working directory.
3875 revision without deleting them from the working directory.
3876
3876
3877 The following table details the behavior of remove for different
3877 The following table details the behavior of remove for different
3878 file states (columns) and option combinations (rows). The file
3878 file states (columns) and option combinations (rows). The file
3879 states are Added [A], Clean [C], Modified [M] and Missing [!] (as
3879 states are Added [A], Clean [C], Modified [M] and Missing [!] (as
3880 reported by :hg:`status`). The actions are Warn, Remove (from
3880 reported by :hg:`status`). The actions are Warn, Remove (from
3881 branch) and Delete (from disk)::
3881 branch) and Delete (from disk)::
3882
3882
3883 A C M !
3883 A C M !
3884 none W RD W R
3884 none W RD W R
3885 -f R RD RD R
3885 -f R RD RD R
3886 -A W W W R
3886 -A W W W R
3887 -Af R R R R
3887 -Af R R R R
3888
3888
3889 Note that remove never deletes files in Added [A] state from the
3889 Note that remove never deletes files in Added [A] state from the
3890 working directory, not even if option --force is specified.
3890 working directory, not even if option --force is specified.
3891
3891
3892 This command schedules the files to be removed at the next commit.
3892 This command schedules the files to be removed at the next commit.
3893 To undo a remove before that, see :hg:`revert`.
3893 To undo a remove before that, see :hg:`revert`.
3894
3894
3895 Returns 0 on success, 1 if any warnings encountered.
3895 Returns 0 on success, 1 if any warnings encountered.
3896 """
3896 """
3897
3897
3898 ret = 0
3898 ret = 0
3899 after, force = opts.get('after'), opts.get('force')
3899 after, force = opts.get('after'), opts.get('force')
3900 if not pats and not after:
3900 if not pats and not after:
3901 raise util.Abort(_('no files specified'))
3901 raise util.Abort(_('no files specified'))
3902
3902
3903 m = scmutil.match(repo, pats, opts)
3903 m = scmutil.match(repo, pats, opts)
3904 s = repo.status(match=m, clean=True)
3904 s = repo.status(match=m, clean=True)
3905 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
3905 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
3906
3906
3907 for f in m.files():
3907 for f in m.files():
3908 if f not in repo.dirstate and not os.path.isdir(m.rel(f)):
3908 if f not in repo.dirstate and not os.path.isdir(m.rel(f)):
3909 ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
3909 ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
3910 ret = 1
3910 ret = 1
3911
3911
3912 if force:
3912 if force:
3913 list = modified + deleted + clean + added
3913 list = modified + deleted + clean + added
3914 elif after:
3914 elif after:
3915 list = deleted
3915 list = deleted
3916 for f in modified + added + clean:
3916 for f in modified + added + clean:
3917 ui.warn(_('not removing %s: file still exists (use -f'
3917 ui.warn(_('not removing %s: file still exists (use -f'
3918 ' to force removal)\n') % m.rel(f))
3918 ' to force removal)\n') % m.rel(f))
3919 ret = 1
3919 ret = 1
3920 else:
3920 else:
3921 list = deleted + clean
3921 list = deleted + clean
3922 for f in modified:
3922 for f in modified:
3923 ui.warn(_('not removing %s: file is modified (use -f'
3923 ui.warn(_('not removing %s: file is modified (use -f'
3924 ' to force removal)\n') % m.rel(f))
3924 ' to force removal)\n') % m.rel(f))
3925 ret = 1
3925 ret = 1
3926 for f in added:
3926 for f in added:
3927 ui.warn(_('not removing %s: file has been marked for add (use -f'
3927 ui.warn(_('not removing %s: file has been marked for add (use -f'
3928 ' to force removal)\n') % m.rel(f))
3928 ' to force removal)\n') % m.rel(f))
3929 ret = 1
3929 ret = 1
3930
3930
3931 for f in sorted(list):
3931 for f in sorted(list):
3932 if ui.verbose or not m.exact(f):
3932 if ui.verbose or not m.exact(f):
3933 ui.status(_('removing %s\n') % m.rel(f))
3933 ui.status(_('removing %s\n') % m.rel(f))
3934
3934
3935 wlock = repo.wlock()
3935 wlock = repo.wlock()
3936 try:
3936 try:
3937 if not after:
3937 if not after:
3938 for f in list:
3938 for f in list:
3939 if f in added:
3939 if f in added:
3940 continue # we never unlink added files on remove
3940 continue # we never unlink added files on remove
3941 try:
3941 try:
3942 util.unlinkpath(repo.wjoin(f))
3942 util.unlinkpath(repo.wjoin(f))
3943 except OSError, inst:
3943 except OSError, inst:
3944 if inst.errno != errno.ENOENT:
3944 if inst.errno != errno.ENOENT:
3945 raise
3945 raise
3946 repo[None].forget(list)
3946 repo[None].forget(list)
3947 finally:
3947 finally:
3948 wlock.release()
3948 wlock.release()
3949
3949
3950 return ret
3950 return ret
3951
3951
@command('rename|move|mv',
    [('A', 'after', None, _('record a rename that has already occurred')),
    ('f', 'force', None, _('forcibly copy over an existing managed file')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... SOURCE... DEST'))
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If dest
    is a directory, copies are put in that directory. If dest is a
    file, there can only be one source.

    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.

    This command takes effect at the next commit. To undo a rename
    before that, see :hg:`revert`.

    Returns 0 on success, 1 if errors are encountered.
    """
    # rename is copy + remove; cmdutil.copy does all the work under a
    # non-waiting working-directory lock
    lock = repo.wlock(False)
    try:
        result = cmdutil.copy(ui, repo, pats, opts, rename=True)
    finally:
        lock.release()
    return result
3978
3978
@command('resolve',
    [('a', 'all', None, _('select all unresolved files')),
    ('l', 'list', None, _('list state of files needing merge')),
    ('m', 'mark', None, _('mark files as resolved')),
    ('u', 'unmark', None, _('mark files as unresolved')),
    ('t', 'tool', '', _('specify merge tool')),
    ('n', 'no-status', None, _('hide status prefix'))]
    + walkopts,
    _('[OPTION]... [FILE]...'))
def resolve(ui, repo, *pats, **opts):
    """redo merges or set/view the merge status of files

    Merges with unresolved conflicts are often the result of
    non-interactive merging using the ``internal:merge`` configuration
    setting, or a command-line merge tool like ``diff3``. The resolve
    command is used to manage the files involved in a merge, after
    :hg:`merge` has been run, and before :hg:`commit` is run (i.e. the
    working directory must have two parents).

    The resolve command can be used in the following ways:

    - :hg:`resolve [--tool TOOL] FILE...`: attempt to re-merge the specified
      files, discarding any previous merge attempts. Re-merging is not
      performed for files already marked as resolved. Use ``--all/-a``
      to selects all unresolved files. ``--tool`` can be used to specify
      the merge tool used for the given files. It overrides the HGMERGE
      environment variable and your configuration files.

    - :hg:`resolve -m [FILE]`: mark a file as having been resolved
      (e.g. after having manually fixed-up the files). The default is
      to mark all unresolved files.

    - :hg:`resolve -u [FILE]...`: mark a file as unresolved. The
      default is to mark all resolved files.

    - :hg:`resolve -l`: list files which had or still have conflicts.
      In the printed list, ``U`` = unresolved and ``R`` = resolved.

    Note that Mercurial will not let you commit files with unresolved
    merge conflicts. You must use :hg:`resolve -m ...` before you can
    commit after a conflicting merge.

    Returns 0 on success, 1 if any files fail a resolve attempt.
    """

    # unpack the mutually-related flags in one pass; note --list maps to
    # 'show' and --no-status to 'nostatus'
    all, mark, unmark, show, nostatus = \
        [opts.get(o) for o in 'all mark unmark list no_status'.split()]

    # -l, -m and -u are mutually exclusive modes
    if (show and (mark or unmark)) or (mark and unmark):
        raise util.Abort(_("too many options specified"))
    if pats and all:
        raise util.Abort(_("can't specify --all and patterns"))
    if not (all or pats or show or mark or unmark):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to remerge all files'))

    ms = mergemod.mergestate(repo)
    m = scmutil.match(repo, pats, opts)
    ret = 0

    # iterate every file recorded in the merge state; the matcher limits
    # the action to the requested files (or everything with --all/no pats)
    for f in ms:
        if m(f):
            if show:
                if nostatus:
                    ui.write("%s\n" % f)
                else:
                    # ms[f] is 'u' (unresolved) or 'r' (resolved); print
                    # it uppercased with a matching color label
                    ui.write("%s %s\n" % (ms[f].upper(), f),
                             label='resolve.' +
                             {'u': 'unresolved', 'r': 'resolved'}[ms[f]])
            elif mark:
                ms.mark(f, "r")
            elif unmark:
                ms.mark(f, "u")
            else:
                # default mode: actually re-run the merge for this file
                wctx = repo[None]
                mctx = wctx.parents()[-1]

                # backup pre-resolve (merge uses .orig for its own purposes)
                a = repo.wjoin(f)
                util.copyfile(a, a + ".resolve")

                try:
                    # resolve file
                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
                    # ms.resolve returns truthy on failure, so any file
                    # that fails flips the exit status to 1
                    if ms.resolve(f, wctx, mctx):
                        ret = 1
                finally:
                    # always restore forcemerge so later commands are
                    # unaffected by our --tool override
                    ui.setconfig('ui', 'forcemerge', '')

                # replace filemerge's .orig file with our resolve file
                util.rename(a + ".resolve", a + ".orig")

    # persist any state changes (marks and resolve results) to disk
    ms.commit()
    return ret
4073
4073
4074 @command('revert',
4074 @command('revert',
4075 [('a', 'all', None, _('revert all changes when no arguments given')),
4075 [('a', 'all', None, _('revert all changes when no arguments given')),
4076 ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
4076 ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
4077 ('r', 'rev', '', _('revert to the specified revision'), _('REV')),
4077 ('r', 'rev', '', _('revert to the specified revision'), _('REV')),
4078 ('', 'no-backup', None, _('do not save backup copies of files')),
4078 ('', 'no-backup', None, _('do not save backup copies of files')),
4079 ] + walkopts + dryrunopts,
4079 ] + walkopts + dryrunopts,
4080 _('[OPTION]... [-r REV] [NAME]...'))
4080 _('[OPTION]... [-r REV] [NAME]...'))
4081 def revert(ui, repo, *pats, **opts):
4081 def revert(ui, repo, *pats, **opts):
4082 """restore individual files or directories to an earlier state
4082 """restore individual files or directories to an earlier state
4083
4083
4084 .. note::
4084 .. note::
4085 This command is most likely not what you are looking for.
4085 This command is most likely not what you are looking for.
4086 Revert will partially overwrite content in the working
4086 Revert will partially overwrite content in the working
4087 directory without changing the working directory parents. Use
4087 directory without changing the working directory parents. Use
4088 :hg:`update -r rev` to check out earlier revisions, or
4088 :hg:`update -r rev` to check out earlier revisions, or
4089 :hg:`update --clean .` to undo a merge which has added another
4089 :hg:`update --clean .` to undo a merge which has added another
4090 parent.
4090 parent.
4091
4091
4092 With no revision specified, revert the named files or directories
4092 With no revision specified, revert the named files or directories
4093 to the contents they had in the parent of the working directory.
4093 to the contents they had in the parent of the working directory.
4094 This restores the contents of the affected files to an unmodified
4094 This restores the contents of the affected files to an unmodified
4095 state and unschedules adds, removes, copies, and renames. If the
4095 state and unschedules adds, removes, copies, and renames. If the
4096 working directory has two parents, you must explicitly specify a
4096 working directory has two parents, you must explicitly specify a
4097 revision.
4097 revision.
4098
4098
4099 Using the -r/--rev option, revert the given files or directories
4099 Using the -r/--rev option, revert the given files or directories
4100 to their contents as of a specific revision. This can be helpful
4100 to their contents as of a specific revision. This can be helpful
4101 to "roll back" some or all of an earlier change. See :hg:`help
4101 to "roll back" some or all of an earlier change. See :hg:`help
4102 dates` for a list of formats valid for -d/--date.
4102 dates` for a list of formats valid for -d/--date.
4103
4103
4104 Revert modifies the working directory. It does not commit any
4104 Revert modifies the working directory. It does not commit any
4105 changes, or change the parent of the working directory. If you
4105 changes, or change the parent of the working directory. If you
4106 revert to a revision other than the parent of the working
4106 revert to a revision other than the parent of the working
4107 directory, the reverted files will thus appear modified
4107 directory, the reverted files will thus appear modified
4108 afterwards.
4108 afterwards.
4109
4109
4110 If a file has been deleted, it is restored. Files scheduled for
4110 If a file has been deleted, it is restored. Files scheduled for
4111 addition are just unscheduled and left as they are. If the
4111 addition are just unscheduled and left as they are. If the
4112 executable mode of a file was changed, it is reset.
4112 executable mode of a file was changed, it is reset.
4113
4113
4114 If names are given, all files matching the names are reverted.
4114 If names are given, all files matching the names are reverted.
4115 If no arguments are given, no files are reverted.
4115 If no arguments are given, no files are reverted.
4116
4116
4117 Modified files are saved with a .orig suffix before reverting.
4117 Modified files are saved with a .orig suffix before reverting.
4118 To disable these backups, use --no-backup.
4118 To disable these backups, use --no-backup.
4119
4119
4120 Returns 0 on success.
4120 Returns 0 on success.
4121 """
4121 """
4122
4122
4123 if opts.get("date"):
4123 if opts.get("date"):
4124 if opts.get("rev"):
4124 if opts.get("rev"):
4125 raise util.Abort(_("you can't specify a revision and a date"))
4125 raise util.Abort(_("you can't specify a revision and a date"))
4126 opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
4126 opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
4127
4127
4128 parent, p2 = repo.dirstate.parents()
4128 parent, p2 = repo.dirstate.parents()
4129 if not opts.get('rev') and p2 != nullid:
4129 if not opts.get('rev') and p2 != nullid:
4130 raise util.Abort(_('uncommitted merge - '
4130 raise util.Abort(_('uncommitted merge - '
4131 'use "hg update", see "hg help revert"'))
4131 'use "hg update", see "hg help revert"'))
4132
4132
4133 if not pats and not opts.get('all'):
4133 if not pats and not opts.get('all'):
4134 raise util.Abort(_('no files or directories specified; '
4134 raise util.Abort(_('no files or directories specified; '
4135 'use --all to revert the whole repo'))
4135 'use --all to revert the whole repo'))
4136
4136
4137 ctx = scmutil.revsingle(repo, opts.get('rev'))
4137 ctx = scmutil.revsingle(repo, opts.get('rev'))
4138 node = ctx.node()
4138 node = ctx.node()
4139 mf = ctx.manifest()
4139 mf = ctx.manifest()
4140 if node == parent:
4140 if node == parent:
4141 pmf = mf
4141 pmf = mf
4142 else:
4142 else:
4143 pmf = None
4143 pmf = None
4144
4144
4145 # need all matching names in dirstate and manifest of target rev,
4145 # need all matching names in dirstate and manifest of target rev,
4146 # so have to walk both. do not print errors if files exist in one
4146 # so have to walk both. do not print errors if files exist in one
4147 # but not other.
4147 # but not other.
4148
4148
4149 names = {}
4149 names = {}
4150
4150
4151 wlock = repo.wlock()
4151 wlock = repo.wlock()
4152 try:
4152 try:
4153 # walk dirstate.
4153 # walk dirstate.
4154
4154
4155 m = scmutil.match(repo, pats, opts)
4155 m = scmutil.match(repo, pats, opts)
4156 m.bad = lambda x, y: False
4156 m.bad = lambda x, y: False
4157 for abs in repo.walk(m):
4157 for abs in repo.walk(m):
4158 names[abs] = m.rel(abs), m.exact(abs)
4158 names[abs] = m.rel(abs), m.exact(abs)
4159
4159
4160 # walk target manifest.
4160 # walk target manifest.
4161
4161
4162 def badfn(path, msg):
4162 def badfn(path, msg):
4163 if path in names:
4163 if path in names:
4164 return
4164 return
4165 path_ = path + '/'
4165 path_ = path + '/'
4166 for f in names:
4166 for f in names:
4167 if f.startswith(path_):
4167 if f.startswith(path_):
4168 return
4168 return
4169 ui.warn("%s: %s\n" % (m.rel(path), msg))
4169 ui.warn("%s: %s\n" % (m.rel(path), msg))
4170
4170
4171 m = scmutil.match(repo, pats, opts)
4171 m = scmutil.match(repo, pats, opts)
4172 m.bad = badfn
4172 m.bad = badfn
4173 for abs in repo[node].walk(m):
4173 for abs in repo[node].walk(m):
4174 if abs not in names:
4174 if abs not in names:
4175 names[abs] = m.rel(abs), m.exact(abs)
4175 names[abs] = m.rel(abs), m.exact(abs)
4176
4176
4177 m = scmutil.matchfiles(repo, names)
4177 m = scmutil.matchfiles(repo, names)
4178 changes = repo.status(match=m)[:4]
4178 changes = repo.status(match=m)[:4]
4179 modified, added, removed, deleted = map(set, changes)
4179 modified, added, removed, deleted = map(set, changes)
4180
4180
4181 # if f is a rename, also revert the source
4181 # if f is a rename, also revert the source
4182 cwd = repo.getcwd()
4182 cwd = repo.getcwd()
4183 for f in added:
4183 for f in added:
4184 src = repo.dirstate.copied(f)
4184 src = repo.dirstate.copied(f)
4185 if src and src not in names and repo.dirstate[src] == 'r':
4185 if src and src not in names and repo.dirstate[src] == 'r':
4186 removed.add(src)
4186 removed.add(src)
4187 names[src] = (repo.pathto(src, cwd), True)
4187 names[src] = (repo.pathto(src, cwd), True)
4188
4188
4189 def removeforget(abs):
4189 def removeforget(abs):
4190 if repo.dirstate[abs] == 'a':
4190 if repo.dirstate[abs] == 'a':
4191 return _('forgetting %s\n')
4191 return _('forgetting %s\n')
4192 return _('removing %s\n')
4192 return _('removing %s\n')
4193
4193
4194 revert = ([], _('reverting %s\n'))
4194 revert = ([], _('reverting %s\n'))
4195 add = ([], _('adding %s\n'))
4195 add = ([], _('adding %s\n'))
4196 remove = ([], removeforget)
4196 remove = ([], removeforget)
4197 undelete = ([], _('undeleting %s\n'))
4197 undelete = ([], _('undeleting %s\n'))
4198
4198
4199 disptable = (
4199 disptable = (
4200 # dispatch table:
4200 # dispatch table:
4201 # file state
4201 # file state
4202 # action if in target manifest
4202 # action if in target manifest
4203 # action if not in target manifest
4203 # action if not in target manifest
4204 # make backup if in target manifest
4204 # make backup if in target manifest
4205 # make backup if not in target manifest
4205 # make backup if not in target manifest
4206 (modified, revert, remove, True, True),
4206 (modified, revert, remove, True, True),
4207 (added, revert, remove, True, False),
4207 (added, revert, remove, True, False),
4208 (removed, undelete, None, False, False),
4208 (removed, undelete, None, False, False),
4209 (deleted, revert, remove, False, False),
4209 (deleted, revert, remove, False, False),
4210 )
4210 )
4211
4211
4212 for abs, (rel, exact) in sorted(names.items()):
4212 for abs, (rel, exact) in sorted(names.items()):
4213 mfentry = mf.get(abs)
4213 mfentry = mf.get(abs)
4214 target = repo.wjoin(abs)
4214 target = repo.wjoin(abs)
4215 def handle(xlist, dobackup):
4215 def handle(xlist, dobackup):
4216 xlist[0].append(abs)
4216 xlist[0].append(abs)
4217 if (dobackup and not opts.get('no_backup') and
4217 if (dobackup and not opts.get('no_backup') and
4218 os.path.lexists(target)):
4218 os.path.lexists(target)):
4219 bakname = "%s.orig" % rel
4219 bakname = "%s.orig" % rel
4220 ui.note(_('saving current version of %s as %s\n') %
4220 ui.note(_('saving current version of %s as %s\n') %
4221 (rel, bakname))
4221 (rel, bakname))
4222 if not opts.get('dry_run'):
4222 if not opts.get('dry_run'):
4223 util.rename(target, bakname)
4223 util.rename(target, bakname)
4224 if ui.verbose or not exact:
4224 if ui.verbose or not exact:
4225 msg = xlist[1]
4225 msg = xlist[1]
4226 if not isinstance(msg, basestring):
4226 if not isinstance(msg, basestring):
4227 msg = msg(abs)
4227 msg = msg(abs)
4228 ui.status(msg % rel)
4228 ui.status(msg % rel)
4229 for table, hitlist, misslist, backuphit, backupmiss in disptable:
4229 for table, hitlist, misslist, backuphit, backupmiss in disptable:
4230 if abs not in table:
4230 if abs not in table:
4231 continue
4231 continue
4232 # file has changed in dirstate
4232 # file has changed in dirstate
4233 if mfentry:
4233 if mfentry:
4234 handle(hitlist, backuphit)
4234 handle(hitlist, backuphit)
4235 elif misslist is not None:
4235 elif misslist is not None:
4236 handle(misslist, backupmiss)
4236 handle(misslist, backupmiss)
4237 break
4237 break
4238 else:
4238 else:
4239 if abs not in repo.dirstate:
4239 if abs not in repo.dirstate:
4240 if mfentry:
4240 if mfentry:
4241 handle(add, True)
4241 handle(add, True)
4242 elif exact:
4242 elif exact:
4243 ui.warn(_('file not managed: %s\n') % rel)
4243 ui.warn(_('file not managed: %s\n') % rel)
4244 continue
4244 continue
4245 # file has not changed in dirstate
4245 # file has not changed in dirstate
4246 if node == parent:
4246 if node == parent:
4247 if exact:
4247 if exact:
4248 ui.warn(_('no changes needed to %s\n') % rel)
4248 ui.warn(_('no changes needed to %s\n') % rel)
4249 continue
4249 continue
4250 if pmf is None:
4250 if pmf is None:
4251 # only need parent manifest in this unlikely case,
4251 # only need parent manifest in this unlikely case,
4252 # so do not read by default
4252 # so do not read by default
4253 pmf = repo[parent].manifest()
4253 pmf = repo[parent].manifest()
4254 if abs in pmf:
4254 if abs in pmf:
4255 if mfentry:
4255 if mfentry:
4256 # if version of file is same in parent and target
4256 # if version of file is same in parent and target
4257 # manifests, do nothing
4257 # manifests, do nothing
4258 if (pmf[abs] != mfentry or
4258 if (pmf[abs] != mfentry or
4259 pmf.flags(abs) != mf.flags(abs)):
4259 pmf.flags(abs) != mf.flags(abs)):
4260 handle(revert, False)
4260 handle(revert, False)
4261 else:
4261 else:
4262 handle(remove, False)
4262 handle(remove, False)
4263
4263
4264 if not opts.get('dry_run'):
4264 if not opts.get('dry_run'):
4265 def checkout(f):
4265 def checkout(f):
4266 fc = ctx[f]
4266 fc = ctx[f]
4267 repo.wwrite(f, fc.data(), fc.flags())
4267 repo.wwrite(f, fc.data(), fc.flags())
4268
4268
4269 audit_path = scmutil.pathauditor(repo.root)
4269 audit_path = scmutil.pathauditor(repo.root)
4270 for f in remove[0]:
4270 for f in remove[0]:
4271 if repo.dirstate[f] == 'a':
4271 if repo.dirstate[f] == 'a':
4272 repo.dirstate.drop(f)
4272 repo.dirstate.drop(f)
4273 continue
4273 continue
4274 audit_path(f)
4274 audit_path(f)
4275 try:
4275 try:
4276 util.unlinkpath(repo.wjoin(f))
4276 util.unlinkpath(repo.wjoin(f))
4277 except OSError:
4277 except OSError:
4278 pass
4278 pass
4279 repo.dirstate.remove(f)
4279 repo.dirstate.remove(f)
4280
4280
4281 normal = None
4281 normal = None
4282 if node == parent:
4282 if node == parent:
4283 # We're reverting to our parent. If possible, we'd like status
4283 # We're reverting to our parent. If possible, we'd like status
4284 # to report the file as clean. We have to use normallookup for
4284 # to report the file as clean. We have to use normallookup for
4285 # merges to avoid losing information about merged/dirty files.
4285 # merges to avoid losing information about merged/dirty files.
4286 if p2 != nullid:
4286 if p2 != nullid:
4287 normal = repo.dirstate.normallookup
4287 normal = repo.dirstate.normallookup
4288 else:
4288 else:
4289 normal = repo.dirstate.normal
4289 normal = repo.dirstate.normal
4290 for f in revert[0]:
4290 for f in revert[0]:
4291 checkout(f)
4291 checkout(f)
4292 if normal:
4292 if normal:
4293 normal(f)
4293 normal(f)
4294
4294
4295 for f in add[0]:
4295 for f in add[0]:
4296 checkout(f)
4296 checkout(f)
4297 repo.dirstate.add(f)
4297 repo.dirstate.add(f)
4298
4298
4299 normal = repo.dirstate.normallookup
4299 normal = repo.dirstate.normallookup
4300 if node == parent and p2 == nullid:
4300 if node == parent and p2 == nullid:
4301 normal = repo.dirstate.normal
4301 normal = repo.dirstate.normal
4302 for f in undelete[0]:
4302 for f in undelete[0]:
4303 checkout(f)
4303 checkout(f)
4304 normal(f)
4304 normal(f)
4305
4305
4306 finally:
4306 finally:
4307 wlock.release()
4307 wlock.release()
4308
4308
@command('rollback', dryrunopts)
def rollback(ui, repo, **opts):
    """roll back the last transaction (dangerous)

    This command should be used with care. There is only one level of
    rollback, and there is no way to undo a rollback. It will also
    restore the dirstate at the time of the last transaction, losing
    any dirstate changes since that time. This command does not alter
    the working directory.

    Transactions are used to encapsulate the effects of all commands
    that create new changesets or propagate existing changesets into a
    repository. For example, the following commands are transactional,
    and their effects can be rolled back:

    - commit
    - import
    - pull
    - push (with this repository as the destination)
    - unbundle

    This command is not intended for use on public repositories. Once
    changes are visible for pull by other users, rolling a transaction
    back locally is ineffective (someone else may already have pulled
    the changes). Furthermore, a race is possible with readers of the
    repository; for example an in-progress pull from the repository
    may fail if a rollback is performed.

    Returns 0 on success, 1 if no rollback data is available.
    """
    # Delegate entirely to the repository; it owns the transaction journal
    # and reports via its return code whether rollback data existed.
    dryrun = opts.get('dry_run')
    return repo.rollback(dryrun)
4340
4340
@command('root', [])
def root(ui, repo):
    """print the root (top) of the current working directory

    Print the root directory of the current repository.

    Returns 0 on success.
    """
    # Emit the repository root followed by a newline; identical output to
    # concatenation, expressed as a format string.
    ui.write("%s\n" % repo.root)
4350
4350
@command('^serve',
    [('A', 'accesslog', '', _('name of access log file to write to'),
     _('FILE')),
    ('d', 'daemon', None, _('run server in background')),
    ('', 'daemon-pipefds', '', _('used internally by daemon mode'), _('NUM')),
    ('E', 'errorlog', '', _('name of error log file to write to'), _('FILE')),
    # use string type, then we can check if something was passed
    ('p', 'port', '', _('port to listen on (default: 8000)'), _('PORT')),
    ('a', 'address', '', _('address to listen on (default: all interfaces)'),
     _('ADDR')),
    ('', 'prefix', '', _('prefix path to serve from (default: server root)'),
     _('PREFIX')),
    ('n', 'name', '',
     _('name to show in web pages (default: working directory)'), _('NAME')),
    ('', 'web-conf', '',
     _('name of the hgweb config file (see "hg help hgweb")'), _('FILE')),
    ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'),
     _('FILE')),
    ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')),
    ('', 'stdio', None, _('for remote clients')),
    ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')),
    ('', 'style', '', _('template style to use'), _('STYLE')),
    ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
    ('', 'certificate', '', _('SSL certificate file'), _('FILE'))],
    _('[OPTION]...'))
def serve(ui, repo, **opts):
    """start stand-alone webserver

    Start a local HTTP repository browser and pull server. You can use
    this for ad-hoc sharing and browsing of repositories. It is
    recommended to use a real web server to serve a repository for
    longer periods of time.

    Please note that the server does not implement access control.
    This means that, by default, anybody can read from the server and
    nobody can write to it by default. Set the ``web.allow_push``
    option to ``*`` to allow everybody to push to the server. You
    should use a real web server if you need to authenticate users.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the -A/--accesslog and -E/--errorlog options to log to
    files.

    To have the server choose a free port number to listen on, specify
    a port number of 0; in this case, the server will print the port
    number it uses.

    Returns 0 on success.
    """

    # --stdio: serve a single remote (ssh) client over stdin/stdout
    # instead of starting an HTTP server. A local repository is required.
    # NOTE(review): serve_forever() appears to run the whole session here;
    # there is no explicit return afterwards — confirm control flow.
    if opts["stdio"]:
        if repo is None:
            raise error.RepoError(_("There is no Mercurial repository here"
                                    " (.hg not found)"))
        s = sshserver.sshserver(ui, repo)
        s.serve_forever()

    # this way we can check if something was given in the command-line
    if opts.get('port'):
        opts['port'] = util.getport(opts.get('port'))

    # Prefer the repository's base ui (shared config) when one is present.
    baseui = repo and repo.baseui or ui
    # Mirror each command-line web option into the [web] config section so
    # hgweb picks it up; skip options the user did not set.
    optlist = ("name templates style address port prefix ipv6"
               " accesslog errorlog certificate encoding")
    for o in optlist.split():
        val = opts.get(o, '')
        if val in (None, ''): # should check against default options instead
            continue
        baseui.setconfig("web", o, val)
        if repo and repo.ui != baseui:
            repo.ui.setconfig("web", o, val)

    # Decide what to serve: an hgweb(dir) config file if given, otherwise
    # the current repository root.
    o = opts.get('web_conf') or opts.get('webdir_conf')
    if not o:
        if not repo:
            raise error.RepoError(_("There is no Mercurial repository"
                                    " here (.hg not found)"))
        o = repo.root

    app = hgweb.hgweb(o, baseui=ui)

    class service(object):
        # Adapter for cmdutil.service: init() binds the listening socket
        # (before any daemonization), run() enters the serve loop.
        def init(self):
            util.setsignalhandler()
            self.httpd = hgweb.server.create_server(ui, app)

            # With an explicit --port, only announce the address in
            # verbose mode; otherwise always print it (the chosen port may
            # have been picked by the OS).
            if opts['port'] and not ui.verbose:
                return

            if self.httpd.prefix:
                prefix = self.httpd.prefix.strip('/') + '/'
            else:
                prefix = ''

            # Omit ':80' since it is the implicit HTTP port in URLs.
            port = ':%d' % self.httpd.port
            if port == ':80':
                port = ''

            bindaddr = self.httpd.addr
            if bindaddr == '0.0.0.0':
                bindaddr = '*'
            elif ':' in bindaddr: # IPv6
                bindaddr = '[%s]' % bindaddr

            fqaddr = self.httpd.fqaddr
            if ':' in fqaddr:
                fqaddr = '[%s]' % fqaddr
            # ui.status is suppressed by --quiet; plain write is not.
            if opts['port']:
                write = ui.status
            else:
                write = ui.write
            write(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
                  (fqaddr, port, prefix, bindaddr, self.httpd.port))

        def run(self):
            self.httpd.serve_forever()

    service = service()

    cmdutil.service(opts, initfn=service.init, runfn=service.run)
4471
4471
@command('showconfig|debugconfig',
    [('u', 'untrusted', None, _('show untrusted configuration options'))],
    _('[-u] [NAME]...'))
def showconfig(ui, repo, *values, **opts):
    """show combined config settings from all hgrc files

    With no arguments, print names and values of all config items.

    With one argument of the form section.name, print just the value
    of that config item.

    With multiple arguments, print names and values of all config
    items with matching section names.

    With --debug, the source (filename and line number) is printed
    for each config item.

    Returns 0 on success.
    """

    for path in scmutil.rcpath():
        ui.debug(_('read config from: %s\n') % path)
    untrusted = bool(opts.get('untrusted'))
    if values:
        # Arguments without a dot select whole sections; with a dot they
        # name a single item. At most one item, and not mixed with sections.
        sections = [v for v in values if '.' not in v]
        items = [v for v in values if '.' in v]
        if len(items) > 1 or (items and sections):
            raise util.Abort(_('only one config item permitted'))
    for section, name, value in ui.walkconfig(untrusted=untrusted):
        # Keep each item on one output line.
        value = str(value).replace('\n', '\\n')
        sectname = '%s.%s' % (section, name)
        if not values:
            # No filter: dump everything as section.name=value.
            ui.debug('%s: ' % ui.configsource(section, name, untrusted))
            ui.write('%s=%s\n' % (sectname, value))
            continue
        for v in values:
            if v == section:
                # Section match: show qualified name and value.
                ui.debug('%s: ' % ui.configsource(section, name, untrusted))
                ui.write('%s=%s\n' % (sectname, value))
            elif v == sectname:
                # Exact item match: show just the value.
                ui.debug('%s: ' % ui.configsource(section, name, untrusted))
                ui.write(value, '\n')
4517
4517
@command('^status|st',
    [('A', 'all', None, _('show status of all files')),
    ('m', 'modified', None, _('show only modified files')),
    ('a', 'added', None, _('show only added files')),
    ('r', 'removed', None, _('show only removed files')),
    ('d', 'deleted', None, _('show only deleted (but tracked) files')),
    ('c', 'clean', None, _('show only files without changes')),
    ('u', 'unknown', None, _('show only unknown (not tracked) files')),
    ('i', 'ignored', None, _('show only ignored files')),
    ('n', 'no-status', None, _('hide status prefix')),
    ('C', 'copies', None, _('show source of copied files')),
    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
    ('', 'rev', [], _('show difference from revision'), _('REV')),
    ('', 'change', '', _('list the changed files of a revision'), _('REV')),
    ] + walkopts + subrepoopts,
    _('[OPTION]... [FILE]...'))
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show status of files in the repository. If names are given, only
    files that match are shown. Files that are clean or ignored or
    the source of a copy/move operation, are not listed unless
    -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
    Unless options described with "show only ..." are given, the
    options -mardu are used.

    Option -q/--quiet hides untracked (unknown and ignored) files
    unless explicitly requested with -u/--unknown or -i/--ignored.

    .. note::
       status may appear to disagree with diff if permissions have
       changed or a merge has occurred. The standard diff format does
       not report permission changes and diff only reports changes
       relative to one merge parent.

    If one revision is given, it is used as the base revision.
    If two revisions are given, the differences between them are
    shown. The --change option can also be used as a shortcut to list
    the changed files of a revision from its first parent.

    The codes used to show the status of files are::

      M = modified
      A = added
      R = removed
      C = clean
      ! = missing (deleted by non-hg command, but still tracked)
      ? = not tracked
      I = ignored
        = origin of the previous file listed as A (added)

    Returns 0 on success.
    """

    revs = opts.get('rev')
    change = opts.get('change')

    # --rev and --change are mutually exclusive ways of picking the
    # comparison endpoints: --change compares a revision to its first
    # parent; --rev names the endpoints directly.
    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        node2 = repo.lookup(change)
        node1 = repo[node2].p1().node()
    else:
        node1, node2 = scmutil.revpair(repo, revs)

    # Paths are printed relative to cwd only when patterns were given.
    cwd = (pats and repo.getcwd()) or ''
    # --print0 terminates entries with NUL for use with xargs -0.
    end = opts.get('print0') and '\0' or '\n'
    copy = {}
    # Order here fixes both output order and the MAR!?IC status letters.
    states = 'modified added removed deleted unknown ignored clean'.split()
    show = [k for k in states if opts.get(k)]
    if opts.get('all'):
        # -A adds everything; --quiet drops unknown/ignored from it.
        show += ui.quiet and (states[:4] + ['clean']) or states
    if not show:
        # Default is -mard(u); --quiet hides unknown as well.
        show = ui.quiet and states[:4] or states[:5]

    stat = repo.status(node1, node2, scmutil.match(repo, pats, opts),
                       'ignored' in show, 'clean' in show, 'unknown' in show,
                       opts.get('subrepos'))
    # Pair each state with its one-letter prefix and its file list.
    changestates = zip(states, 'MAR!?IC', stat)

    # Copy detection only matters when copy sources would be displayed
    # (-C or -A) and the status prefix is not being hidden.
    if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
        ctxn = repo[nullid]
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        added = stat[1]
        if node2 is None:
            added = stat[0] + stat[1] # merged?

        # Map displayed file -> copy source, whichever side is in 'added'.
        for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
            if k in added:
                copy[k] = v
            elif v in added:
                copy[v] = k

    for state, char, files in changestates:
        if state in show:
            # e.g. "M %s\n"; -n/--no-status drops the letter prefix.
            format = "%s %%s%s" % (char, end)
            if opts.get('no_status'):
                format = "%%s%s" % end

            for f in files:
                ui.write(format % repo.pathto(f, cwd),
                         label='status.' + state)
                if f in copy:
                    # Indented line naming the copy/move source of f.
                    ui.write('  %s%s' % (repo.pathto(copy[f], cwd), end),
                             label='status.copied')
4625
4625
@command('^summary|sum',
    [('', 'remote', None, _('check for push and pull'))], '[--remote]')
def summary(ui, repo, **opts):
    """summarize working directory state

    This generates a brief summary of the working directory state,
    including parents, branch, commit status, and available updates.

    With the --remote option, this will check the default paths for
    incoming and outgoing changes. This can be time-consuming.

    Returns 0 on success.
    """

    ctx = repo[None]
    parents = ctx.parents()
    pnode = parents[0].node()

    for parent in parents:
        # label with log.changeset (instead of log.parent) since this
        # shows a working directory parent *changeset*:
        ui.write(_('parent: %d:%s ') % (parent.rev(), str(parent)),
                 label='log.changeset')
        ui.write(' '.join(parent.tags()), label='log.tag')
        if parent.bookmarks():
            ui.write(' ' + ' '.join(parent.bookmarks()),
                     label='log.bookmark')
        if parent.rev() == -1:
            # null revision: distinguish a fresh repo from a bare checkout
            if not len(repo):
                ui.write(_(' (empty repository)'))
            else:
                ui.write(_(' (no revision checked out)'))
        ui.write('\n')
        if parent.description():
            ui.status(' ' + parent.description().splitlines()[0].strip()
                      + '\n', label='log.summary')

    branch = ctx.branch()
    bheads = repo.branchheads(branch)
    branchline = _('branch: %s\n') % branch
    # the default branch is only shown in verbose/normal mode via status()
    if branch != 'default':
        ui.write(branchline, label='log.branch')
    else:
        ui.status(branchline, label='log.branch')

    st = list(repo.status(unknown=True))[:6]

    # split "added" entries into renames (source also removed) and copies
    copymap = repo.dirstate.copies()
    copied, renamed = [], []
    for dst, src in copymap.iteritems():
        if src in st[2]:
            st[2].remove(src)
            renamed.append(dst)
        else:
            copied.append(dst)
        if dst in st[1]:
            st[1].remove(dst)
    st.insert(3, renamed)
    st.insert(4, copied)

    ms = mergemod.mergestate(repo)
    st.append([f for f in ms if ms[f] == 'u'])

    subs = [s for s in ctx.substate if ctx.sub(s).dirty()]
    st.append(subs)

    labels = [ui.label(_('%d modified'), 'status.modified'),
              ui.label(_('%d added'), 'status.added'),
              ui.label(_('%d removed'), 'status.removed'),
              ui.label(_('%d renamed'), 'status.copied'),
              ui.label(_('%d copied'), 'status.copied'),
              ui.label(_('%d deleted'), 'status.deleted'),
              ui.label(_('%d unknown'), 'status.unknown'),
              ui.label(_('%d ignored'), 'status.ignored'),
              ui.label(_('%d unresolved'), 'resolve.unresolved'),
              ui.label(_('%d subrepos'), 'status.modified')]
    msgs = []
    for files, label in zip(st, labels):
        if files:
            msgs.append(label % len(files))

    summarymsg = ', '.join(msgs)
    cleanworkdir = False

    if len(parents) > 1:
        summarymsg += _(' (merge)')
    elif branch != parents[0].branch():
        summarymsg += _(' (new branch)')
    elif (parents[0].extra().get('close') and
          pnode in repo.branchheads(branch, closed=True)):
        summarymsg += _(' (head closed)')
    elif not (st[0] or st[1] or st[2] or st[3] or st[4] or st[9]):
        summarymsg += _(' (clean)')
        cleanworkdir = True
    elif pnode not in bheads:
        summarymsg += _(' (new branch head)')

    if cleanworkdir:
        ui.status(_('commit: %s\n') % summarymsg.strip())
    else:
        ui.write(_('commit: %s\n') % summarymsg.strip())

    # all ancestors of branch heads - all ancestors of parent = new csets
    seen = [0] * len(repo)
    cl = repo.changelog
    bheadrevs = [cl.rev(n) for n in bheads]
    for r in bheadrevs:
        seen[r] = 1
    for r in cl.ancestors(*bheadrevs):
        seen[r] = 1
    parentrevs = [p.rev() for p in parents]
    for r in parentrevs:
        if r >= 0:
            seen[r] = 0
    for r in cl.ancestors(*parentrevs):
        seen[r] = 0
    new = sum(seen)

    if new == 0:
        ui.status(_('update: (current)\n'))
    elif pnode not in bheads:
        ui.write(_('update: %d new changesets (update)\n') % new)
    else:
        ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
                 (new, len(bheads)))

    if opts.get('remote'):
        rmsgs = []
        source, branches = hg.parseurl(ui.expandpath('default'))
        other = hg.repository(hg.remoteui(repo, {}), source)
        revs, checkout = hg.addbranchrevs(repo, other, branches,
                                          opts.get('rev'))
        ui.debug('comparing with %s\n' % util.hidepassword(source))
        repo.ui.pushbuffer()
        commoninc = discovery.findcommonincoming(repo, other)
        _common, incoming, _rheads = commoninc
        repo.ui.popbuffer()
        if incoming:
            rmsgs.append(_('1 or more incoming'))

        dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
        if source != dest:
            # different push target: redo the common-set discovery against it
            other = hg.repository(hg.remoteui(repo, {}), dest)
            commoninc = None
            ui.debug('comparing with %s\n' % util.hidepassword(dest))
        repo.ui.pushbuffer()
        common, outheads = discovery.findcommonoutgoing(repo, other,
                                                        commoninc=commoninc)
        repo.ui.popbuffer()
        o = repo.changelog.findmissing(common=common, heads=outheads)
        if o:
            rmsgs.append(_('%d outgoing') % len(o))
        if 'bookmarks' in other.listkeys('namespaces'):
            lmarks = repo.listkeys('bookmarks')
            rmarks = other.listkeys('bookmarks')
            diff = set(rmarks) - set(lmarks)
            if diff:
                rmsgs.append(_('%d incoming bookmarks') % len(diff))
            diff = set(lmarks) - set(rmarks)
            if diff:
                rmsgs.append(_('%d outgoing bookmarks') % len(diff))

        if rmsgs:
            ui.write(_('remote: %s\n') % (', '.join(rmsgs)))
        else:
            ui.status(_('remote: (synced)\n'))
4789
4789
@command('tag',
    [('f', 'force', None, _('force tag')),
    ('l', 'local', None, _('make the tag local')),
    ('r', 'rev', '', _('revision to tag'), _('REV')),
    ('', 'remove', None, _('remove a tag')),
    # -l/--local is already there, commitopts cannot be used
    ('e', 'edit', None, _('edit commit message')),
    ('m', 'message', '', _('use <text> as commit message'), _('TEXT')),
    ] + commitopts2,
    _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'))
def tag(ui, repo, name1, *names, **opts):
    """add one or more tags for the current or given revision

    Name a particular revision using <name>.

    Tags are used to name particular revisions of the repository and are
    very useful to compare different revisions, to go back to significant
    earlier versions or to mark branch points as releases, etc. Changing
    an existing tag is normally disallowed; use -f/--force to override.

    If no revision is given, the parent of the working directory is
    used, or tip if no revision is checked out.

    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed similarly
    to other project files and can be hand-edited if necessary. This
    also means that tagging creates a new commit. The file
    ".hg/localtags" is used for local tags (not shared among
    repositories).

    Tag commits are usually made at the head of a branch. If the parent
    of the working directory is not a branch head, :hg:`tag` aborts; use
    -f/--force to force the tag commit to be based on a non-head
    changeset.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Since tag names have priority over branch names during revision
    lookup, using an existing branch name as a tag name is discouraged.

    Returns 0 on success.
    """

    target = "."
    names = [raw.strip() for raw in (name1,) + names]
    if len(set(names)) != len(names):
        raise util.Abort(_('tag names must be unique'))
    for name in names:
        if name in ('tip', '.', 'null'):
            raise util.Abort(_("the name '%s' is reserved") % name)
        if not name:
            raise util.Abort(_('tag names cannot consist entirely of whitespace'))
    if opts.get('rev') and opts.get('remove'):
        raise util.Abort(_("--rev and --remove are incompatible"))
    if opts.get('rev'):
        target = opts['rev']
    message = opts.get('message')
    if opts.get('remove'):
        # a tag can only be removed at the same visibility it was made
        if opts.get('local'):
            expected = 'local'
        else:
            expected = 'global'
        for name in names:
            if not repo.tagtype(name):
                raise util.Abort(_("tag '%s' does not exist") % name)
            if repo.tagtype(name) != expected:
                if expected == 'global':
                    raise util.Abort(_("tag '%s' is not a global tag") % name)
                else:
                    raise util.Abort(_("tag '%s' is not a local tag") % name)
        # removal is recorded by re-tagging to the null revision
        target = nullid
        if not message:
            # we don't translate commit messages
            message = 'Removed tag %s' % ', '.join(names)
    elif not opts.get('force'):
        for name in names:
            if name in repo.tags():
                raise util.Abort(_("tag '%s' already exists "
                                   "(use -f to force)") % name)
    if not opts.get('local'):
        p1, p2 = repo.dirstate.parents()
        if p2 != nullid:
            raise util.Abort(_('uncommitted merge'))
        bheads = repo.branchheads()
        if not opts.get('force') and bheads and p1 not in bheads:
            raise util.Abort(_('not at a branch head (use -f to force)'))
    r = scmutil.revsingle(repo, target).node()

    if not message:
        # we don't translate commit messages
        message = ('Added tag %s for changeset %s' %
                   (', '.join(names), short(r)))

    date = opts.get('date')
    if date:
        date = util.parsedate(date)

    if opts.get('edit'):
        message = ui.edit(message, ui.username())

    repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
4888
4888
@command('tags', [], '')
def tags(ui, repo):
    """list repository tags

    This lists both regular and local tags. When the -v/--verbose
    switch is used, a third column "local" is printed for local tags.

    Returns 0 on success.
    """

    # full hashes when debugging, short hashes otherwise
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    tagtype = ""

    for name, node in reversed(repo.tagslist()):
        if ui.quiet:
            ui.write("%s\n" % name)
            continue

        hashstr = hexfunc(node)
        revline = "%5d:%s" % (repo.changelog.rev(node), hashstr)
        # pad tag names to a 30-column field
        spaces = " " * (30 - encoding.colwidth(name))

        if ui.verbose:
            if repo.tagtype(name) == 'local':
                tagtype = " local"
            else:
                tagtype = ""
        ui.write("%s%s %s%s\n" % (name, spaces, revline, tagtype))
4917
4917
@command('tip',
    [('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ] + templateopts,
    _('[-p] [-g]'))
def tip(ui, repo, **opts):
    """show the tip revision

    The tip revision (usually just called the tip) is the changeset
    most recently added to the repository (and therefore the most
    recently changed head).

    If you have just made a commit, that commit will be the tip. If
    you have just pulled changes from another repository, the tip of
    that repository becomes the current tip. The "tip" tag is special
    and cannot be renamed or assigned to a different changeset.

    Returns 0 on success.
    """
    displayer = cmdutil.show_changeset(ui, repo, opts)
    # the tip is always the highest-numbered revision
    tiprev = len(repo) - 1
    displayer.show(repo[tiprev])
    displayer.close()
4940
4940
@command('unbundle',
    [('u', 'update', None,
     _('update to new branch head if changesets were unbundled'))],
    _('[-u] FILE...'))
def unbundle(ui, repo, fname1, *fnames, **opts):
    """apply one or more changegroup files

    Apply one or more compressed changegroup files generated by the
    bundle command.

    Returns 0 on success, 1 if an update has unresolved files.
    """
    fnames = (fname1,) + fnames

    lock = repo.lock()
    wctx = repo['.']
    try:
        for fname in fnames:
            fh = url.open(ui, fname)
            bundle = changegroup.readbundle(fh, fname)
            modheads = repo.addchangegroup(bundle, 'unbundle',
                                           'bundle:' + fname, lock=lock)
        # keep the current bookmark on the working directory parent
        bookmarks.updatecurrentbookmark(repo, wctx.node(), wctx.branch())
    finally:
        lock.release()
    return postincoming(ui, repo, modheads, opts.get('update'), None)
4967
4967
@command('^update|up|checkout|co',
    [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
    ('c', 'check', None,
     _('update across branches if no uncommitted changes')),
    ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
    ('r', 'rev', '', _('revision'), _('REV'))],
    _('[-c] [-C] [-d DATE] [[-r] REV]'))
def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False):
    """update working directory (or switch revisions)

    Update the repository's working directory to the specified
    changeset. If no changeset is specified, update to the tip of the
    current named branch.

    If the changeset is not a descendant of the working directory's
    parent, the update is aborted. With the -c/--check option, the
    working directory is checked for uncommitted changes; if none are
    found, the working directory is updated to the specified
    changeset.

    The following rules apply when the working directory contains
    uncommitted changes:

    1. If neither -c/--check nor -C/--clean is specified, and if
       the requested changeset is an ancestor or descendant of
       the working directory's parent, the uncommitted changes
       are merged into the requested changeset and the merged
       result is left uncommitted. If the requested changeset is
       not an ancestor or descendant (that is, it is on another
       branch), the update is aborted and the uncommitted changes
       are preserved.

    2. With the -c/--check option, the update is aborted and the
       uncommitted changes are preserved.

    3. With the -C/--clean option, uncommitted changes are discarded and
       the working directory is updated to the requested changeset.

    Use null as the changeset to remove the working directory (like
    :hg:`clone -U`).

    If you want to update just one file to an older changeset, use
    :hg:`revert`.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success, 1 if there are unresolved files.
    """
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if rev is None or rev == '':
        rev = node

    # if we defined a bookmark, we have to remember the original bookmark name
    brev = rev
    rev = scmutil.revsingle(repo, rev, rev).rev()

    if check and clean:
        raise util.Abort(_("cannot specify both -c/--check and -C/--clean"))

    if check:
        # we could use dirty() but we can ignore merge and branch trivia
        wctx = repo[None]
        if wctx.modified() or wctx.added() or wctx.removed():
            raise util.Abort(_("uncommitted local changes"))

    if date:
        if rev is not None:
            raise util.Abort(_("you can't specify a revision and a date"))
        rev = cmdutil.finddate(ui, repo, date)

    # --check implies a clean update once the dirty check above has passed
    if clean or check:
        result = hg.clean(repo, rev)
    else:
        result = hg.update(repo, rev)

    # updating to a bookmark makes it the current one
    if brev in repo._bookmarks:
        bookmarks.setcurrent(repo, brev)

    return result
5049
5049
@command('verify', [])
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.

    Returns 0 on success, 1 if errors are encountered.
    """
    # all the heavy lifting lives in mercurial.hg / mercurial.verify
    return hg.verify(repo)
5064
5064
@command('version', [])
def version_(ui):
    """output version and copyright information"""
    # version line is always shown; the copyright blurb only in non-quiet mode
    ui.write(_("Mercurial Distributed SCM (version %s)\n")
             % util.version())
    copyright = _(
        "(see http://mercurial.selenic.com for more information)\n"
        "\nCopyright (C) 2005-2011 Matt Mackall and others\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
        )
    ui.status(copyright)
5077
5077
# commands that may be run without a repository
norepo = ("clone init version help debugcommands debugcomplete"
          " debugdate debuginstall debugfsinfo debugpushkey debugwireargs"
          " debugknown debuggetbundle debugbundle")
# commands for which a repository argument is optional
optionalrepo = ("identify paths serve showconfig debugancestor debugdag"
                " debugdata debugindex debugindexdot debugrevlog")
@@ -1,267 +1,267
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import util
8 import util
9 import heapq
9 import heapq
10
10
11 def _nonoverlap(d1, d2, d3):
11 def _nonoverlap(d1, d2, d3):
12 "Return list of elements in d1 not in d2 or d3"
12 "Return list of elements in d1 not in d2 or d3"
13 return sorted([d for d in d1 if d not in d3 and d not in d2])
13 return sorted([d for d in d1 if d not in d3 and d not in d2])
14
14
15 def _dirname(f):
15 def _dirname(f):
16 s = f.rfind("/")
16 s = f.rfind("/")
17 if s == -1:
17 if s == -1:
18 return ""
18 return ""
19 return f[:s]
19 return f[:s]
20
20
def _dirs(files):
    """Return the set of all ancestor directories of the given file paths.

    The empty string (the repository root) is included once any file is
    processed, since every path climbs up to it.
    """
    seen = set()
    for name in files:
        directory = _dirname(name)
        # climb towards the root, stopping as soon as we reach a
        # directory we have already recorded ("" terminates the climb)
        while directory not in seen:
            seen.add(directory)
            directory = _dirname(directory)
    return seen
29
29
30 def _findlimit(repo, a, b):
30 def _findlimit(repo, a, b):
31 """Find the earliest revision that's an ancestor of a or b but not both,
31 """Find the earliest revision that's an ancestor of a or b but not both,
32 None if no such revision exists.
32 None if no such revision exists.
33 """
33 """
34 # basic idea:
34 # basic idea:
35 # - mark a and b with different sides
35 # - mark a and b with different sides
36 # - if a parent's children are all on the same side, the parent is
36 # - if a parent's children are all on the same side, the parent is
37 # on that side, otherwise it is on no side
37 # on that side, otherwise it is on no side
38 # - walk the graph in topological order with the help of a heap;
38 # - walk the graph in topological order with the help of a heap;
39 # - add unseen parents to side map
39 # - add unseen parents to side map
40 # - clear side of any parent that has children on different sides
40 # - clear side of any parent that has children on different sides
41 # - track number of interesting revs that might still be on a side
41 # - track number of interesting revs that might still be on a side
42 # - track the lowest interesting rev seen
42 # - track the lowest interesting rev seen
43 # - quit when interesting revs is zero
43 # - quit when interesting revs is zero
44
44
45 cl = repo.changelog
45 cl = repo.changelog
46 working = len(cl) # pseudo rev for the working directory
46 working = len(cl) # pseudo rev for the working directory
47 if a is None:
47 if a is None:
48 a = working
48 a = working
49 if b is None:
49 if b is None:
50 b = working
50 b = working
51
51
52 side = {a: -1, b: 1}
52 side = {a: -1, b: 1}
53 visit = [-a, -b]
53 visit = [-a, -b]
54 heapq.heapify(visit)
54 heapq.heapify(visit)
55 interesting = len(visit)
55 interesting = len(visit)
56 hascommonancestor = False
56 hascommonancestor = False
57 limit = working
57 limit = working
58
58
59 while interesting:
59 while interesting:
60 r = -heapq.heappop(visit)
60 r = -heapq.heappop(visit)
61 if r == working:
61 if r == working:
62 parents = [cl.rev(p) for p in repo.dirstate.parents()]
62 parents = [cl.rev(p) for p in repo.dirstate.parents()]
63 else:
63 else:
64 parents = cl.parentrevs(r)
64 parents = cl.parentrevs(r)
65 for p in parents:
65 for p in parents:
66 if p < 0:
66 if p < 0:
67 continue
67 continue
68 if p not in side:
68 if p not in side:
69 # first time we see p; add it to visit
69 # first time we see p; add it to visit
70 side[p] = side[r]
70 side[p] = side[r]
71 if side[p]:
71 if side[p]:
72 interesting += 1
72 interesting += 1
73 heapq.heappush(visit, -p)
73 heapq.heappush(visit, -p)
74 elif side[p] and side[p] != side[r]:
74 elif side[p] and side[p] != side[r]:
75 # p was interesting but now we know better
75 # p was interesting but now we know better
76 side[p] = 0
76 side[p] = 0
77 interesting -= 1
77 interesting -= 1
78 hascommonancestor = True
78 hascommonancestor = True
79 if side[r]:
79 if side[r]:
80 limit = r # lowest rev visited
80 limit = r # lowest rev visited
81 interesting -= 1
81 interesting -= 1
82
82
83 if not hascommonancestor:
83 if not hascommonancestor:
84 return None
84 return None
85 return limit
85 return limit
86
86
def copies(repo, c1, c2, ca, checkdirs=False):
    """
    Find moves and copies between context c1 and c2

    ca is the common ancestor context.  Returns a pair of dictionaries
    (copy, diverge): copy maps a renamed/copied file to the name it was
    copied from; diverge maps a source name to the list of files it
    diverged into (one source renamed to several destinations).  When
    checkdirs is true, whole-directory renames are also detected and
    applied to otherwise-unmatched new files.
    """
    # avoid silly behavior for update from empty dir
    if not c1 or not c2 or c1 == c2:
        return {}, {}

    # avoid silly behavior for parent -> working dir
    if c2.node() is None and c1.node() == repo.dirstate.p1():
        return repo.dirstate.copies(), {}

    # oldest revision copy tracing needs to go back to
    limit = _findlimit(repo, c1.rev(), c2.rev())
    if limit is None:
        # no common ancestor, no copies
        return {}, {}
    m1 = c1.manifest()
    m2 = c2.manifest()
    ma = ca.manifest()

    def makectx(f, n):
        # n is a file node; a node of unusual length marks a working
        # directory entry (no stored filelog node)
        if len(n) != 20: # in a working context?
            if c1.rev() is None:
                return c1.filectx(f)
            return c2.filectx(f)
        return repo.filectx(f, fileid=n)

    # file contexts are looked up repeatedly; memoize
    ctx = util.lrucachefunc(makectx)
    copy = {}
    fullcopy = {}
    diverge = {}

    def related(f1, f2, limit):
        # Walk back to common ancestor to see if the two files originate
        # from the same file. Since workingfilectx's rev() is None it messes
        # up the integer comparison logic, hence the pre-step check for
        # None (f1 and f2 can only be workingfilectx's initially).

        if f1 == f2:
            return f1 # a match

        g1, g2 = f1.ancestors(), f2.ancestors()
        try:
            f1r, f2r = f1.rev(), f2.rev()

            if f1r is None:
                f1 = g1.next()
            if f2r is None:
                f2 = g2.next()

            # advance whichever side is at the higher revision until the
            # two walks meet, cross, or fall below the limit
            while True:
                f1r, f2r = f1.rev(), f2.rev()
                if f1r > f2r:
                    f1 = g1.next()
                elif f2r > f1r:
                    f2 = g2.next()
                elif f1 == f2:
                    return f1 # a match
                elif f1r == f2r or f1r < limit or f2r < limit:
                    return False # copy no longer relevant
        except StopIteration:
            # one side ran out of ancestors
            return False

    def checkcopies(f, m1, m2):
        '''check possible copies of f from m1 to m2'''
        of = None
        seen = set([f])
        for oc in ctx(f, m1[f]).ancestors():
            ocr = oc.rev()
            of = oc.path()
            if of in seen:
                # check limit late - grab last rename before
                if ocr < limit:
                    break
                continue
            seen.add(of)

            fullcopy[f] = of # remember for dir rename detection
            if of not in m2:
                continue # no match, keep looking
            if m2[of] == ma.get(of):
                break # no merge needed, quit early
            c2 = ctx(of, m2[of])
            cr = related(oc, c2, ca.rev())
            if cr and (of == f or of == c2.path()): # non-divergent
                copy[f] = of
                of = None
                break

        if of in ma:
            # of is still set: f traced back to a file that exists in
            # the ancestor but was not matched -> possible divergence
            diverge.setdefault(of, []).append(f)

    repo.ui.debug(" searching for copies back to rev %d\n" % limit)

    # files only on one side and absent from the ancestor are the
    # rename/copy candidates
    u1 = _nonoverlap(m1, m2, ma)
    u2 = _nonoverlap(m2, m1, ma)

    if u1:
        repo.ui.debug(" unmatched files in local:\n %s\n"
                      % "\n ".join(u1))
    if u2:
        repo.ui.debug(" unmatched files in other:\n %s\n"
                      % "\n ".join(u2))

    for f in u1:
        checkcopies(f, m1, m2)
    for f in u2:
        checkcopies(f, m2, m1)

    diverge2 = set()
    for of, fl in diverge.items():
        if len(fl) == 1 or of in c2:
            del diverge[of] # not actually divergent, or not a rename
        else:
            diverge2.update(fl) # reverse map for below

    if fullcopy:
        repo.ui.debug(" all copies found (* = to merge, ! = divergent):\n")
        for f in fullcopy:
            note = ""
            if f in copy:
                note += "*"
            if f in diverge2:
                note += "!"
            repo.ui.debug(" %s -> %s %s\n" % (f, fullcopy[f], note))
    del diverge2

    if not fullcopy or not checkdirs:
        return copy, diverge

    repo.ui.debug(" checking for directory renames\n")

    # generate a directory move map
    d1, d2 = _dirs(m1), _dirs(m2)
    invalid = set()
    dirmove = {}

    # examine each file copy for a potential directory move, which is
    # when all the files in a directory are moved to a new directory
    for dst, src in fullcopy.iteritems():
        dsrc, ddst = _dirname(src), _dirname(dst)
        if dsrc in invalid:
            # already seen to be uninteresting
            continue
        elif dsrc in d1 and ddst in d1:
            # directory wasn't entirely moved locally
            invalid.add(dsrc)
        elif dsrc in d2 and ddst in d2:
            # directory wasn't entirely moved remotely
            invalid.add(dsrc)
        elif dsrc in dirmove and dirmove[dsrc] != ddst:
            # files from the same directory moved to two different places
            invalid.add(dsrc)
        else:
            # looks good so far
            dirmove[dsrc + "/"] = ddst + "/"

    for i in invalid:
        if i in dirmove:
            del dirmove[i]
    del d1, d2, invalid

    if not dirmove:
        return copy, diverge

    for d in dirmove:
        repo.ui.debug(" dir %s -> %s\n" % (d, dirmove[d]))

    # check unaccounted nonoverlapping files against directory moves
    for f in u1 + u2:
        if f not in fullcopy:
            for d in dirmove:
                if f.startswith(d):
                    # new file added in a directory that was moved, move it
                    df = dirmove[d] + f[len(d):]
                    if df not in copy:
                        copy[f] = df
                        repo.ui.debug(" file %s -> %s\n" % (f, copy[f]))
                        break

    return copy, diverge
@@ -1,88 +1,88
1 #
1 #
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import cgi, cStringIO, zlib, sys, urllib
8 import cgi, cStringIO, zlib, sys, urllib
9 from mercurial import util, wireproto
9 from mercurial import util, wireproto
10 from common import HTTP_OK
10 from common import HTTP_OK
11
11
12 HGTYPE = 'application/mercurial-0.1'
12 HGTYPE = 'application/mercurial-0.1'
13
13
class webproto(object):
    """Adapt an hgweb request object to the wireproto dispatch interface."""
    def __init__(self, req):
        # req is the hgweb request wrapper; response accumulates output
        self.req = req
        self.response = ''
    def getargs(self, args):
        """Return values for the space-separated argument names in args.

        The pseudo-name '*' collects every remaining known argument
        (except 'cmd') into a dict.
        """
        knownargs = self._args()
        data = {}
        keys = args.split()
        for k in keys:
            if k == '*':
                star = {}
                for key in knownargs.keys():
                    if key != 'cmd' and key not in keys:
                        star[key] = knownargs[key][0]
                data['*'] = star
            else:
                data[k] = knownargs[k][0]
        return [data[k] for k in keys]
    def _args(self):
        """Return all request arguments: the form data plus any arguments
        smuggled through numbered X-HgArg-<n> headers (used when the
        query string would be too long)."""
        args = self.req.form.copy()
        chunks = []
        i = 1
        while True:
            h = self.req.env.get('HTTP_X_HGARG_' + str(i))
            if h is None:
                break
            chunks += [h]
            i += 1
        args.update(cgi.parse_qs(''.join(chunks), keep_blank_values=True))
        return args
    def getfile(self, fp):
        """Copy the request body (CONTENT_LENGTH bytes) into fp in chunks."""
        length = int(self.req.env['CONTENT_LENGTH'])
        for s in util.filechunkiter(self.req, limit=length):
            fp.write(s)
    def redirect(self):
        # capture stdout/stderr so command output can be sent back to
        # the client instead of leaking into the server's streams;
        # restored by the caller from self.oldio
        self.oldio = sys.stdout, sys.stderr
        sys.stderr = sys.stdout = cStringIO.StringIO()
    def groupchunks(self, cg):
        """Generate zlib-compressed chunks of the data read from cg."""
        z = zlib.compressobj()
        while True:
            chunk = cg.read(4096)
            if not chunk:
                break
            yield z.compress(chunk)
        # flush emits any compressed data still buffered in z
        yield z.flush()
    def _client(self):
        # identification string for the remote client, e.g. for logging:
        # 'remote:<scheme>:<host>:<user>' with host/user URL-quoted
        return 'remote:%s:%s:%s' % (
            self.req.env.get('wsgi.url_scheme') or 'http',
            urllib.quote(self.req.env.get('REMOTE_HOST', '')),
            urllib.quote(self.req.env.get('REMOTE_USER', '')))
64
64
def iscmd(cmd):
    """Return True if cmd is a known wire protocol command."""
    return cmd in wireproto.commands
67
67
def call(repo, req, cmd):
    """Dispatch wire protocol command cmd for an HTTP request and return
    the WSGI response body (an iterable of strings).

    The response shape depends on the type wireproto.dispatch returns:
    a plain string, a streamres (generator of chunks), a pushres
    (push result plus captured output), or a pusherr (push failure).
    """
    p = webproto(req)
    rsp = wireproto.dispatch(repo, p, cmd)
    if isinstance(rsp, str):
        # simple string result: send with an explicit length
        req.respond(HTTP_OK, HGTYPE, length=len(rsp))
        return [rsp]
    elif isinstance(rsp, wireproto.streamres):
        # streamed result: hand the generator straight to WSGI
        req.respond(HTTP_OK, HGTYPE)
        return rsp.gen
    elif isinstance(rsp, wireproto.pushres):
        # push succeeded: return the return code plus the command
        # output captured by webproto.redirect()
        val = sys.stdout.getvalue()
        sys.stdout, sys.stderr = p.oldio
        req.respond(HTTP_OK, HGTYPE)
        return ['%d\n%s' % (rsp.res, val)]
    elif isinstance(rsp, wireproto.pusherr):
        # drain the incoming bundle
        req.drain()
        sys.stdout, sys.stderr = p.oldio
        # '0' return code signals failure, followed by the error message
        rsp = '0\n%s\n' % rsp.res
        req.respond(HTTP_OK, HGTYPE, length=len(rsp))
        return [rsp]
@@ -1,765 +1,765
1 # This library is free software; you can redistribute it and/or
1 # This library is free software; you can redistribute it and/or
2 # modify it under the terms of the GNU Lesser General Public
2 # modify it under the terms of the GNU Lesser General Public
3 # License as published by the Free Software Foundation; either
3 # License as published by the Free Software Foundation; either
4 # version 2.1 of the License, or (at your option) any later version.
4 # version 2.1 of the License, or (at your option) any later version.
5 #
5 #
6 # This library is distributed in the hope that it will be useful,
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
9 # Lesser General Public License for more details.
10 #
10 #
11 # You should have received a copy of the GNU Lesser General Public
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the
12 # License along with this library; if not, write to the
13 # Free Software Foundation, Inc.,
13 # Free Software Foundation, Inc.,
14 # 59 Temple Place, Suite 330,
14 # 59 Temple Place, Suite 330,
15 # Boston, MA 02111-1307 USA
15 # Boston, MA 02111-1307 USA
16
16
17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
19
19
20 # Modified by Benoit Boissinot:
20 # Modified by Benoit Boissinot:
21 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
21 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
22 # Modified by Dirkjan Ochtman:
22 # Modified by Dirkjan Ochtman:
23 # - import md5 function from a local util module
23 # - import md5 function from a local util module
24 # Modified by Martin Geisler:
24 # Modified by Martin Geisler:
25 # - moved md5 function from local util module to this module
25 # - moved md5 function from local util module to this module
26 # Modified by Augie Fackler:
26 # Modified by Augie Fackler:
27 # - add safesend method and use it to prevent broken pipe errors
27 # - add safesend method and use it to prevent broken pipe errors
28 # on large POST requests
28 # on large POST requests
29
29
30 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
30 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
31
31
32 >>> import urllib2
32 >>> import urllib2
33 >>> from keepalive import HTTPHandler
33 >>> from keepalive import HTTPHandler
34 >>> keepalive_handler = HTTPHandler()
34 >>> keepalive_handler = HTTPHandler()
35 >>> opener = urllib2.build_opener(keepalive_handler)
35 >>> opener = urllib2.build_opener(keepalive_handler)
36 >>> urllib2.install_opener(opener)
36 >>> urllib2.install_opener(opener)
37 >>>
37 >>>
38 >>> fo = urllib2.urlopen('http://www.python.org')
38 >>> fo = urllib2.urlopen('http://www.python.org')
39
39
40 If a connection to a given host is requested, and all of the existing
40 If a connection to a given host is requested, and all of the existing
41 connections are still in use, another connection will be opened. If
41 connections are still in use, another connection will be opened. If
42 the handler tries to use an existing connection but it fails in some
42 the handler tries to use an existing connection but it fails in some
43 way, it will be closed and removed from the pool.
43 way, it will be closed and removed from the pool.
44
44
45 To remove the handler, simply re-run build_opener with no arguments, and
45 To remove the handler, simply re-run build_opener with no arguments, and
46 install that opener.
46 install that opener.
47
47
48 You can explicitly close connections by using the close_connection()
48 You can explicitly close connections by using the close_connection()
49 method of the returned file-like object (described below) or you can
49 method of the returned file-like object (described below) or you can
50 use the handler methods:
50 use the handler methods:
51
51
52 close_connection(host)
52 close_connection(host)
53 close_all()
53 close_all()
54 open_connections()
54 open_connections()
55
55
56 NOTE: using the close_connection and close_all methods of the handler
56 NOTE: using the close_connection and close_all methods of the handler
57 should be done with care when using multiple threads.
57 should be done with care when using multiple threads.
58 * there is nothing that prevents another thread from creating new
58 * there is nothing that prevents another thread from creating new
59 connections immediately after connections are closed
59 connections immediately after connections are closed
60 * no checks are done to prevent in-use connections from being closed
60 * no checks are done to prevent in-use connections from being closed
61
61
62 >>> keepalive_handler.close_all()
62 >>> keepalive_handler.close_all()
63
63
64 EXTRA ATTRIBUTES AND METHODS
64 EXTRA ATTRIBUTES AND METHODS
65
65
66 Upon a status of 200, the object returned has a few additional
66 Upon a status of 200, the object returned has a few additional
67 attributes and methods, which should not be used if you want to
67 attributes and methods, which should not be used if you want to
68 remain consistent with the normal urllib2-returned objects:
68 remain consistent with the normal urllib2-returned objects:
69
69
70 close_connection() - close the connection to the host
70 close_connection() - close the connection to the host
71 readlines() - you know, readlines()
71 readlines() - you know, readlines()
72 status - the return status (ie 404)
72 status - the return status (ie 404)
73 reason - english translation of status (ie 'File not found')
73 reason - english translation of status (ie 'File not found')
74
74
75 If you want the best of both worlds, use this inside an
75 If you want the best of both worlds, use this inside an
76 AttributeError-catching try:
76 AttributeError-catching try:
77
77
78 >>> try: status = fo.status
78 >>> try: status = fo.status
79 >>> except AttributeError: status = None
79 >>> except AttributeError: status = None
80
80
81 Unfortunately, these are ONLY there if status == 200, so it's not
81 Unfortunately, these are ONLY there if status == 200, so it's not
82 easy to distinguish between non-200 responses. The reason is that
82 easy to distinguish between non-200 responses. The reason is that
83 urllib2 tries to do clever things with error codes 301, 302, 401,
83 urllib2 tries to do clever things with error codes 301, 302, 401,
84 and 407, and it wraps the object upon return.
84 and 407, and it wraps the object upon return.
85
85
86 For python versions earlier than 2.4, you can avoid this fancy error
86 For python versions earlier than 2.4, you can avoid this fancy error
87 handling by setting the module-level global HANDLE_ERRORS to zero.
87 handling by setting the module-level global HANDLE_ERRORS to zero.
88 You see, prior to 2.4, it's the HTTP Handler's job to determine what
88 You see, prior to 2.4, it's the HTTP Handler's job to determine what
89 to handle specially, and what to just pass up. HANDLE_ERRORS == 0
89 to handle specially, and what to just pass up. HANDLE_ERRORS == 0
90 means "pass everything up". In python 2.4, however, this job no
90 means "pass everything up". In python 2.4, however, this job no
91 longer belongs to the HTTP Handler and is now done by a NEW handler,
91 longer belongs to the HTTP Handler and is now done by a NEW handler,
92 HTTPErrorProcessor. Here's the bottom line:
92 HTTPErrorProcessor. Here's the bottom line:
93
93
94 python version < 2.4
94 python version < 2.4
95 HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
95 HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
96 errors
96 errors
97 HANDLE_ERRORS == 0 pass everything up, error processing is
97 HANDLE_ERRORS == 0 pass everything up, error processing is
98 left to the calling code
98 left to the calling code
99 python version >= 2.4
99 python version >= 2.4
100 HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
100 HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
101 HANDLE_ERRORS == 0 (default) pass everything up, let the
101 HANDLE_ERRORS == 0 (default) pass everything up, let the
102 other handlers (specifically,
102 other handlers (specifically,
103 HTTPErrorProcessor) decide what to do
103 HTTPErrorProcessor) decide what to do
104
104
105 In practice, setting the variable either way makes little difference
105 In practice, setting the variable either way makes little difference
106 in python 2.4, so for the most consistent behavior across versions,
106 in python 2.4, so for the most consistent behavior across versions,
107 you probably just want to use the defaults, which will give you
107 you probably just want to use the defaults, which will give you
108 exceptions on errors.
108 exceptions on errors.
109
109
110 """
110 """
111
111
112 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
112 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
113
113
114 import errno
114 import errno
115 import httplib
115 import httplib
116 import socket
116 import socket
117 import thread
117 import thread
118 import urllib2
118 import urllib2
119
119
# Module-level debug sink; left as None to disable keepalive debug output.
DEBUG = None

import sys
# Before Python 2.4 the HTTP handler itself had to turn non-200 responses
# into errors; from 2.4 on HTTPErrorProcessor does that job, so default to
# passing everything up (see the module docstring for the full story).
if sys.version_info < (2, 4):
    HANDLE_ERRORS = 1
else:
    HANDLE_ERRORS = 0
126
126
127 class ConnectionManager:
127 class ConnectionManager:
128 """
128 """
129 The connection manager must be able to:
129 The connection manager must be able to:
130 * keep track of all existing
130 * keep track of all existing
131 """
131 """
132 def __init__(self):
132 def __init__(self):
133 self._lock = thread.allocate_lock()
133 self._lock = thread.allocate_lock()
134 self._hostmap = {} # map hosts to a list of connections
134 self._hostmap = {} # map hosts to a list of connections
135 self._connmap = {} # map connections to host
135 self._connmap = {} # map connections to host
136 self._readymap = {} # map connection to ready state
136 self._readymap = {} # map connection to ready state
137
137
138 def add(self, host, connection, ready):
138 def add(self, host, connection, ready):
139 self._lock.acquire()
139 self._lock.acquire()
140 try:
140 try:
141 if not host in self._hostmap:
141 if not host in self._hostmap:
142 self._hostmap[host] = []
142 self._hostmap[host] = []
143 self._hostmap[host].append(connection)
143 self._hostmap[host].append(connection)
144 self._connmap[connection] = host
144 self._connmap[connection] = host
145 self._readymap[connection] = ready
145 self._readymap[connection] = ready
146 finally:
146 finally:
147 self._lock.release()
147 self._lock.release()
148
148
149 def remove(self, connection):
149 def remove(self, connection):
150 self._lock.acquire()
150 self._lock.acquire()
151 try:
151 try:
152 try:
152 try:
153 host = self._connmap[connection]
153 host = self._connmap[connection]
154 except KeyError:
154 except KeyError:
155 pass
155 pass
156 else:
156 else:
157 del self._connmap[connection]
157 del self._connmap[connection]
158 del self._readymap[connection]
158 del self._readymap[connection]
159 self._hostmap[host].remove(connection)
159 self._hostmap[host].remove(connection)
160 if not self._hostmap[host]: del self._hostmap[host]
160 if not self._hostmap[host]: del self._hostmap[host]
161 finally:
161 finally:
162 self._lock.release()
162 self._lock.release()
163
163
164 def set_ready(self, connection, ready):
164 def set_ready(self, connection, ready):
165 try:
165 try:
166 self._readymap[connection] = ready
166 self._readymap[connection] = ready
167 except KeyError:
167 except KeyError:
168 pass
168 pass
169
169
def get_ready_conn(self, host):
    """Hand out one ready connection for *host*, or None.

    The returned connection is flipped to busy (ready = 0) before the
    lock is released, so no other caller can claim it concurrently.
    """
    claimed = None
    self._lock.acquire()
    try:
        for candidate in self._hostmap.get(host, ()):
            if self._readymap[candidate]:
                self._readymap[candidate] = 0
                claimed = candidate
                break
    finally:
        self._lock.release()
    return claimed
183
183
def get_all(self, host=None):
    """Return a copy of the pool.

    With *host*: a copy of that host's connection list (empty list if
    unknown).  Without: a shallow copy of the whole host map.
    """
    if not host:
        return dict(self._hostmap)
    return list(self._hostmap.get(host, []))
189
189
class KeepAliveHandler:
    """urllib2-style handler that keeps HTTP connections alive across
    requests, pooling them per host through a ConnectionManager."""
    def __init__(self):
        self._cm = ConnectionManager()

    #### Connection Management
    def open_connections(self):
        """return a list of connected hosts and the number of connections
        to each.  [('foo.com:80', 2), ('bar.org', 1)]"""
        return [(host, len(li)) for (host, li) in self._cm.get_all().items()]

    def close_connection(self, host):
        """close connection(s) to <host>
        host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
        no error occurs if there is no connection to that host."""
        for h in self._cm.get_all(host):
            self._cm.remove(h)
            h.close()

    def close_all(self):
        """close all open connections"""
        for host, conns in self._cm.get_all().iteritems():
            for h in conns:
                self._cm.remove(h)
                h.close()

    def _request_closed(self, request, host, connection):
        """tells us that this request is now closed and the
        connection is ready for another request"""
        self._cm.set_ready(connection, 1)

    def _remove_connection(self, host, connection, close=0):
        # Drop *connection* from the pool, optionally closing its socket.
        if close:
            connection.close()
        self._cm.remove(connection)

    #### Transaction Execution
    def http_open(self, req):
        # Entry point invoked by urllib2's opener machinery.
        return self.do_open(HTTPConnection, req)

    def do_open(self, http_class, req):
        """Perform *req*, reusing a pooled ready connection when one
        works, otherwise opening a fresh one; returns the response,
        decorated so it can notify this handler when it is closed."""
        host = req.get_host()
        if not host:
            raise urllib2.URLError('no host given')

        try:
            h = self._cm.get_ready_conn(host)
            while h:
                r = self._reuse_connection(h, req, host)

                # if this response is non-None, then it worked and we're
                # done.  Break out, skipping the else block.
                if r:
                    break

                # connection is bad - possibly closed by server
                # discard it and ask for the next free connection
                h.close()
                self._cm.remove(h)
                h = self._cm.get_ready_conn(host)
            else:
                # no (working) free connections were found.  Create a new one.
                h = http_class(host)
                if DEBUG:
                    DEBUG.info("creating new connection to %s (%d)",
                               host, id(h))
                # registered as busy (ready=0) until the request completes
                self._cm.add(host, h, 0)
                self._start_transaction(h, req)
                r = h.getresponse()
        except (socket.error, httplib.HTTPException), err:
            raise urllib2.URLError(err)

        # if not a persistent connection, don't try to reuse it
        if r.will_close:
            self._cm.remove(h)

        if DEBUG:
            DEBUG.info("STATUS: %s, %s", r.status, r.reason)
        # decorate the response so close()/close_connection() on it can
        # call back into _request_closed()/_remove_connection()
        r._handler = self
        r._host = host
        r._url = req.get_full_url()
        r._connection = h
        r.code = r.status
        r.headers = r.msg
        r.msg = r.reason

        if r.status == 200 or not HANDLE_ERRORS:
            return r
        else:
            return self.parent.error('http', req, r,
                                     r.status, r.msg, r.headers)

    def _reuse_connection(self, h, req, host):
        """start the transaction with a re-used connection
        return a response object (r) upon success or None on failure.
        This DOES not close or remove bad connections in cases where
        it returns.  However, if an unexpected exception occurs, it
        will close and remove the connection before re-raising.
        """
        try:
            self._start_transaction(h, req)
            r = h.getresponse()
            # note: just because we got something back doesn't mean it
            # worked.  We'll check the version below, too.
        except (socket.error, httplib.HTTPException):
            r = None
        except:
            # adding this block just in case we've missed
            # something we will still raise the exception, but
            # lets try and close the connection and remove it
            # first.  We previously got into a nasty loop
            # where an exception was uncaught, and so the
            # connection stayed open.  On the next try, the
            # same exception was raised, etc.  The tradeoff is
            # that it's now possible this call will raise
            # a DIFFERENT exception
            if DEBUG:
                DEBUG.error("unexpected exception - closing "
                            "connection to %s (%d)", host, id(h))
            self._cm.remove(h)
            h.close()
            raise

        if r is None or r.version == 9:
            # httplib falls back to assuming HTTP 0.9 if it gets a
            # bad header back.  This is most likely to happen if
            # the socket has been closed by the server since we
            # last used the connection.
            if DEBUG:
                DEBUG.info("failed to re-use connection to %s (%d)",
                           host, id(h))
            r = None
        else:
            if DEBUG:
                DEBUG.info("re-using connection to %s (%d)", host, id(h))

        return r

    def _start_transaction(self, h, req):
        # What follows mostly reimplements HTTPConnection.request()
        # except it adds self.parent.addheaders in the mix.
        headers = req.headers.copy()
        if sys.version_info >= (2, 4):
            headers.update(req.unredirected_hdrs)
        headers.update(self.parent.addheaders)
        headers = dict((n.lower(), v) for n, v in headers.items())
        skipheaders = {}
        for n in ('host', 'accept-encoding'):
            # suppress httplib's auto-generated header so the value we
            # send below (putheader loop) wins
            if n in headers:
                skipheaders['skip_' + n.replace('-', '_')] = 1
        try:
            if req.has_data():
                data = req.get_data()
                h.putrequest('POST', req.get_selector(), **skipheaders)
                if 'content-type' not in headers:
                    h.putheader('Content-type',
                                'application/x-www-form-urlencoded')
                if 'content-length' not in headers:
                    h.putheader('Content-length', '%d' % len(data))
            else:
                h.putrequest('GET', req.get_selector(), **skipheaders)
        except (socket.error), err:
            raise urllib2.URLError(err)
        for k, v in headers.items():
            h.putheader(k, v)
        h.endheaders()
        if req.has_data():
            h.send(data)
357
357
class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
    # urllib2 handler that plugs the keep-alive machinery into an
    # opener; all behavior comes from KeepAliveHandler (http_open)
    # and the urllib2 base class.
    pass
360
360
class HTTPResponse(httplib.HTTPResponse):
    # we need to subclass HTTPResponse in order to
    # 1) add readline() and readlines() methods
    # 2) add close_connection() methods
    # 3) add info() and geturl() methods

    # in order to add readline(), read must be modified to deal with a
    # buffer.  example: readline must read a buffer and then spit back
    # one line at a time.  The only real alternative is to read one
    # BYTE at a time (ick).  Once something has been read, it can't be
    # put back (ok, maybe it can, but that's even uglier than this),
    # so if you THEN do a normal read, you must first take stuff from
    # the buffer.

    # the read method wraps the original to accomodate buffering,
    # although read() never adds to the buffer.
    # Both readline and readlines have been stolen with almost no
    # modification from socket.py


    def __init__(self, sock, debuglevel=0, strict=0, method=None):
        if method: # the httplib in python 2.3 uses the method arg
            httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
        else: # 2.2 doesn't
            httplib.HTTPResponse.__init__(self, sock, debuglevel)
        self.fileno = sock.fileno
        self.code = None
        self._rbuf = ''          # readline() lookahead buffer
        self._rbufsize = 8096    # chunk size for refilling _rbuf
        self._handler = None # inserted by the handler later
        self._host = None # (same)
        self._url = None # (same)
        self._connection = None # (same)

    # keep a handle on the unbuffered base-class read; read() and
    # readline() below layer the _rbuf buffer on top of it
    _raw_read = httplib.HTTPResponse.read

    def close(self):
        # Close the body stream and tell the owning handler the pooled
        # connection is free for the next request.
        if self.fp:
            self.fp.close()
            self.fp = None
        if self._handler:
            self._handler._request_closed(self, self._host,
                                          self._connection)

    def close_connection(self):
        # Unlike close(), this tears down the underlying socket too.
        self._handler._remove_connection(self._host, self._connection, close=1)
        self.close()

    def info(self):
        # urllib2 file-like API: the response headers.
        return self.headers

    def geturl(self):
        # urllib2 file-like API: the originally requested URL.
        return self._url

    def read(self, amt=None):
        # Serve (part of) the readline() buffer first, then fall back
        # to the raw base-class read.
        # the _rbuf test is only in this first if for speed.  It's not
        # logically necessary
        if self._rbuf and not amt is None:
            L = len(self._rbuf)
            if amt > L:
                amt -= L
            else:
                s = self._rbuf[:amt]
                self._rbuf = self._rbuf[amt:]
                return s

        s = self._rbuf + self._raw_read(amt)
        self._rbuf = ''
        return s

    # stolen from Python SVN #68532 to fix issue1088
    def _read_chunked(self, amt):
        chunk_left = self.chunk_left
        value = ''

        # XXX This accumulates chunks by repeated string concatenation,
        # which is not efficient as the number or size of chunks gets big.
        while True:
            if chunk_left is None:
                # at a chunk boundary: parse the hex size line
                line = self.fp.readline()
                i = line.find(';')
                if i >= 0:
                    line = line[:i] # strip chunk-extensions
                try:
                    chunk_left = int(line, 16)
                except ValueError:
                    # close the connection as protocol synchronisation is
                    # probably lost
                    self.close()
                    raise httplib.IncompleteRead(value)
                if chunk_left == 0:
                    # last-chunk marker; fall through to trailer handling
                    break
            if amt is None:
                value += self._safe_read(chunk_left)
            elif amt < chunk_left:
                value += self._safe_read(amt)
                self.chunk_left = chunk_left - amt
                return value
            elif amt == chunk_left:
                value += self._safe_read(amt)
                self._safe_read(2)  # toss the CRLF at the end of the chunk
                self.chunk_left = None
                return value
            else:
                value += self._safe_read(chunk_left)
                amt -= chunk_left

            # we read the whole chunk, get another
            self._safe_read(2)  # toss the CRLF at the end of the chunk
            chunk_left = None

        # read and discard trailer up to the CRLF terminator
        ### note: we shouldn't have any trailers!
        while True:
            line = self.fp.readline()
            if not line:
                # a vanishingly small number of sites EOF without
                # sending the trailer
                break
            if line == '\r\n':
                break

        # we read everything; close the "file"
        self.close()

        return value

    def readline(self, limit=-1):
        # Return one line (or up to *limit* bytes), buffering raw reads
        # in _rbuf; adapted from socket.py per the class comment above.
        i = self._rbuf.find('\n')
        while i < 0 and not (0 < limit <= len(self._rbuf)):
            new = self._raw_read(self._rbufsize)
            if not new:
                break
            i = new.find('\n')
            if i >= 0:
                i = i + len(self._rbuf)
            self._rbuf = self._rbuf + new
        if i < 0:
            i = len(self._rbuf)
        else:
            i = i + 1
        if 0 <= limit < len(self._rbuf):
            i = limit
        data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
        return data

    def readlines(self, sizehint = 0):
        # Collect lines until EOF, or until at least *sizehint* bytes
        # have been gathered (if sizehint is nonzero).
        total = 0
        list = []
        while True:
            line = self.readline()
            if not line:
                break
            list.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return list
519
519
def safesend(self, str):
    """Send `str' to the server.

    Shamelessly ripped off from httplib to patch a bad behavior.

    Installed as HTTPConnection.send below; `str' may also be a
    read()-able object, in which case it is streamed in blocks.
    """
    # _broken_pipe_resp is an attribute we set in this function
    # if the socket is closed while we're sending data but
    # the server sent us a response before hanging up.
    # In that case, we want to pretend to send the rest of the
    # outgoing data, and then let the user use getresponse()
    # (which we wrap) to get this last response before
    # opening a new socket.
    if getattr(self, '_broken_pipe_resp', None) is not None:
        return

    if self.sock is None:
        if self.auto_open:
            self.connect()
        else:
            raise httplib.NotConnected()

    # send the data to the server. if we get a broken pipe, then close
    # the socket. we want to reconnect when somebody tries to send again.
    #
    # NOTE: we DO propagate the error, though, because we cannot simply
    # ignore the error... the caller will know if they can retry.
    if self.debuglevel > 0:
        print "send:", repr(str)
    try:
        blocksize = 8192
        if hasattr(str,'read') :
            if self.debuglevel > 0:
                print "sendIng a read()able"
            data = str.read(blocksize)
            while data:
                self.sock.sendall(data)
                data = str.read(blocksize)
        else:
            self.sock.sendall(str)
    except socket.error, v:
        reraise = True
        if v[0] == errno.EPIPE: # Broken pipe
            if self._HTTPConnection__state == httplib._CS_REQ_SENT:
                # Set None first so the wrapped getresponse() (see
                # wrapgetresponse) falls through to the real read
                # instead of returning the attribute we're filling in.
                self._broken_pipe_resp = None
                self._broken_pipe_resp = self.getresponse()
                reraise = False
            self.close()
        if reraise:
            raise
569
569
def wrapgetresponse(cls):
    """Wraps getresponse in cls with a broken-pipe sane version.
    """
    def safegetresponse(self):
        # A broken pipe during send() may have stashed the server's
        # early response on the instance (see safesend); hand that back
        # instead of attempting a fresh read.
        cached = getattr(self, '_broken_pipe_resp', None)
        if cached is None:
            return cls.getresponse(self)
        return cached
    safegetresponse.__doc__ = cls.getresponse.__doc__
    return safegetresponse
584
584
class HTTPConnection(httplib.HTTPConnection):
    # httplib.HTTPConnection with the keep-alive-aware pieces swapped in:
    # use the modified response class
    response_class = HTTPResponse
    # send() that survives EPIPE when the server answered early
    send = safesend
    # getresponse() that returns a response stashed by safesend(), if any
    getresponse = wrapgetresponse(httplib.HTTPConnection)
590
590
591
591
592 #########################################################################
592 #########################################################################
593 ##### TEST FUNCTIONS
593 ##### TEST FUNCTIONS
594 #########################################################################
594 #########################################################################
595
595
def error_handler(url):
    """Fetch *url* twice, once with HANDLE_ERRORS off and once on,
    printing the status each time; leaves the keepalive opener
    installed and restores the original HANDLE_ERRORS setting."""
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
        HANDLE_ERRORS = i
        try:
            fo = urllib2.urlopen(url)
            fo.read()
            fo.close()
            try:
                status, reason = fo.status, fo.reason
            except AttributeError:
                # with HANDLE_ERRORS on, errors come back as exceptions
                # and the response may lack these attributes
                status, reason = None, None
        except IOError, e:
            print " EXCEPTION: %s" % e
            raise
        else:
            print " status = %s, reason = %s" % (status, reason)
    HANDLE_ERRORS = orig
    hosts = keepalive_handler.open_connections()
    print "open connections:", hosts
    keepalive_handler.close_all()
623
623
def md5(s):
    """Hash *s* with MD5, lazily binding the real implementation.

    The first call resolves the best available md5 (hashlib on modern
    Pythons, the legacy md5 module otherwise) and replaces this shim at
    module level, so subsequent calls go straight to the real thing.
    """
    try:
        import hashlib
        _impl = hashlib.md5
    except ImportError:
        import md5 as _md5mod
        _impl = _md5mod.md5
    global md5
    md5 = _impl
    return _impl(s)
632
632
633 def continuity(url):
633 def continuity(url):
634 format = '%25s: %s'
634 format = '%25s: %s'
635
635
636 # first fetch the file with the normal http handler
636 # first fetch the file with the normal http handler
637 opener = urllib2.build_opener()
637 opener = urllib2.build_opener()
638 urllib2.install_opener(opener)
638 urllib2.install_opener(opener)
639 fo = urllib2.urlopen(url)
639 fo = urllib2.urlopen(url)
640 foo = fo.read()
640 foo = fo.read()
641 fo.close()
641 fo.close()
642 m = md5.new(foo)
642 m = md5.new(foo)
643 print format % ('normal urllib', m.hexdigest())
643 print format % ('normal urllib', m.hexdigest())
644
644
645 # now install the keepalive handler and try again
645 # now install the keepalive handler and try again
646 opener = urllib2.build_opener(HTTPHandler())
646 opener = urllib2.build_opener(HTTPHandler())
647 urllib2.install_opener(opener)
647 urllib2.install_opener(opener)
648
648
649 fo = urllib2.urlopen(url)
649 fo = urllib2.urlopen(url)
650 foo = fo.read()
650 foo = fo.read()
651 fo.close()
651 fo.close()
652 m = md5.new(foo)
652 m = md5.new(foo)
653 print format % ('keepalive read', m.hexdigest())
653 print format % ('keepalive read', m.hexdigest())
654
654
655 fo = urllib2.urlopen(url)
655 fo = urllib2.urlopen(url)
656 foo = ''
656 foo = ''
657 while 1:
657 while True:
658 f = fo.readline()
658 f = fo.readline()
659 if f:
659 if f:
660 foo = foo + f
660 foo = foo + f
661 else: break
661 else: break
662 fo.close()
662 fo.close()
663 m = md5.new(foo)
663 m = md5.new(foo)
664 print format % ('keepalive readline', m.hexdigest())
664 print format % ('keepalive readline', m.hexdigest())
665
665
def comp(N, url):
    """Time *N* fetches of *url* with the normal urllib handlers versus
    the keepalive handler and print the speed-up factor."""
    print '  making %i connections to:\n  %s' % (N, url)

    sys.stdout.write('  first using the normal urllib handlers')
    # first use normal opener
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    t1 = fetch(N, url)
    print '  TIME: %.3f s' % t1

    sys.stdout.write('  now using the keepalive handler ')
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    t2 = fetch(N, url)
    print '  TIME: %.3f s' % t2
    print '  improvement factor: %.2f' % (t1 / t2)
683
683
def fetch(N, url, delay=0):
    """Fetch *url* N times (sleeping *delay* seconds between fetches
    when nonzero) and return the elapsed wall-clock time in seconds."""
    import time
    lens = []
    starttime = time.time()
    for i in range(N):
        if delay and i > 0:
            time.sleep(delay)
        fo = urllib2.urlopen(url)
        foo = fo.read()
        fo.close()
        lens.append(len(foo))
    diff = time.time() - starttime

    # sanity check: every fetch should have returned the same number
    # of bytes as the first one
    j = 0
    for i in lens[1:]:
        j = j + 1
        if not i == lens[0]:
            print "WARNING: inconsistent length on read %i: %i" % (j, i)

    return diff
704
704
def test_timeout(url):
    """Fetch *url*, wait 20s so the server can close the idle keepalive
    connection, fetch again, and verify both bodies match — exercising
    the dropped-connection recovery path (with debug logging forced on)."""
    global DEBUG
    dbbackup = DEBUG
    class FakeLogger:
        # minimal logger: every level just prints the formatted message
        def debug(self, msg, *args):
            print msg % args
        info = warning = error = debug
    DEBUG = FakeLogger()
    print "  fetching the file to establish a connection"
    fo = urllib2.urlopen(url)
    data1 = fo.read()
    fo.close()

    i = 20
    print "  waiting %i seconds for the server to close the connection" % i
    while i > 0:
        sys.stdout.write('\r  %2i' % i)
        sys.stdout.flush()
        time.sleep(1)
        i -= 1
    # NOTE(review): countdown goes to stdout but this carriage return
    # goes to stderr — looks inconsistent; confirm before changing
    sys.stderr.write('\r')

    print "  fetching the file a second time"
    fo = urllib2.urlopen(url)
    data2 = fo.read()
    fo.close()

    if data1 == data2:
        print '  data are identical'
    else:
        print '  ERROR: DATA DIFFER'

    DEBUG = dbbackup
738
738
739
739
740 def test(url, N=10):
740 def test(url, N=10):
741 print "checking error hander (do this on a non-200)"
741 print "checking error hander (do this on a non-200)"
742 try: error_handler(url)
742 try: error_handler(url)
743 except IOError:
743 except IOError:
744 print "exiting - exception will prevent further tests"
744 print "exiting - exception will prevent further tests"
745 sys.exit()
745 sys.exit()
746 print
746 print
747 print "performing continuity test (making sure stuff isn't corrupted)"
747 print "performing continuity test (making sure stuff isn't corrupted)"
748 continuity(url)
748 continuity(url)
749 print
749 print
750 print "performing speed comparison"
750 print "performing speed comparison"
751 comp(N, url)
751 comp(N, url)
752 print
752 print
753 print "performing dropped-connection check"
753 print "performing dropped-connection check"
754 test_timeout(url)
754 test_timeout(url)
755
755
756 if __name__ == '__main__':
756 if __name__ == '__main__':
757 import time
757 import time
758 import sys
758 import sys
759 try:
759 try:
760 N = int(sys.argv[1])
760 N = int(sys.argv[1])
761 url = sys.argv[2]
761 url = sys.argv[2]
762 except:
762 except:
763 print "%s <integer> <url>" % sys.argv[0]
763 print "%s <integer> <url>" % sys.argv[0]
764 else:
764 else:
765 test(url, N)
765 test(url, N)
@@ -1,1982 +1,1982
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error
13 import scmutil, util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
23 'known', 'getbundle'))
23 'known', 'getbundle'))
24 supportedformats = set(('revlogv1', 'generaldelta'))
24 supportedformats = set(('revlogv1', 'generaldelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
28 def __init__(self, baseui, path=None, create=False):
28 def __init__(self, baseui, path=None, create=False):
29 repo.repository.__init__(self)
29 repo.repository.__init__(self)
30 self.root = os.path.realpath(util.expandpath(path))
30 self.root = os.path.realpath(util.expandpath(path))
31 self.path = os.path.join(self.root, ".hg")
31 self.path = os.path.join(self.root, ".hg")
32 self.origroot = path
32 self.origroot = path
33 self.auditor = scmutil.pathauditor(self.root, self._checknested)
33 self.auditor = scmutil.pathauditor(self.root, self._checknested)
34 self.opener = scmutil.opener(self.path)
34 self.opener = scmutil.opener(self.path)
35 self.wopener = scmutil.opener(self.root)
35 self.wopener = scmutil.opener(self.root)
36 self.baseui = baseui
36 self.baseui = baseui
37 self.ui = baseui.copy()
37 self.ui = baseui.copy()
38
38
39 try:
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 extensions.loadall(self.ui)
41 extensions.loadall(self.ui)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 if not os.path.isdir(self.path):
45 if not os.path.isdir(self.path):
46 if create:
46 if create:
47 if not os.path.exists(path):
47 if not os.path.exists(path):
48 util.makedirs(path)
48 util.makedirs(path)
49 util.makedir(self.path, notindexed=True)
49 util.makedir(self.path, notindexed=True)
50 requirements = ["revlogv1"]
50 requirements = ["revlogv1"]
51 if self.ui.configbool('format', 'usestore', True):
51 if self.ui.configbool('format', 'usestore', True):
52 os.mkdir(os.path.join(self.path, "store"))
52 os.mkdir(os.path.join(self.path, "store"))
53 requirements.append("store")
53 requirements.append("store")
54 if self.ui.configbool('format', 'usefncache', True):
54 if self.ui.configbool('format', 'usefncache', True):
55 requirements.append("fncache")
55 requirements.append("fncache")
56 if self.ui.configbool('format', 'dotencode', True):
56 if self.ui.configbool('format', 'dotencode', True):
57 requirements.append('dotencode')
57 requirements.append('dotencode')
58 # create an invalid changelog
58 # create an invalid changelog
59 self.opener.append(
59 self.opener.append(
60 "00changelog.i",
60 "00changelog.i",
61 '\0\0\0\2' # represents revlogv2
61 '\0\0\0\2' # represents revlogv2
62 ' dummy changelog to prevent using the old repo layout'
62 ' dummy changelog to prevent using the old repo layout'
63 )
63 )
64 if self.ui.configbool('format', 'generaldelta', False):
64 if self.ui.configbool('format', 'generaldelta', False):
65 requirements.append("generaldelta")
65 requirements.append("generaldelta")
66 else:
66 else:
67 raise error.RepoError(_("repository %s not found") % path)
67 raise error.RepoError(_("repository %s not found") % path)
68 elif create:
68 elif create:
69 raise error.RepoError(_("repository %s already exists") % path)
69 raise error.RepoError(_("repository %s already exists") % path)
70 else:
70 else:
71 try:
71 try:
72 requirements = scmutil.readrequires(self.opener, self.supported)
72 requirements = scmutil.readrequires(self.opener, self.supported)
73 except IOError, inst:
73 except IOError, inst:
74 if inst.errno != errno.ENOENT:
74 if inst.errno != errno.ENOENT:
75 raise
75 raise
76 requirements = set()
76 requirements = set()
77
77
78 self.sharedpath = self.path
78 self.sharedpath = self.path
79 try:
79 try:
80 s = os.path.realpath(self.opener.read("sharedpath"))
80 s = os.path.realpath(self.opener.read("sharedpath"))
81 if not os.path.exists(s):
81 if not os.path.exists(s):
82 raise error.RepoError(
82 raise error.RepoError(
83 _('.hg/sharedpath points to nonexistent directory %s') % s)
83 _('.hg/sharedpath points to nonexistent directory %s') % s)
84 self.sharedpath = s
84 self.sharedpath = s
85 except IOError, inst:
85 except IOError, inst:
86 if inst.errno != errno.ENOENT:
86 if inst.errno != errno.ENOENT:
87 raise
87 raise
88
88
89 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
89 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
90 self.spath = self.store.path
90 self.spath = self.store.path
91 self.sopener = self.store.opener
91 self.sopener = self.store.opener
92 self.sjoin = self.store.join
92 self.sjoin = self.store.join
93 self.opener.createmode = self.store.createmode
93 self.opener.createmode = self.store.createmode
94 self._applyrequirements(requirements)
94 self._applyrequirements(requirements)
95 if create:
95 if create:
96 self._writerequirements()
96 self._writerequirements()
97
97
98 # These two define the set of tags for this repository. _tags
98 # These two define the set of tags for this repository. _tags
99 # maps tag name to node; _tagtypes maps tag name to 'global' or
99 # maps tag name to node; _tagtypes maps tag name to 'global' or
100 # 'local'. (Global tags are defined by .hgtags across all
100 # 'local'. (Global tags are defined by .hgtags across all
101 # heads, and local tags are defined in .hg/localtags.) They
101 # heads, and local tags are defined in .hg/localtags.) They
102 # constitute the in-memory cache of tags.
102 # constitute the in-memory cache of tags.
103 self._tags = None
103 self._tags = None
104 self._tagtypes = None
104 self._tagtypes = None
105
105
106 self._branchcache = None
106 self._branchcache = None
107 self._branchcachetip = None
107 self._branchcachetip = None
108 self.nodetagscache = None
108 self.nodetagscache = None
109 self.filterpats = {}
109 self.filterpats = {}
110 self._datafilters = {}
110 self._datafilters = {}
111 self._transref = self._lockref = self._wlockref = None
111 self._transref = self._lockref = self._wlockref = None
112
112
113 def _applyrequirements(self, requirements):
113 def _applyrequirements(self, requirements):
114 self.requirements = requirements
114 self.requirements = requirements
115 openerreqs = set(('revlogv1', 'generaldelta'))
115 openerreqs = set(('revlogv1', 'generaldelta'))
116 self.sopener.options = dict((r, 1) for r in requirements
116 self.sopener.options = dict((r, 1) for r in requirements
117 if r in openerreqs)
117 if r in openerreqs)
118
118
119 def _writerequirements(self):
119 def _writerequirements(self):
120 reqfile = self.opener("requires", "w")
120 reqfile = self.opener("requires", "w")
121 for r in self.requirements:
121 for r in self.requirements:
122 reqfile.write("%s\n" % r)
122 reqfile.write("%s\n" % r)
123 reqfile.close()
123 reqfile.close()
124
124
125 def _checknested(self, path):
125 def _checknested(self, path):
126 """Determine if path is a legal nested repository."""
126 """Determine if path is a legal nested repository."""
127 if not path.startswith(self.root):
127 if not path.startswith(self.root):
128 return False
128 return False
129 subpath = path[len(self.root) + 1:]
129 subpath = path[len(self.root) + 1:]
130
130
131 # XXX: Checking against the current working copy is wrong in
131 # XXX: Checking against the current working copy is wrong in
132 # the sense that it can reject things like
132 # the sense that it can reject things like
133 #
133 #
134 # $ hg cat -r 10 sub/x.txt
134 # $ hg cat -r 10 sub/x.txt
135 #
135 #
136 # if sub/ is no longer a subrepository in the working copy
136 # if sub/ is no longer a subrepository in the working copy
137 # parent revision.
137 # parent revision.
138 #
138 #
139 # However, it can of course also allow things that would have
139 # However, it can of course also allow things that would have
140 # been rejected before, such as the above cat command if sub/
140 # been rejected before, such as the above cat command if sub/
141 # is a subrepository now, but was a normal directory before.
141 # is a subrepository now, but was a normal directory before.
142 # The old path auditor would have rejected by mistake since it
142 # The old path auditor would have rejected by mistake since it
143 # panics when it sees sub/.hg/.
143 # panics when it sees sub/.hg/.
144 #
144 #
145 # All in all, checking against the working copy seems sensible
145 # All in all, checking against the working copy seems sensible
146 # since we want to prevent access to nested repositories on
146 # since we want to prevent access to nested repositories on
147 # the filesystem *now*.
147 # the filesystem *now*.
148 ctx = self[None]
148 ctx = self[None]
149 parts = util.splitpath(subpath)
149 parts = util.splitpath(subpath)
150 while parts:
150 while parts:
151 prefix = os.sep.join(parts)
151 prefix = os.sep.join(parts)
152 if prefix in ctx.substate:
152 if prefix in ctx.substate:
153 if prefix == subpath:
153 if prefix == subpath:
154 return True
154 return True
155 else:
155 else:
156 sub = ctx.sub(prefix)
156 sub = ctx.sub(prefix)
157 return sub.checknested(subpath[len(prefix) + 1:])
157 return sub.checknested(subpath[len(prefix) + 1:])
158 else:
158 else:
159 parts.pop()
159 parts.pop()
160 return False
160 return False
161
161
162 @util.propertycache
162 @util.propertycache
163 def _bookmarks(self):
163 def _bookmarks(self):
164 return bookmarks.read(self)
164 return bookmarks.read(self)
165
165
166 @util.propertycache
166 @util.propertycache
167 def _bookmarkcurrent(self):
167 def _bookmarkcurrent(self):
168 return bookmarks.readcurrent(self)
168 return bookmarks.readcurrent(self)
169
169
170 @propertycache
170 @propertycache
171 def changelog(self):
171 def changelog(self):
172 c = changelog.changelog(self.sopener)
172 c = changelog.changelog(self.sopener)
173 if 'HG_PENDING' in os.environ:
173 if 'HG_PENDING' in os.environ:
174 p = os.environ['HG_PENDING']
174 p = os.environ['HG_PENDING']
175 if p.startswith(self.root):
175 if p.startswith(self.root):
176 c.readpending('00changelog.i.a')
176 c.readpending('00changelog.i.a')
177 return c
177 return c
178
178
179 @propertycache
179 @propertycache
180 def manifest(self):
180 def manifest(self):
181 return manifest.manifest(self.sopener)
181 return manifest.manifest(self.sopener)
182
182
183 @propertycache
183 @propertycache
184 def dirstate(self):
184 def dirstate(self):
185 warned = [0]
185 warned = [0]
186 def validate(node):
186 def validate(node):
187 try:
187 try:
188 self.changelog.rev(node)
188 self.changelog.rev(node)
189 return node
189 return node
190 except error.LookupError:
190 except error.LookupError:
191 if not warned[0]:
191 if not warned[0]:
192 warned[0] = True
192 warned[0] = True
193 self.ui.warn(_("warning: ignoring unknown"
193 self.ui.warn(_("warning: ignoring unknown"
194 " working parent %s!\n") % short(node))
194 " working parent %s!\n") % short(node))
195 return nullid
195 return nullid
196
196
197 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
197 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
198
198
199 def __getitem__(self, changeid):
199 def __getitem__(self, changeid):
200 if changeid is None:
200 if changeid is None:
201 return context.workingctx(self)
201 return context.workingctx(self)
202 return context.changectx(self, changeid)
202 return context.changectx(self, changeid)
203
203
204 def __contains__(self, changeid):
204 def __contains__(self, changeid):
205 try:
205 try:
206 return bool(self.lookup(changeid))
206 return bool(self.lookup(changeid))
207 except error.RepoLookupError:
207 except error.RepoLookupError:
208 return False
208 return False
209
209
210 def __nonzero__(self):
210 def __nonzero__(self):
211 return True
211 return True
212
212
213 def __len__(self):
213 def __len__(self):
214 return len(self.changelog)
214 return len(self.changelog)
215
215
216 def __iter__(self):
216 def __iter__(self):
217 for i in xrange(len(self)):
217 for i in xrange(len(self)):
218 yield i
218 yield i
219
219
220 def url(self):
220 def url(self):
221 return 'file:' + self.root
221 return 'file:' + self.root
222
222
223 def hook(self, name, throw=False, **args):
223 def hook(self, name, throw=False, **args):
224 return hook.hook(self.ui, self, name, throw, **args)
224 return hook.hook(self.ui, self, name, throw, **args)
225
225
226 tag_disallowed = ':\r\n'
226 tag_disallowed = ':\r\n'
227
227
228 def _tag(self, names, node, message, local, user, date, extra={}):
228 def _tag(self, names, node, message, local, user, date, extra={}):
229 if isinstance(names, str):
229 if isinstance(names, str):
230 allchars = names
230 allchars = names
231 names = (names,)
231 names = (names,)
232 else:
232 else:
233 allchars = ''.join(names)
233 allchars = ''.join(names)
234 for c in self.tag_disallowed:
234 for c in self.tag_disallowed:
235 if c in allchars:
235 if c in allchars:
236 raise util.Abort(_('%r cannot be used in a tag name') % c)
236 raise util.Abort(_('%r cannot be used in a tag name') % c)
237
237
238 branches = self.branchmap()
238 branches = self.branchmap()
239 for name in names:
239 for name in names:
240 self.hook('pretag', throw=True, node=hex(node), tag=name,
240 self.hook('pretag', throw=True, node=hex(node), tag=name,
241 local=local)
241 local=local)
242 if name in branches:
242 if name in branches:
243 self.ui.warn(_("warning: tag %s conflicts with existing"
243 self.ui.warn(_("warning: tag %s conflicts with existing"
244 " branch name\n") % name)
244 " branch name\n") % name)
245
245
246 def writetags(fp, names, munge, prevtags):
246 def writetags(fp, names, munge, prevtags):
247 fp.seek(0, 2)
247 fp.seek(0, 2)
248 if prevtags and prevtags[-1] != '\n':
248 if prevtags and prevtags[-1] != '\n':
249 fp.write('\n')
249 fp.write('\n')
250 for name in names:
250 for name in names:
251 m = munge and munge(name) or name
251 m = munge and munge(name) or name
252 if self._tagtypes and name in self._tagtypes:
252 if self._tagtypes and name in self._tagtypes:
253 old = self._tags.get(name, nullid)
253 old = self._tags.get(name, nullid)
254 fp.write('%s %s\n' % (hex(old), m))
254 fp.write('%s %s\n' % (hex(old), m))
255 fp.write('%s %s\n' % (hex(node), m))
255 fp.write('%s %s\n' % (hex(node), m))
256 fp.close()
256 fp.close()
257
257
258 prevtags = ''
258 prevtags = ''
259 if local:
259 if local:
260 try:
260 try:
261 fp = self.opener('localtags', 'r+')
261 fp = self.opener('localtags', 'r+')
262 except IOError:
262 except IOError:
263 fp = self.opener('localtags', 'a')
263 fp = self.opener('localtags', 'a')
264 else:
264 else:
265 prevtags = fp.read()
265 prevtags = fp.read()
266
266
267 # local tags are stored in the current charset
267 # local tags are stored in the current charset
268 writetags(fp, names, None, prevtags)
268 writetags(fp, names, None, prevtags)
269 for name in names:
269 for name in names:
270 self.hook('tag', node=hex(node), tag=name, local=local)
270 self.hook('tag', node=hex(node), tag=name, local=local)
271 return
271 return
272
272
273 try:
273 try:
274 fp = self.wfile('.hgtags', 'rb+')
274 fp = self.wfile('.hgtags', 'rb+')
275 except IOError:
275 except IOError:
276 fp = self.wfile('.hgtags', 'ab')
276 fp = self.wfile('.hgtags', 'ab')
277 else:
277 else:
278 prevtags = fp.read()
278 prevtags = fp.read()
279
279
280 # committed tags are stored in UTF-8
280 # committed tags are stored in UTF-8
281 writetags(fp, names, encoding.fromlocal, prevtags)
281 writetags(fp, names, encoding.fromlocal, prevtags)
282
282
283 fp.close()
283 fp.close()
284
284
285 if '.hgtags' not in self.dirstate:
285 if '.hgtags' not in self.dirstate:
286 self[None].add(['.hgtags'])
286 self[None].add(['.hgtags'])
287
287
288 m = matchmod.exact(self.root, '', ['.hgtags'])
288 m = matchmod.exact(self.root, '', ['.hgtags'])
289 tagnode = self.commit(message, user, date, extra=extra, match=m)
289 tagnode = self.commit(message, user, date, extra=extra, match=m)
290
290
291 for name in names:
291 for name in names:
292 self.hook('tag', node=hex(node), tag=name, local=local)
292 self.hook('tag', node=hex(node), tag=name, local=local)
293
293
294 return tagnode
294 return tagnode
295
295
296 def tag(self, names, node, message, local, user, date):
296 def tag(self, names, node, message, local, user, date):
297 '''tag a revision with one or more symbolic names.
297 '''tag a revision with one or more symbolic names.
298
298
299 names is a list of strings or, when adding a single tag, names may be a
299 names is a list of strings or, when adding a single tag, names may be a
300 string.
300 string.
301
301
302 if local is True, the tags are stored in a per-repository file.
302 if local is True, the tags are stored in a per-repository file.
303 otherwise, they are stored in the .hgtags file, and a new
303 otherwise, they are stored in the .hgtags file, and a new
304 changeset is committed with the change.
304 changeset is committed with the change.
305
305
306 keyword arguments:
306 keyword arguments:
307
307
308 local: whether to store tags in non-version-controlled file
308 local: whether to store tags in non-version-controlled file
309 (default False)
309 (default False)
310
310
311 message: commit message to use if committing
311 message: commit message to use if committing
312
312
313 user: name of user to use if committing
313 user: name of user to use if committing
314
314
315 date: date tuple to use if committing'''
315 date: date tuple to use if committing'''
316
316
317 if not local:
317 if not local:
318 for x in self.status()[:5]:
318 for x in self.status()[:5]:
319 if '.hgtags' in x:
319 if '.hgtags' in x:
320 raise util.Abort(_('working copy of .hgtags is changed '
320 raise util.Abort(_('working copy of .hgtags is changed '
321 '(please commit .hgtags manually)'))
321 '(please commit .hgtags manually)'))
322
322
323 self.tags() # instantiate the cache
323 self.tags() # instantiate the cache
324 self._tag(names, node, message, local, user, date)
324 self._tag(names, node, message, local, user, date)
325
325
326 def tags(self):
326 def tags(self):
327 '''return a mapping of tag to node'''
327 '''return a mapping of tag to node'''
328 if self._tags is None:
328 if self._tags is None:
329 (self._tags, self._tagtypes) = self._findtags()
329 (self._tags, self._tagtypes) = self._findtags()
330
330
331 return self._tags
331 return self._tags
332
332
333 def _findtags(self):
333 def _findtags(self):
334 '''Do the hard work of finding tags. Return a pair of dicts
334 '''Do the hard work of finding tags. Return a pair of dicts
335 (tags, tagtypes) where tags maps tag name to node, and tagtypes
335 (tags, tagtypes) where tags maps tag name to node, and tagtypes
336 maps tag name to a string like \'global\' or \'local\'.
336 maps tag name to a string like \'global\' or \'local\'.
337 Subclasses or extensions are free to add their own tags, but
337 Subclasses or extensions are free to add their own tags, but
338 should be aware that the returned dicts will be retained for the
338 should be aware that the returned dicts will be retained for the
339 duration of the localrepo object.'''
339 duration of the localrepo object.'''
340
340
341 # XXX what tagtype should subclasses/extensions use? Currently
341 # XXX what tagtype should subclasses/extensions use? Currently
342 # mq and bookmarks add tags, but do not set the tagtype at all.
342 # mq and bookmarks add tags, but do not set the tagtype at all.
343 # Should each extension invent its own tag type? Should there
343 # Should each extension invent its own tag type? Should there
344 # be one tagtype for all such "virtual" tags? Or is the status
344 # be one tagtype for all such "virtual" tags? Or is the status
345 # quo fine?
345 # quo fine?
346
346
347 alltags = {} # map tag name to (node, hist)
347 alltags = {} # map tag name to (node, hist)
348 tagtypes = {}
348 tagtypes = {}
349
349
350 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
350 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
351 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
351 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
352
352
353 # Build the return dicts. Have to re-encode tag names because
353 # Build the return dicts. Have to re-encode tag names because
354 # the tags module always uses UTF-8 (in order not to lose info
354 # the tags module always uses UTF-8 (in order not to lose info
355 # writing to the cache), but the rest of Mercurial wants them in
355 # writing to the cache), but the rest of Mercurial wants them in
356 # local encoding.
356 # local encoding.
357 tags = {}
357 tags = {}
358 for (name, (node, hist)) in alltags.iteritems():
358 for (name, (node, hist)) in alltags.iteritems():
359 if node != nullid:
359 if node != nullid:
360 try:
360 try:
361 # ignore tags to unknown nodes
361 # ignore tags to unknown nodes
362 self.changelog.lookup(node)
362 self.changelog.lookup(node)
363 tags[encoding.tolocal(name)] = node
363 tags[encoding.tolocal(name)] = node
364 except error.LookupError:
364 except error.LookupError:
365 pass
365 pass
366 tags['tip'] = self.changelog.tip()
366 tags['tip'] = self.changelog.tip()
367 tagtypes = dict([(encoding.tolocal(name), value)
367 tagtypes = dict([(encoding.tolocal(name), value)
368 for (name, value) in tagtypes.iteritems()])
368 for (name, value) in tagtypes.iteritems()])
369 return (tags, tagtypes)
369 return (tags, tagtypes)
370
370
371 def tagtype(self, tagname):
371 def tagtype(self, tagname):
372 '''
372 '''
373 return the type of the given tag. result can be:
373 return the type of the given tag. result can be:
374
374
375 'local' : a local tag
375 'local' : a local tag
376 'global' : a global tag
376 'global' : a global tag
377 None : tag does not exist
377 None : tag does not exist
378 '''
378 '''
379
379
380 self.tags()
380 self.tags()
381
381
382 return self._tagtypes.get(tagname)
382 return self._tagtypes.get(tagname)
383
383
384 def tagslist(self):
384 def tagslist(self):
385 '''return a list of tags ordered by revision'''
385 '''return a list of tags ordered by revision'''
386 l = []
386 l = []
387 for t, n in self.tags().iteritems():
387 for t, n in self.tags().iteritems():
388 r = self.changelog.rev(n)
388 r = self.changelog.rev(n)
389 l.append((r, t, n))
389 l.append((r, t, n))
390 return [(t, n) for r, t, n in sorted(l)]
390 return [(t, n) for r, t, n in sorted(l)]
391
391
392 def nodetags(self, node):
392 def nodetags(self, node):
393 '''return the tags associated with a node'''
393 '''return the tags associated with a node'''
394 if not self.nodetagscache:
394 if not self.nodetagscache:
395 self.nodetagscache = {}
395 self.nodetagscache = {}
396 for t, n in self.tags().iteritems():
396 for t, n in self.tags().iteritems():
397 self.nodetagscache.setdefault(n, []).append(t)
397 self.nodetagscache.setdefault(n, []).append(t)
398 for tags in self.nodetagscache.itervalues():
398 for tags in self.nodetagscache.itervalues():
399 tags.sort()
399 tags.sort()
400 return self.nodetagscache.get(node, [])
400 return self.nodetagscache.get(node, [])
401
401
402 def nodebookmarks(self, node):
402 def nodebookmarks(self, node):
403 marks = []
403 marks = []
404 for bookmark, n in self._bookmarks.iteritems():
404 for bookmark, n in self._bookmarks.iteritems():
405 if n == node:
405 if n == node:
406 marks.append(bookmark)
406 marks.append(bookmark)
407 return sorted(marks)
407 return sorted(marks)
408
408
409 def _branchtags(self, partial, lrev):
409 def _branchtags(self, partial, lrev):
410 # TODO: rename this function?
410 # TODO: rename this function?
411 tiprev = len(self) - 1
411 tiprev = len(self) - 1
412 if lrev != tiprev:
412 if lrev != tiprev:
413 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
413 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
414 self._updatebranchcache(partial, ctxgen)
414 self._updatebranchcache(partial, ctxgen)
415 self._writebranchcache(partial, self.changelog.tip(), tiprev)
415 self._writebranchcache(partial, self.changelog.tip(), tiprev)
416
416
417 return partial
417 return partial
418
418
419 def updatebranchcache(self):
419 def updatebranchcache(self):
420 tip = self.changelog.tip()
420 tip = self.changelog.tip()
421 if self._branchcache is not None and self._branchcachetip == tip:
421 if self._branchcache is not None and self._branchcachetip == tip:
422 return self._branchcache
422 return self._branchcache
423
423
424 oldtip = self._branchcachetip
424 oldtip = self._branchcachetip
425 self._branchcachetip = tip
425 self._branchcachetip = tip
426 if oldtip is None or oldtip not in self.changelog.nodemap:
426 if oldtip is None or oldtip not in self.changelog.nodemap:
427 partial, last, lrev = self._readbranchcache()
427 partial, last, lrev = self._readbranchcache()
428 else:
428 else:
429 lrev = self.changelog.rev(oldtip)
429 lrev = self.changelog.rev(oldtip)
430 partial = self._branchcache
430 partial = self._branchcache
431
431
432 self._branchtags(partial, lrev)
432 self._branchtags(partial, lrev)
433 # this private cache holds all heads (not just tips)
433 # this private cache holds all heads (not just tips)
434 self._branchcache = partial
434 self._branchcache = partial
435
435
436 def branchmap(self):
436 def branchmap(self):
437 '''returns a dictionary {branch: [branchheads]}'''
437 '''returns a dictionary {branch: [branchheads]}'''
438 self.updatebranchcache()
438 self.updatebranchcache()
439 return self._branchcache
439 return self._branchcache
440
440
441 def branchtags(self):
441 def branchtags(self):
442 '''return a dict where branch names map to the tipmost head of
442 '''return a dict where branch names map to the tipmost head of
443 the branch, open heads come before closed'''
443 the branch, open heads come before closed'''
444 bt = {}
444 bt = {}
445 for bn, heads in self.branchmap().iteritems():
445 for bn, heads in self.branchmap().iteritems():
446 tip = heads[-1]
446 tip = heads[-1]
447 for h in reversed(heads):
447 for h in reversed(heads):
448 if 'close' not in self.changelog.read(h)[5]:
448 if 'close' not in self.changelog.read(h)[5]:
449 tip = h
449 tip = h
450 break
450 break
451 bt[bn] = tip
451 bt[bn] = tip
452 return bt
452 return bt
453
453
454 def _readbranchcache(self):
454 def _readbranchcache(self):
455 partial = {}
455 partial = {}
456 try:
456 try:
457 f = self.opener("cache/branchheads")
457 f = self.opener("cache/branchheads")
458 lines = f.read().split('\n')
458 lines = f.read().split('\n')
459 f.close()
459 f.close()
460 except (IOError, OSError):
460 except (IOError, OSError):
461 return {}, nullid, nullrev
461 return {}, nullid, nullrev
462
462
463 try:
463 try:
464 last, lrev = lines.pop(0).split(" ", 1)
464 last, lrev = lines.pop(0).split(" ", 1)
465 last, lrev = bin(last), int(lrev)
465 last, lrev = bin(last), int(lrev)
466 if lrev >= len(self) or self[lrev].node() != last:
466 if lrev >= len(self) or self[lrev].node() != last:
467 # invalidate the cache
467 # invalidate the cache
468 raise ValueError('invalidating branch cache (tip differs)')
468 raise ValueError('invalidating branch cache (tip differs)')
469 for l in lines:
469 for l in lines:
470 if not l:
470 if not l:
471 continue
471 continue
472 node, label = l.split(" ", 1)
472 node, label = l.split(" ", 1)
473 label = encoding.tolocal(label.strip())
473 label = encoding.tolocal(label.strip())
474 partial.setdefault(label, []).append(bin(node))
474 partial.setdefault(label, []).append(bin(node))
475 except KeyboardInterrupt:
475 except KeyboardInterrupt:
476 raise
476 raise
477 except Exception, inst:
477 except Exception, inst:
478 if self.ui.debugflag:
478 if self.ui.debugflag:
479 self.ui.warn(str(inst), '\n')
479 self.ui.warn(str(inst), '\n')
480 partial, last, lrev = {}, nullid, nullrev
480 partial, last, lrev = {}, nullid, nullrev
481 return partial, last, lrev
481 return partial, last, lrev
482
482
483 def _writebranchcache(self, branches, tip, tiprev):
483 def _writebranchcache(self, branches, tip, tiprev):
484 try:
484 try:
485 f = self.opener("cache/branchheads", "w", atomictemp=True)
485 f = self.opener("cache/branchheads", "w", atomictemp=True)
486 f.write("%s %s\n" % (hex(tip), tiprev))
486 f.write("%s %s\n" % (hex(tip), tiprev))
487 for label, nodes in branches.iteritems():
487 for label, nodes in branches.iteritems():
488 for node in nodes:
488 for node in nodes:
489 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
489 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
490 f.rename()
490 f.rename()
491 except (IOError, OSError):
491 except (IOError, OSError):
492 pass
492 pass
493
493
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changesets from *ctxgen* into the branch-head map
        *partial* ({branchname: [headnodes]}), pruning entries that are
        no longer heads.  *partial* is updated in place.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # sort candidate heads by revision so bheads[0] is the oldest
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                # 'latest' itself stays a head; only its ancestors are pruned
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
519
519
    def lookup(self, key):
        """Resolve *key* to a binary changelog node.

        *key* may be an integer revision, '.', 'null', 'tip', a full or
        partial node, a bookmark, a tag or a branch name; candidates are
        tried in that order.

        Raises error.Abort when the key is an unknown dirstate parent
        (damaged dirstate), and error.RepoLookupError when nothing
        matches.
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            # first parent of the working directory
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        # exact node or revision-number match
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        # finally, try an unambiguous node-prefix match
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            # show binary nodes as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
552
552
553 def lookupbranch(self, key, remote=None):
553 def lookupbranch(self, key, remote=None):
554 repo = remote or self
554 repo = remote or self
555 if key in repo.branchmap():
555 if key in repo.branchmap():
556 return key
556 return key
557
557
558 repo = (remote and remote.local()) and remote or self
558 repo = (remote and remote.local()) and remote or self
559 return repo[key].branch()
559 return repo[key].branch()
560
560
561 def known(self, nodes):
561 def known(self, nodes):
562 nm = self.changelog.nodemap
562 nm = self.changelog.nodemap
563 return [(n in nm) for n in nodes]
563 return [(n in nm) for n in nodes]
564
564
    def local(self):
        """Return whether this repository is local; always True for this
        class (remote/peer counterparts presumably return False — verify
        against their implementations)."""
        return True
567
567
568 def join(self, f):
568 def join(self, f):
569 return os.path.join(self.path, f)
569 return os.path.join(self.path, f)
570
570
571 def wjoin(self, f):
571 def wjoin(self, f):
572 return os.path.join(self.root, f)
572 return os.path.join(self.root, f)
573
573
574 def file(self, f):
574 def file(self, f):
575 if f[0] == '/':
575 if f[0] == '/':
576 f = f[1:]
576 f = f[1:]
577 return filelog.filelog(self.sopener, f)
577 return filelog.filelog(self.sopener, f)
578
578
    def changectx(self, changeid):
        """Return the changectx for *changeid* (equivalent to
        ``self[changeid]``)."""
        return self[changeid]
581
581
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid=None selects the working directory context (self[None])
        return self[changeid].parents()
585
585
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.

        Returns a context.filectx for *path* at that revision.
        """
        return context.filectx(self, path, changeid, fileid)
590
590
    def getcwd(self):
        """Return the current working directory as reported by the
        dirstate."""
        return self.dirstate.getcwd()
593
593
    def pathto(self, f, cwd=None):
        """Return *f* converted for display, delegating to
        dirstate.pathto (presumably relative to *cwd* — see dirstate)."""
        return self.dirstate.pathto(f, cwd)
596
596
    def wfile(self, f, mode='r'):
        """Open working-directory file *f* with *mode* through the
        working-directory opener."""
        return self.wopener(f, mode)
599
599
600 def _link(self, f):
600 def _link(self, f):
601 return os.path.islink(self.wjoin(f))
601 return os.path.islink(self.wjoin(f))
602
602
603 def _loadfilter(self, filter):
603 def _loadfilter(self, filter):
604 if filter not in self.filterpats:
604 if filter not in self.filterpats:
605 l = []
605 l = []
606 for pat, cmd in self.ui.configitems(filter):
606 for pat, cmd in self.ui.configitems(filter):
607 if cmd == '!':
607 if cmd == '!':
608 continue
608 continue
609 mf = matchmod.match(self.root, '', [pat])
609 mf = matchmod.match(self.root, '', [pat])
610 fn = None
610 fn = None
611 params = cmd
611 params = cmd
612 for name, filterfn in self._datafilters.iteritems():
612 for name, filterfn in self._datafilters.iteritems():
613 if cmd.startswith(name):
613 if cmd.startswith(name):
614 fn = filterfn
614 fn = filterfn
615 params = cmd[len(name):].lstrip()
615 params = cmd[len(name):].lstrip()
616 break
616 break
617 if not fn:
617 if not fn:
618 fn = lambda s, c, **kwargs: util.filter(s, c)
618 fn = lambda s, c, **kwargs: util.filter(s, c)
619 # Wrap old filters not supporting keyword arguments
619 # Wrap old filters not supporting keyword arguments
620 if not inspect.getargspec(fn)[2]:
620 if not inspect.getargspec(fn)[2]:
621 oldfn = fn
621 oldfn = fn
622 fn = lambda s, c, **kwargs: oldfn(s, c)
622 fn = lambda s, c, **kwargs: oldfn(s, c)
623 l.append((mf, fn, params))
623 l.append((mf, fn, params))
624 self.filterpats[filter] = l
624 self.filterpats[filter] = l
625 return self.filterpats[filter]
625 return self.filterpats[filter]
626
626
627 def _filter(self, filterpats, filename, data):
627 def _filter(self, filterpats, filename, data):
628 for mf, fn, cmd in filterpats:
628 for mf, fn, cmd in filterpats:
629 if mf(filename):
629 if mf(filename):
630 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
630 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
631 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
631 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
632 break
632 break
633
633
634 return data
634 return data
635
635
    @propertycache
    def _encodefilterpats(self):
        # lazily loaded, cached [encode] filter patterns
        return self._loadfilter('encode')
639
639
    @propertycache
    def _decodefilterpats(self):
        # lazily loaded, cached [decode] filter patterns
        return self._loadfilter('decode')
643
643
644 def adddatafilter(self, name, filter):
644 def adddatafilter(self, name, filter):
645 self._datafilters[name] = filter
645 self._datafilters[name] = filter
646
646
    def wread(self, filename):
        """Read *filename* from the working directory — the link target
        for symlinks — and return it run through the encode filters."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)
653
653
    def wwrite(self, filename, data, flags):
        """Write *data* to working-directory file *filename* after
        applying the decode filters.

        'l' in *flags* creates a symlink with the data as target;
        otherwise a regular file is written and 'x' sets its exec bit.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)
662
662
    def wwritedata(self, filename, data):
        """Return *data* run through the decode filters for *filename*
        without writing anything to disk."""
        return self._filter(self._decodefilterpats, filename, data)
665
665
    def transaction(self, desc):
        """Open a transaction on the store and return it; an already
        running transaction is nested instead.

        *desc* is a human-readable description recorded in the journal
        (shown later by 'hg rollback').  Raises error.RepoError when an
        abandoned journal from a crashed transaction is found.
        """
        # reuse a live transaction if one is still running
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # snapshot state needed for rollback, and schedule the journal
        # files to be renamed to undo.* after the transaction completes
        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # hold only a weak reference so an unreferenced transaction
        # can be garbage collected (and aborted)
        self._transref = weakref.ref(tr)
        return tr
685
685
    def _writejournal(self, desc):
        """Snapshot dirstate, branch, bookmarks and *desc* into
        journal.* files and return the tuple of all journal file names
        taking part in the transaction."""
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            # no dirstate yet (e.g. brand-new repository)
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        # record current changeset count and the description
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        # copy any existing bookmarks file; write an empty journal entry
        # when there is none so rollback always has something to restore
        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'))
707
707
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # drop in-memory state that may reference rolled-back data
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
722
722
    def rollback(self, dryrun=False):
        """Undo the last transaction, restoring store, dirstate, branch
        and bookmarks from the undo.* files.

        With dryrun=True only print what would be undone.  Returns 1
        when no rollback information is available.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                # describe what is being undone, from undo.desc when
                # readable ("<len> \n <command>[ \n <extra>]")
                try:
                    args = self.opener.read("undo.desc").splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                # restore the named branch the working directory was on
                try:
                    branch = self.opener.read("undo.branch")
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("named branch could not be reset, "
                                   "current branch is still: %s\n")
                                 % self.dirstate.branch())
                # flush caches now that history has changed
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                # report the restored working directory parent(s)
                parents = tuple([p.rev() for p in self.parents()])
                if len(parents) > 1:
                    self.ui.status(_("working directory now based on "
                                     "revisions %d and %d\n") % parents)
                else:
                    self.ui.status(_("working directory now based on "
                                     "revision %d\n") % parents)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
772
772
773 def invalidatecaches(self):
773 def invalidatecaches(self):
774 self._tags = None
774 self._tags = None
775 self._tagtypes = None
775 self._tagtypes = None
776 self.nodetagscache = None
776 self.nodetagscache = None
777 self._branchcache = None # in UTF-8
777 self._branchcache = None # in UTF-8
778 self._branchcachetip = None
778 self._branchcachetip = None
779
779
780 def invalidate(self):
780 def invalidate(self):
781 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
781 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
782 if a in self.__dict__:
782 if a in self.__dict__:
783 delattr(self, a)
783 delattr(self, a)
784 self.invalidatecaches()
784 self.invalidatecaches()
785
785
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname* and return the lock object.

        When the lock is held elsewhere: re-raise immediately unless
        *wait* is true, in which case warn once and retry with the
        configured ui.timeout.  *releasefn*/*acquirefn* are run on
        release/after acquisition; *desc* names the lock in messages.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
800
800
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # re-enter an existing lock if one is still alive and held
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, self.store.write,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
814
814
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # re-enter an existing working-directory lock if still held
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
829
829
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context to commit; manifest1/manifest2: the
        manifests of the commit's first/second parent; linkrev: the
        changelog revision the new filelog entry links to; tr: the
        active transaction; changelist: appended to in place with the
        names of files that changed.  Returns the file node to record
        in the manifest.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
909
909
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there was nothing
        to commit.  Raises util.Abort on a partial merge commit, missing
        .hgsub, dirty subrepos, unmatched patterns or unresolved merge
        conflicts.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # track visited directories so explicit dir patterns can be
            # validated below; unmatched files abort the commit
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed, not closing a branch head and not crossing
            # branches: there is nothing to commit
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # deliberately catch everything (including interrupts):
                # tell the user where the edited message was saved, then
                # re-raise the original exception
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
1044
1044
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        If error is true, an IOError while committing a file aborts the
        commit; otherwise a missing file (ENOENT) is silently treated as
        removed (see the IOError handler below).  Returns the node of
        the new changeset.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # NOTE(review): a weakref proxy is handed to the revlogs,
            # presumably so they do not keep the transaction alive — confirm
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}          # filename -> new filelog node
                changed = []      # files actually modified (filled by _filecommit)
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            # file vanished: treat as a removal instead of
                            # aborting the whole commit
                            removed.append(f)

                # update manifest
                m1.update(new)
                # only report removals that actually existed in a parent
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # nothing touched: reuse first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # lazy callable so the pretxncommit hook can see pending data
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1118
1118
1119 def destroyed(self):
1119 def destroyed(self):
1120 '''Inform the repository that nodes have been destroyed.
1120 '''Inform the repository that nodes have been destroyed.
1121 Intended for use by strip and rollback, so there's a common
1121 Intended for use by strip and rollback, so there's a common
1122 place for anything that has to be done after destroying history.'''
1122 place for anything that has to be done after destroying history.'''
1123 # XXX it might be nice if we could take the list of destroyed
1123 # XXX it might be nice if we could take the list of destroyed
1124 # nodes, but I don't see an easy way for rollback() to do that
1124 # nodes, but I don't see an easy way for rollback() to do that
1125
1125
1126 # Ensure the persistent tag cache is updated. Doing it now
1126 # Ensure the persistent tag cache is updated. Doing it now
1127 # means that the tag cache only has to worry about destroyed
1127 # means that the tag cache only has to worry about destroyed
1128 # heads immediately after a strip/rollback. That in turn
1128 # heads immediately after a strip/rollback. That in turn
1129 # guarantees that "cachetip == currenttip" (comparing both rev
1129 # guarantees that "cachetip == currenttip" (comparing both rev
1130 # and node) always means no nodes have been added or destroyed.
1130 # and node) always means no nodes have been added or destroyed.
1131
1131
1132 # XXX this is suboptimal when qrefresh'ing: we strip the current
1132 # XXX this is suboptimal when qrefresh'ing: we strip the current
1133 # head, refresh the tag cache, then immediately add a new head.
1133 # head, refresh the tag cache, then immediately add a new head.
1134 # But I think doing it this way is necessary for the "instant
1134 # But I think doing it this way is necessary for the "instant
1135 # tag cache retrieval" case to work.
1135 # tag cache retrieval" case to work.
1136 self.invalidatecaches()
1136 self.invalidatecaches()
1137
1137
1138 def walk(self, match, node=None):
1138 def walk(self, match, node=None):
1139 '''
1139 '''
1140 walk recursively through the directory tree or a given
1140 walk recursively through the directory tree or a given
1141 changeset, finding all files matched by the match
1141 changeset, finding all files matched by the match
1142 function
1142 function
1143 '''
1143 '''
1144 return self[node].walk(match)
1144 return self[node].walk(match)
1145
1145
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown lists are only populated when the
        corresponding flag is set.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by `match`
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # node1/node2 may already be context objects
        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None   # comparing against the working dir?
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only warn about files absent from ctx1; others are
                # legitimately reported via the normal status lists
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None   # None node forces a content compare below
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # walk mf2, consuming mf1 as we go: anything left in mf1
            # afterwards was removed
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            # recurse into subrepos, prefixing results with the subpath
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1276
1276
1277 def heads(self, start=None):
1277 def heads(self, start=None):
1278 heads = self.changelog.heads(start)
1278 heads = self.changelog.heads(start)
1279 # sort the output in rev descending order
1279 # sort the output in rev descending order
1280 return sorted(heads, key=self.changelog.rev, reverse=True)
1280 return sorted(heads, key=self.changelog.rev, reverse=True)
1281
1281
1282 def branchheads(self, branch=None, start=None, closed=False):
1282 def branchheads(self, branch=None, start=None, closed=False):
1283 '''return a (possibly filtered) list of heads for the given branch
1283 '''return a (possibly filtered) list of heads for the given branch
1284
1284
1285 Heads are returned in topological order, from newest to oldest.
1285 Heads are returned in topological order, from newest to oldest.
1286 If branch is None, use the dirstate branch.
1286 If branch is None, use the dirstate branch.
1287 If start is not None, return only heads reachable from start.
1287 If start is not None, return only heads reachable from start.
1288 If closed is True, return heads that are marked as closed as well.
1288 If closed is True, return heads that are marked as closed as well.
1289 '''
1289 '''
1290 if branch is None:
1290 if branch is None:
1291 branch = self[None].branch()
1291 branch = self[None].branch()
1292 branches = self.branchmap()
1292 branches = self.branchmap()
1293 if branch not in branches:
1293 if branch not in branches:
1294 return []
1294 return []
1295 # the cache returns heads ordered lowest to highest
1295 # the cache returns heads ordered lowest to highest
1296 bheads = list(reversed(branches[branch]))
1296 bheads = list(reversed(branches[branch]))
1297 if start is not None:
1297 if start is not None:
1298 # filter out the heads that cannot be reached from startrev
1298 # filter out the heads that cannot be reached from startrev
1299 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1299 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1300 bheads = [h for h in bheads if h in fbheads]
1300 bheads = [h for h in bheads if h in fbheads]
1301 if not closed:
1301 if not closed:
1302 bheads = [h for h in bheads if
1302 bheads = [h for h in bheads if
1303 ('close' not in self.changelog.read(h)[5])]
1303 ('close' not in self.changelog.read(h)[5])]
1304 return bheads
1304 return bheads
1305
1305
1306 def branches(self, nodes):
1306 def branches(self, nodes):
1307 if not nodes:
1307 if not nodes:
1308 nodes = [self.changelog.tip()]
1308 nodes = [self.changelog.tip()]
1309 b = []
1309 b = []
1310 for n in nodes:
1310 for n in nodes:
1311 t = n
1311 t = n
1312 while 1:
1312 while True:
1313 p = self.changelog.parents(n)
1313 p = self.changelog.parents(n)
1314 if p[1] != nullid or p[0] == nullid:
1314 if p[1] != nullid or p[0] == nullid:
1315 b.append((t, n, p[0], p[1]))
1315 b.append((t, n, p[0], p[1]))
1316 break
1316 break
1317 n = p[0]
1317 n = p[0]
1318 return b
1318 return b
1319
1319
1320 def between(self, pairs):
1320 def between(self, pairs):
1321 r = []
1321 r = []
1322
1322
1323 for top, bottom in pairs:
1323 for top, bottom in pairs:
1324 n, l, i = top, [], 0
1324 n, l, i = top, [], 0
1325 f = 1
1325 f = 1
1326
1326
1327 while n != bottom and n != nullid:
1327 while n != bottom and n != nullid:
1328 p = self.changelog.parents(n)[0]
1328 p = self.changelog.parents(n)[0]
1329 if i == f:
1329 if i == f:
1330 l.append(n)
1330 l.append(n)
1331 f = f * 2
1331 f = f * 2
1332 n = p
1332 n = p
1333 i += 1
1333 i += 1
1334
1334
1335 r.append(l)
1335 r.append(l)
1336
1336
1337 return r
1337 return r
1338
1338
    def pull(self, remote, heads=None, force=False):
        """Pull changes from a remote repository into this one.

        heads limits the pull to the given remote heads; force is passed
        through to discovery.  Returns 0 when there is nothing to pull,
        otherwise the return value of addchangegroup().
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the best transfer method the remote supports
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result
1372
1372
1373 def checkpush(self, force, revs):
1373 def checkpush(self, force, revs):
1374 """Extensions can override this function if additional checks have
1374 """Extensions can override this function if additional checks have
1375 to be performed before pushing, or call it if they override push
1375 to be performed before pushing, or call it if they override push
1376 command.
1376 command.
1377 """
1377 """
1378 pass
1378 pass
1379
1379
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            # addchangegroup path: we must lock the remote ourselves
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            # cg is None when prepush refused; remote_heads then carries
            # the integer return value described in the docstring
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        # propagate bookmarks that moved forward locally
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    # only fast-forward a bookmark the remote already has
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1441
1441
1442 def changegroupinfo(self, nodes, source):
1442 def changegroupinfo(self, nodes, source):
1443 if self.ui.verbose or source == 'bundle':
1443 if self.ui.verbose or source == 'bundle':
1444 self.ui.status(_("%d changesets found\n") % len(nodes))
1444 self.ui.status(_("%d changesets found\n") % len(nodes))
1445 if self.ui.debugflag:
1445 if self.ui.debugflag:
1446 self.ui.debug("list of changesets:\n")
1446 self.ui.debug("list of changesets:\n")
1447 for node in nodes:
1447 for node in nodes:
1448 self.ui.debug("%s\n" % hex(node))
1448 self.ui.debug("%s\n" % hex(node))
1449
1449
1450 def changegroupsubset(self, bases, heads, source):
1450 def changegroupsubset(self, bases, heads, source):
1451 """Compute a changegroup consisting of all the nodes that are
1451 """Compute a changegroup consisting of all the nodes that are
1452 descendents of any of the bases and ancestors of any of the heads.
1452 descendents of any of the bases and ancestors of any of the heads.
1453 Return a chunkbuffer object whose read() method will return
1453 Return a chunkbuffer object whose read() method will return
1454 successive changegroup chunks.
1454 successive changegroup chunks.
1455
1455
1456 It is fairly complex as determining which filenodes and which
1456 It is fairly complex as determining which filenodes and which
1457 manifest nodes need to be included for the changeset to be complete
1457 manifest nodes need to be included for the changeset to be complete
1458 is non-trivial.
1458 is non-trivial.
1459
1459
1460 Another wrinkle is doing the reverse, figuring out which changeset in
1460 Another wrinkle is doing the reverse, figuring out which changeset in
1461 the changegroup a particular filenode or manifestnode belongs to.
1461 the changegroup a particular filenode or manifestnode belongs to.
1462 """
1462 """
1463 cl = self.changelog
1463 cl = self.changelog
1464 if not bases:
1464 if not bases:
1465 bases = [nullid]
1465 bases = [nullid]
1466 csets, bases, heads = cl.nodesbetween(bases, heads)
1466 csets, bases, heads = cl.nodesbetween(bases, heads)
1467 # We assume that all ancestors of bases are known
1467 # We assume that all ancestors of bases are known
1468 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1468 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1469 return self._changegroupsubset(common, csets, heads, source)
1469 return self._changegroupsubset(common, csets, heads, source)
1470
1470
1471 def getbundle(self, source, heads=None, common=None):
1471 def getbundle(self, source, heads=None, common=None):
1472 """Like changegroupsubset, but returns the set difference between the
1472 """Like changegroupsubset, but returns the set difference between the
1473 ancestors of heads and the ancestors common.
1473 ancestors of heads and the ancestors common.
1474
1474
1475 If heads is None, use the local heads. If common is None, use [nullid].
1475 If heads is None, use the local heads. If common is None, use [nullid].
1476
1476
1477 The nodes in common might not all be known locally due to the way the
1477 The nodes in common might not all be known locally due to the way the
1478 current discovery protocol works.
1478 current discovery protocol works.
1479 """
1479 """
1480 cl = self.changelog
1480 cl = self.changelog
1481 if common:
1481 if common:
1482 nm = cl.nodemap
1482 nm = cl.nodemap
1483 common = [n for n in common if n in nm]
1483 common = [n for n in common if n in nm]
1484 else:
1484 else:
1485 common = [nullid]
1485 common = [nullid]
1486 if not heads:
1486 if not heads:
1487 heads = cl.heads()
1487 heads = cl.heads()
1488 common, missing = cl.findcommonmissing(common, heads)
1488 common, missing = cl.findcommonmissing(common, heads)
1489 if not missing:
1489 if not missing:
1490 return None
1490 return None
1491 return self._changegroupsubset(common, missing, heads, source)
1491 return self._changegroupsubset(common, missing, heads, source)
1492
1492
1493 def _changegroupsubset(self, commonrevs, csets, heads, source):
1493 def _changegroupsubset(self, commonrevs, csets, heads, source):
1494
1494
1495 cl = self.changelog
1495 cl = self.changelog
1496 mf = self.manifest
1496 mf = self.manifest
1497 mfs = {} # needed manifests
1497 mfs = {} # needed manifests
1498 fnodes = {} # needed file nodes
1498 fnodes = {} # needed file nodes
1499 changedfiles = set()
1499 changedfiles = set()
1500 fstate = ['', {}]
1500 fstate = ['', {}]
1501 count = [0]
1501 count = [0]
1502
1502
1503 # can we go through the fast path ?
1503 # can we go through the fast path ?
1504 heads.sort()
1504 heads.sort()
1505 if heads == sorted(self.heads()):
1505 if heads == sorted(self.heads()):
1506 return self._changegroup(csets, source)
1506 return self._changegroup(csets, source)
1507
1507
1508 # slow path
1508 # slow path
1509 self.hook('preoutgoing', throw=True, source=source)
1509 self.hook('preoutgoing', throw=True, source=source)
1510 self.changegroupinfo(csets, source)
1510 self.changegroupinfo(csets, source)
1511
1511
1512 # filter any nodes that claim to be part of the known set
1512 # filter any nodes that claim to be part of the known set
1513 def prune(revlog, missing):
1513 def prune(revlog, missing):
1514 for n in missing:
1514 for n in missing:
1515 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1515 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1516 yield n
1516 yield n
1517
1517
1518 def lookup(revlog, x):
1518 def lookup(revlog, x):
1519 if revlog == cl:
1519 if revlog == cl:
1520 c = cl.read(x)
1520 c = cl.read(x)
1521 changedfiles.update(c[3])
1521 changedfiles.update(c[3])
1522 mfs.setdefault(c[0], x)
1522 mfs.setdefault(c[0], x)
1523 count[0] += 1
1523 count[0] += 1
1524 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1524 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1525 return x
1525 return x
1526 elif revlog == mf:
1526 elif revlog == mf:
1527 clnode = mfs[x]
1527 clnode = mfs[x]
1528 mdata = mf.readfast(x)
1528 mdata = mf.readfast(x)
1529 for f in changedfiles:
1529 for f in changedfiles:
1530 if f in mdata:
1530 if f in mdata:
1531 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1531 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1532 count[0] += 1
1532 count[0] += 1
1533 self.ui.progress(_('bundling'), count[0],
1533 self.ui.progress(_('bundling'), count[0],
1534 unit=_('manifests'), total=len(mfs))
1534 unit=_('manifests'), total=len(mfs))
1535 return mfs[x]
1535 return mfs[x]
1536 else:
1536 else:
1537 self.ui.progress(
1537 self.ui.progress(
1538 _('bundling'), count[0], item=fstate[0],
1538 _('bundling'), count[0], item=fstate[0],
1539 unit=_('files'), total=len(changedfiles))
1539 unit=_('files'), total=len(changedfiles))
1540 return fstate[1][x]
1540 return fstate[1][x]
1541
1541
1542 bundler = changegroup.bundle10(lookup)
1542 bundler = changegroup.bundle10(lookup)
1543 reorder = self.ui.config('bundle', 'reorder', 'auto')
1543 reorder = self.ui.config('bundle', 'reorder', 'auto')
1544 if reorder == 'auto':
1544 if reorder == 'auto':
1545 reorder = None
1545 reorder = None
1546 else:
1546 else:
1547 reorder = util.parsebool(reorder)
1547 reorder = util.parsebool(reorder)
1548
1548
1549 def gengroup():
1549 def gengroup():
1550 # Create a changenode group generator that will call our functions
1550 # Create a changenode group generator that will call our functions
1551 # back to lookup the owning changenode and collect information.
1551 # back to lookup the owning changenode and collect information.
1552 for chunk in cl.group(csets, bundler, reorder=reorder):
1552 for chunk in cl.group(csets, bundler, reorder=reorder):
1553 yield chunk
1553 yield chunk
1554 self.ui.progress(_('bundling'), None)
1554 self.ui.progress(_('bundling'), None)
1555
1555
1556 # Create a generator for the manifestnodes that calls our lookup
1556 # Create a generator for the manifestnodes that calls our lookup
1557 # and data collection functions back.
1557 # and data collection functions back.
1558 count[0] = 0
1558 count[0] = 0
1559 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1559 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1560 yield chunk
1560 yield chunk
1561 self.ui.progress(_('bundling'), None)
1561 self.ui.progress(_('bundling'), None)
1562
1562
1563 mfs.clear()
1563 mfs.clear()
1564
1564
1565 # Go through all our files in order sorted by name.
1565 # Go through all our files in order sorted by name.
1566 count[0] = 0
1566 count[0] = 0
1567 for fname in sorted(changedfiles):
1567 for fname in sorted(changedfiles):
1568 filerevlog = self.file(fname)
1568 filerevlog = self.file(fname)
1569 if not len(filerevlog):
1569 if not len(filerevlog):
1570 raise util.Abort(_("empty or missing revlog for %s") % fname)
1570 raise util.Abort(_("empty or missing revlog for %s") % fname)
1571 fstate[0] = fname
1571 fstate[0] = fname
1572 fstate[1] = fnodes.pop(fname, {})
1572 fstate[1] = fnodes.pop(fname, {})
1573 first = True
1573 first = True
1574
1574
1575 for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
1575 for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
1576 bundler, reorder=reorder):
1576 bundler, reorder=reorder):
1577 if first:
1577 if first:
1578 if chunk == bundler.close():
1578 if chunk == bundler.close():
1579 break
1579 break
1580 count[0] += 1
1580 count[0] += 1
1581 yield bundler.fileheader(fname)
1581 yield bundler.fileheader(fname)
1582 first = False
1582 first = False
1583 yield chunk
1583 yield chunk
1584 # Signal that no more groups are left.
1584 # Signal that no more groups are left.
1585 yield bundler.close()
1585 yield bundler.close()
1586 self.ui.progress(_('bundling'), None)
1586 self.ui.progress(_('bundling'), None)
1587
1587
1588 if csets:
1588 if csets:
1589 self.hook('outgoing', node=hex(csets[0]), source=source)
1589 self.hook('outgoing', node=hex(csets[0]), source=source)
1590
1590
1591 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1591 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1592
1592
1593 def changegroup(self, basenodes, source):
1593 def changegroup(self, basenodes, source):
1594 # to avoid a race we use changegroupsubset() (issue1320)
1594 # to avoid a race we use changegroupsubset() (issue1320)
1595 return self.changegroupsubset(basenodes, self.heads(), source)
1595 return self.changegroupsubset(basenodes, self.heads(), source)
1596
1596
1597 def _changegroup(self, nodes, source):
1597 def _changegroup(self, nodes, source):
1598 """Compute the changegroup of all nodes that we have that a recipient
1598 """Compute the changegroup of all nodes that we have that a recipient
1599 doesn't. Return a chunkbuffer object whose read() method will return
1599 doesn't. Return a chunkbuffer object whose read() method will return
1600 successive changegroup chunks.
1600 successive changegroup chunks.
1601
1601
1602 This is much easier than the previous function as we can assume that
1602 This is much easier than the previous function as we can assume that
1603 the recipient has any changenode we aren't sending them.
1603 the recipient has any changenode we aren't sending them.
1604
1604
1605 nodes is the set of nodes to send"""
1605 nodes is the set of nodes to send"""
1606
1606
1607 cl = self.changelog
1607 cl = self.changelog
1608 mf = self.manifest
1608 mf = self.manifest
1609 mfs = {}
1609 mfs = {}
1610 changedfiles = set()
1610 changedfiles = set()
1611 fstate = ['']
1611 fstate = ['']
1612 count = [0]
1612 count = [0]
1613
1613
1614 self.hook('preoutgoing', throw=True, source=source)
1614 self.hook('preoutgoing', throw=True, source=source)
1615 self.changegroupinfo(nodes, source)
1615 self.changegroupinfo(nodes, source)
1616
1616
1617 revset = set([cl.rev(n) for n in nodes])
1617 revset = set([cl.rev(n) for n in nodes])
1618
1618
1619 def gennodelst(log):
1619 def gennodelst(log):
1620 for r in log:
1620 for r in log:
1621 if log.linkrev(r) in revset:
1621 if log.linkrev(r) in revset:
1622 yield log.node(r)
1622 yield log.node(r)
1623
1623
1624 def lookup(revlog, x):
1624 def lookup(revlog, x):
1625 if revlog == cl:
1625 if revlog == cl:
1626 c = cl.read(x)
1626 c = cl.read(x)
1627 changedfiles.update(c[3])
1627 changedfiles.update(c[3])
1628 mfs.setdefault(c[0], x)
1628 mfs.setdefault(c[0], x)
1629 count[0] += 1
1629 count[0] += 1
1630 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1630 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1631 return x
1631 return x
1632 elif revlog == mf:
1632 elif revlog == mf:
1633 count[0] += 1
1633 count[0] += 1
1634 self.ui.progress(_('bundling'), count[0],
1634 self.ui.progress(_('bundling'), count[0],
1635 unit=_('manifests'), total=len(mfs))
1635 unit=_('manifests'), total=len(mfs))
1636 return cl.node(revlog.linkrev(revlog.rev(x)))
1636 return cl.node(revlog.linkrev(revlog.rev(x)))
1637 else:
1637 else:
1638 self.ui.progress(
1638 self.ui.progress(
1639 _('bundling'), count[0], item=fstate[0],
1639 _('bundling'), count[0], item=fstate[0],
1640 total=len(changedfiles), unit=_('files'))
1640 total=len(changedfiles), unit=_('files'))
1641 return cl.node(revlog.linkrev(revlog.rev(x)))
1641 return cl.node(revlog.linkrev(revlog.rev(x)))
1642
1642
1643 bundler = changegroup.bundle10(lookup)
1643 bundler = changegroup.bundle10(lookup)
1644 reorder = self.ui.config('bundle', 'reorder', 'auto')
1644 reorder = self.ui.config('bundle', 'reorder', 'auto')
1645 if reorder == 'auto':
1645 if reorder == 'auto':
1646 reorder = None
1646 reorder = None
1647 else:
1647 else:
1648 reorder = util.parsebool(reorder)
1648 reorder = util.parsebool(reorder)
1649
1649
1650 def gengroup():
1650 def gengroup():
1651 '''yield a sequence of changegroup chunks (strings)'''
1651 '''yield a sequence of changegroup chunks (strings)'''
1652 # construct a list of all changed files
1652 # construct a list of all changed files
1653
1653
1654 for chunk in cl.group(nodes, bundler, reorder=reorder):
1654 for chunk in cl.group(nodes, bundler, reorder=reorder):
1655 yield chunk
1655 yield chunk
1656 self.ui.progress(_('bundling'), None)
1656 self.ui.progress(_('bundling'), None)
1657
1657
1658 count[0] = 0
1658 count[0] = 0
1659 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1659 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1660 yield chunk
1660 yield chunk
1661 self.ui.progress(_('bundling'), None)
1661 self.ui.progress(_('bundling'), None)
1662
1662
1663 count[0] = 0
1663 count[0] = 0
1664 for fname in sorted(changedfiles):
1664 for fname in sorted(changedfiles):
1665 filerevlog = self.file(fname)
1665 filerevlog = self.file(fname)
1666 if not len(filerevlog):
1666 if not len(filerevlog):
1667 raise util.Abort(_("empty or missing revlog for %s") % fname)
1667 raise util.Abort(_("empty or missing revlog for %s") % fname)
1668 fstate[0] = fname
1668 fstate[0] = fname
1669 first = True
1669 first = True
1670 for chunk in filerevlog.group(gennodelst(filerevlog), bundler,
1670 for chunk in filerevlog.group(gennodelst(filerevlog), bundler,
1671 reorder=reorder):
1671 reorder=reorder):
1672 if first:
1672 if first:
1673 if chunk == bundler.close():
1673 if chunk == bundler.close():
1674 break
1674 break
1675 count[0] += 1
1675 count[0] += 1
1676 yield bundler.fileheader(fname)
1676 yield bundler.fileheader(fname)
1677 first = False
1677 first = False
1678 yield chunk
1678 yield chunk
1679 yield bundler.close()
1679 yield bundler.close()
1680 self.ui.progress(_('bundling'), None)
1680 self.ui.progress(_('bundling'), None)
1681
1681
1682 if nodes:
1682 if nodes:
1683 self.hook('outgoing', node=hex(nodes[0]), source=source)
1683 self.hook('outgoing', node=hex(nodes[0]), source=source)
1684
1684
1685 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1685 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1686
1686
1687 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1687 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1688 """Add the changegroup returned by source.read() to this repo.
1688 """Add the changegroup returned by source.read() to this repo.
1689 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1689 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1690 the URL of the repo where this changegroup is coming from.
1690 the URL of the repo where this changegroup is coming from.
1691 If lock is not None, the function takes ownership of the lock
1691 If lock is not None, the function takes ownership of the lock
1692 and releases it after the changegroup is added.
1692 and releases it after the changegroup is added.
1693
1693
1694 Return an integer summarizing the change to this repo:
1694 Return an integer summarizing the change to this repo:
1695 - nothing changed or no source: 0
1695 - nothing changed or no source: 0
1696 - more heads than before: 1+added heads (2..n)
1696 - more heads than before: 1+added heads (2..n)
1697 - fewer heads than before: -1-removed heads (-2..-n)
1697 - fewer heads than before: -1-removed heads (-2..-n)
1698 - number of heads stays the same: 1
1698 - number of heads stays the same: 1
1699 """
1699 """
1700 def csmap(x):
1700 def csmap(x):
1701 self.ui.debug("add changeset %s\n" % short(x))
1701 self.ui.debug("add changeset %s\n" % short(x))
1702 return len(cl)
1702 return len(cl)
1703
1703
1704 def revmap(x):
1704 def revmap(x):
1705 return cl.rev(x)
1705 return cl.rev(x)
1706
1706
1707 if not source:
1707 if not source:
1708 return 0
1708 return 0
1709
1709
1710 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1710 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1711
1711
1712 changesets = files = revisions = 0
1712 changesets = files = revisions = 0
1713 efiles = set()
1713 efiles = set()
1714
1714
1715 # write changelog data to temp files so concurrent readers will not see
1715 # write changelog data to temp files so concurrent readers will not see
1716 # inconsistent view
1716 # inconsistent view
1717 cl = self.changelog
1717 cl = self.changelog
1718 cl.delayupdate()
1718 cl.delayupdate()
1719 oldheads = cl.heads()
1719 oldheads = cl.heads()
1720
1720
1721 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1721 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1722 try:
1722 try:
1723 trp = weakref.proxy(tr)
1723 trp = weakref.proxy(tr)
1724 # pull off the changeset group
1724 # pull off the changeset group
1725 self.ui.status(_("adding changesets\n"))
1725 self.ui.status(_("adding changesets\n"))
1726 clstart = len(cl)
1726 clstart = len(cl)
1727 class prog(object):
1727 class prog(object):
1728 step = _('changesets')
1728 step = _('changesets')
1729 count = 1
1729 count = 1
1730 ui = self.ui
1730 ui = self.ui
1731 total = None
1731 total = None
1732 def __call__(self):
1732 def __call__(self):
1733 self.ui.progress(self.step, self.count, unit=_('chunks'),
1733 self.ui.progress(self.step, self.count, unit=_('chunks'),
1734 total=self.total)
1734 total=self.total)
1735 self.count += 1
1735 self.count += 1
1736 pr = prog()
1736 pr = prog()
1737 source.callback = pr
1737 source.callback = pr
1738
1738
1739 source.changelogheader()
1739 source.changelogheader()
1740 if (cl.addgroup(source, csmap, trp) is None
1740 if (cl.addgroup(source, csmap, trp) is None
1741 and not emptyok):
1741 and not emptyok):
1742 raise util.Abort(_("received changelog group is empty"))
1742 raise util.Abort(_("received changelog group is empty"))
1743 clend = len(cl)
1743 clend = len(cl)
1744 changesets = clend - clstart
1744 changesets = clend - clstart
1745 for c in xrange(clstart, clend):
1745 for c in xrange(clstart, clend):
1746 efiles.update(self[c].files())
1746 efiles.update(self[c].files())
1747 efiles = len(efiles)
1747 efiles = len(efiles)
1748 self.ui.progress(_('changesets'), None)
1748 self.ui.progress(_('changesets'), None)
1749
1749
1750 # pull off the manifest group
1750 # pull off the manifest group
1751 self.ui.status(_("adding manifests\n"))
1751 self.ui.status(_("adding manifests\n"))
1752 pr.step = _('manifests')
1752 pr.step = _('manifests')
1753 pr.count = 1
1753 pr.count = 1
1754 pr.total = changesets # manifests <= changesets
1754 pr.total = changesets # manifests <= changesets
1755 # no need to check for empty manifest group here:
1755 # no need to check for empty manifest group here:
1756 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1756 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1757 # no new manifest will be created and the manifest group will
1757 # no new manifest will be created and the manifest group will
1758 # be empty during the pull
1758 # be empty during the pull
1759 source.manifestheader()
1759 source.manifestheader()
1760 self.manifest.addgroup(source, revmap, trp)
1760 self.manifest.addgroup(source, revmap, trp)
1761 self.ui.progress(_('manifests'), None)
1761 self.ui.progress(_('manifests'), None)
1762
1762
1763 needfiles = {}
1763 needfiles = {}
1764 if self.ui.configbool('server', 'validate', default=False):
1764 if self.ui.configbool('server', 'validate', default=False):
1765 # validate incoming csets have their manifests
1765 # validate incoming csets have their manifests
1766 for cset in xrange(clstart, clend):
1766 for cset in xrange(clstart, clend):
1767 mfest = self.changelog.read(self.changelog.node(cset))[0]
1767 mfest = self.changelog.read(self.changelog.node(cset))[0]
1768 mfest = self.manifest.readdelta(mfest)
1768 mfest = self.manifest.readdelta(mfest)
1769 # store file nodes we must see
1769 # store file nodes we must see
1770 for f, n in mfest.iteritems():
1770 for f, n in mfest.iteritems():
1771 needfiles.setdefault(f, set()).add(n)
1771 needfiles.setdefault(f, set()).add(n)
1772
1772
1773 # process the files
1773 # process the files
1774 self.ui.status(_("adding file changes\n"))
1774 self.ui.status(_("adding file changes\n"))
1775 pr.step = 'files'
1775 pr.step = 'files'
1776 pr.count = 1
1776 pr.count = 1
1777 pr.total = efiles
1777 pr.total = efiles
1778 source.callback = None
1778 source.callback = None
1779
1779
1780 while 1:
1780 while True:
1781 chunkdata = source.filelogheader()
1781 chunkdata = source.filelogheader()
1782 if not chunkdata:
1782 if not chunkdata:
1783 break
1783 break
1784 f = chunkdata["filename"]
1784 f = chunkdata["filename"]
1785 self.ui.debug("adding %s revisions\n" % f)
1785 self.ui.debug("adding %s revisions\n" % f)
1786 pr()
1786 pr()
1787 fl = self.file(f)
1787 fl = self.file(f)
1788 o = len(fl)
1788 o = len(fl)
1789 if fl.addgroup(source, revmap, trp) is None:
1789 if fl.addgroup(source, revmap, trp) is None:
1790 raise util.Abort(_("received file revlog group is empty"))
1790 raise util.Abort(_("received file revlog group is empty"))
1791 revisions += len(fl) - o
1791 revisions += len(fl) - o
1792 files += 1
1792 files += 1
1793 if f in needfiles:
1793 if f in needfiles:
1794 needs = needfiles[f]
1794 needs = needfiles[f]
1795 for new in xrange(o, len(fl)):
1795 for new in xrange(o, len(fl)):
1796 n = fl.node(new)
1796 n = fl.node(new)
1797 if n in needs:
1797 if n in needs:
1798 needs.remove(n)
1798 needs.remove(n)
1799 if not needs:
1799 if not needs:
1800 del needfiles[f]
1800 del needfiles[f]
1801 self.ui.progress(_('files'), None)
1801 self.ui.progress(_('files'), None)
1802
1802
1803 for f, needs in needfiles.iteritems():
1803 for f, needs in needfiles.iteritems():
1804 fl = self.file(f)
1804 fl = self.file(f)
1805 for n in needs:
1805 for n in needs:
1806 try:
1806 try:
1807 fl.rev(n)
1807 fl.rev(n)
1808 except error.LookupError:
1808 except error.LookupError:
1809 raise util.Abort(
1809 raise util.Abort(
1810 _('missing file data for %s:%s - run hg verify') %
1810 _('missing file data for %s:%s - run hg verify') %
1811 (f, hex(n)))
1811 (f, hex(n)))
1812
1812
1813 dh = 0
1813 dh = 0
1814 if oldheads:
1814 if oldheads:
1815 heads = cl.heads()
1815 heads = cl.heads()
1816 dh = len(heads) - len(oldheads)
1816 dh = len(heads) - len(oldheads)
1817 for h in heads:
1817 for h in heads:
1818 if h not in oldheads and 'close' in self[h].extra():
1818 if h not in oldheads and 'close' in self[h].extra():
1819 dh -= 1
1819 dh -= 1
1820 htext = ""
1820 htext = ""
1821 if dh:
1821 if dh:
1822 htext = _(" (%+d heads)") % dh
1822 htext = _(" (%+d heads)") % dh
1823
1823
1824 self.ui.status(_("added %d changesets"
1824 self.ui.status(_("added %d changesets"
1825 " with %d changes to %d files%s\n")
1825 " with %d changes to %d files%s\n")
1826 % (changesets, revisions, files, htext))
1826 % (changesets, revisions, files, htext))
1827
1827
1828 if changesets > 0:
1828 if changesets > 0:
1829 p = lambda: cl.writepending() and self.root or ""
1829 p = lambda: cl.writepending() and self.root or ""
1830 self.hook('pretxnchangegroup', throw=True,
1830 self.hook('pretxnchangegroup', throw=True,
1831 node=hex(cl.node(clstart)), source=srctype,
1831 node=hex(cl.node(clstart)), source=srctype,
1832 url=url, pending=p)
1832 url=url, pending=p)
1833
1833
1834 # make changelog see real files again
1834 # make changelog see real files again
1835 cl.finalize(trp)
1835 cl.finalize(trp)
1836
1836
1837 tr.close()
1837 tr.close()
1838 finally:
1838 finally:
1839 tr.release()
1839 tr.release()
1840 if lock:
1840 if lock:
1841 lock.release()
1841 lock.release()
1842
1842
1843 if changesets > 0:
1843 if changesets > 0:
1844 # forcefully update the on-disk branch cache
1844 # forcefully update the on-disk branch cache
1845 self.ui.debug("updating the branch cache\n")
1845 self.ui.debug("updating the branch cache\n")
1846 self.updatebranchcache()
1846 self.updatebranchcache()
1847 self.hook("changegroup", node=hex(cl.node(clstart)),
1847 self.hook("changegroup", node=hex(cl.node(clstart)),
1848 source=srctype, url=url)
1848 source=srctype, url=url)
1849
1849
1850 for i in xrange(clstart, clend):
1850 for i in xrange(clstart, clend):
1851 self.hook("incoming", node=hex(cl.node(i)),
1851 self.hook("incoming", node=hex(cl.node(i)),
1852 source=srctype, url=url)
1852 source=srctype, url=url)
1853
1853
1854 # never return 0 here:
1854 # never return 0 here:
1855 if dh < 0:
1855 if dh < 0:
1856 return dh - 1
1856 return dh - 1
1857 else:
1857 else:
1858 return dh + 1
1858 return dh + 1
1859
1859
1860 def stream_in(self, remote, requirements):
1860 def stream_in(self, remote, requirements):
1861 lock = self.lock()
1861 lock = self.lock()
1862 try:
1862 try:
1863 fp = remote.stream_out()
1863 fp = remote.stream_out()
1864 l = fp.readline()
1864 l = fp.readline()
1865 try:
1865 try:
1866 resp = int(l)
1866 resp = int(l)
1867 except ValueError:
1867 except ValueError:
1868 raise error.ResponseError(
1868 raise error.ResponseError(
1869 _('Unexpected response from remote server:'), l)
1869 _('Unexpected response from remote server:'), l)
1870 if resp == 1:
1870 if resp == 1:
1871 raise util.Abort(_('operation forbidden by server'))
1871 raise util.Abort(_('operation forbidden by server'))
1872 elif resp == 2:
1872 elif resp == 2:
1873 raise util.Abort(_('locking the remote repository failed'))
1873 raise util.Abort(_('locking the remote repository failed'))
1874 elif resp != 0:
1874 elif resp != 0:
1875 raise util.Abort(_('the server sent an unknown error code'))
1875 raise util.Abort(_('the server sent an unknown error code'))
1876 self.ui.status(_('streaming all changes\n'))
1876 self.ui.status(_('streaming all changes\n'))
1877 l = fp.readline()
1877 l = fp.readline()
1878 try:
1878 try:
1879 total_files, total_bytes = map(int, l.split(' ', 1))
1879 total_files, total_bytes = map(int, l.split(' ', 1))
1880 except (ValueError, TypeError):
1880 except (ValueError, TypeError):
1881 raise error.ResponseError(
1881 raise error.ResponseError(
1882 _('Unexpected response from remote server:'), l)
1882 _('Unexpected response from remote server:'), l)
1883 self.ui.status(_('%d files to transfer, %s of data\n') %
1883 self.ui.status(_('%d files to transfer, %s of data\n') %
1884 (total_files, util.bytecount(total_bytes)))
1884 (total_files, util.bytecount(total_bytes)))
1885 start = time.time()
1885 start = time.time()
1886 for i in xrange(total_files):
1886 for i in xrange(total_files):
1887 # XXX doesn't support '\n' or '\r' in filenames
1887 # XXX doesn't support '\n' or '\r' in filenames
1888 l = fp.readline()
1888 l = fp.readline()
1889 try:
1889 try:
1890 name, size = l.split('\0', 1)
1890 name, size = l.split('\0', 1)
1891 size = int(size)
1891 size = int(size)
1892 except (ValueError, TypeError):
1892 except (ValueError, TypeError):
1893 raise error.ResponseError(
1893 raise error.ResponseError(
1894 _('Unexpected response from remote server:'), l)
1894 _('Unexpected response from remote server:'), l)
1895 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1895 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1896 # for backwards compat, name was partially encoded
1896 # for backwards compat, name was partially encoded
1897 ofp = self.sopener(store.decodedir(name), 'w')
1897 ofp = self.sopener(store.decodedir(name), 'w')
1898 for chunk in util.filechunkiter(fp, limit=size):
1898 for chunk in util.filechunkiter(fp, limit=size):
1899 ofp.write(chunk)
1899 ofp.write(chunk)
1900 ofp.close()
1900 ofp.close()
1901 elapsed = time.time() - start
1901 elapsed = time.time() - start
1902 if elapsed <= 0:
1902 if elapsed <= 0:
1903 elapsed = 0.001
1903 elapsed = 0.001
1904 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1904 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1905 (util.bytecount(total_bytes), elapsed,
1905 (util.bytecount(total_bytes), elapsed,
1906 util.bytecount(total_bytes / elapsed)))
1906 util.bytecount(total_bytes / elapsed)))
1907
1907
1908 # new requirements = old non-format requirements + new format-related
1908 # new requirements = old non-format requirements + new format-related
1909 # requirements from the streamed-in repository
1909 # requirements from the streamed-in repository
1910 requirements.update(set(self.requirements) - self.supportedformats)
1910 requirements.update(set(self.requirements) - self.supportedformats)
1911 self._applyrequirements(requirements)
1911 self._applyrequirements(requirements)
1912 self._writerequirements()
1912 self._writerequirements()
1913
1913
1914 self.invalidate()
1914 self.invalidate()
1915 return len(self.heads()) + 1
1915 return len(self.heads()) + 1
1916 finally:
1916 finally:
1917 lock.release()
1917 lock.release()
1918
1918
1919 def clone(self, remote, heads=[], stream=False):
1919 def clone(self, remote, heads=[], stream=False):
1920 '''clone remote repository.
1920 '''clone remote repository.
1921
1921
1922 keyword arguments:
1922 keyword arguments:
1923 heads: list of revs to clone (forces use of pull)
1923 heads: list of revs to clone (forces use of pull)
1924 stream: use streaming clone if possible'''
1924 stream: use streaming clone if possible'''
1925
1925
1926 # now, all clients that can request uncompressed clones can
1926 # now, all clients that can request uncompressed clones can
1927 # read repo formats supported by all servers that can serve
1927 # read repo formats supported by all servers that can serve
1928 # them.
1928 # them.
1929
1929
1930 # if revlog format changes, client will have to check version
1930 # if revlog format changes, client will have to check version
1931 # and format flags on "stream" capability, and use
1931 # and format flags on "stream" capability, and use
1932 # uncompressed only if compatible.
1932 # uncompressed only if compatible.
1933
1933
1934 if stream and not heads:
1934 if stream and not heads:
1935 # 'stream' means remote revlog format is revlogv1 only
1935 # 'stream' means remote revlog format is revlogv1 only
1936 if remote.capable('stream'):
1936 if remote.capable('stream'):
1937 return self.stream_in(remote, set(('revlogv1',)))
1937 return self.stream_in(remote, set(('revlogv1',)))
1938 # otherwise, 'streamreqs' contains the remote revlog format
1938 # otherwise, 'streamreqs' contains the remote revlog format
1939 streamreqs = remote.capable('streamreqs')
1939 streamreqs = remote.capable('streamreqs')
1940 if streamreqs:
1940 if streamreqs:
1941 streamreqs = set(streamreqs.split(','))
1941 streamreqs = set(streamreqs.split(','))
1942 # if we support it, stream in and adjust our requirements
1942 # if we support it, stream in and adjust our requirements
1943 if not streamreqs - self.supportedformats:
1943 if not streamreqs - self.supportedformats:
1944 return self.stream_in(remote, streamreqs)
1944 return self.stream_in(remote, streamreqs)
1945 return self.pull(remote, heads)
1945 return self.pull(remote, heads)
1946
1946
1947 def pushkey(self, namespace, key, old, new):
1947 def pushkey(self, namespace, key, old, new):
1948 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1948 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1949 old=old, new=new)
1949 old=old, new=new)
1950 ret = pushkey.push(self, namespace, key, old, new)
1950 ret = pushkey.push(self, namespace, key, old, new)
1951 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1951 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1952 ret=ret)
1952 ret=ret)
1953 return ret
1953 return ret
1954
1954
1955 def listkeys(self, namespace):
1955 def listkeys(self, namespace):
1956 self.hook('prelistkeys', throw=True, namespace=namespace)
1956 self.hook('prelistkeys', throw=True, namespace=namespace)
1957 values = pushkey.list(self, namespace)
1957 values = pushkey.list(self, namespace)
1958 self.hook('listkeys', namespace=namespace, values=values)
1958 self.hook('listkeys', namespace=namespace, values=values)
1959 return values
1959 return values
1960
1960
1961 def debugwireargs(self, one, two, three=None, four=None, five=None):
1961 def debugwireargs(self, one, two, three=None, four=None, five=None):
1962 '''used to test argument passing over the wire'''
1962 '''used to test argument passing over the wire'''
1963 return "%s %s %s %s %s" % (one, two, three, four, five)
1963 return "%s %s %s %s %s" % (one, two, three, four, five)
1964
1964
1965 # used to avoid circular references so destructors work
1965 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The pairs are copied into fresh tuples up front so the callback does
    not keep the caller's sequence (or anything it references) alive —
    important for the circular-reference avoidance noted above.
    """
    pending = [tuple(entry) for entry in files]
    def run():
        for src, dest in pending:
            util.rename(src, dest)
    return run
1972
1972
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    # the assert guarantees the prefix, so a simple slice swap is safe
    return os.path.join(base, 'undo' + name[len('journal'):])
1977
1977
def instance(ui, path, create):
    """Open (or create) the local repository addressed by path."""
    localized = util.localpath(path)
    return localrepository(ui, localized, create)
1980
1980
def islocal(path):
    """Repositories of this module are always local (no network)."""
    return True
@@ -1,137 +1,137
1 # lock.py - simple advisory locking scheme for mercurial
1 # lock.py - simple advisory locking scheme for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import util, error
8 import util, error
9 import errno, os, socket, time
9 import errno, os, socket, time
10 import warnings
10 import warnings
11
11
12 class lock(object):
12 class lock(object):
13 '''An advisory lock held by one process to control access to a set
13 '''An advisory lock held by one process to control access to a set
14 of files. Non-cooperating processes or incorrectly written scripts
14 of files. Non-cooperating processes or incorrectly written scripts
15 can ignore Mercurial's locking scheme and stomp all over the
15 can ignore Mercurial's locking scheme and stomp all over the
16 repository, so don't do that.
16 repository, so don't do that.
17
17
18 Typically used via localrepository.lock() to lock the repository
18 Typically used via localrepository.lock() to lock the repository
19 store (.hg/store/) or localrepository.wlock() to lock everything
19 store (.hg/store/) or localrepository.wlock() to lock everything
20 else under .hg/.'''
20 else under .hg/.'''
21
21
22 # lock is symlink on platforms that support it, file on others.
22 # lock is symlink on platforms that support it, file on others.
23
23
24 # symlink is used because create of directory entry and contents
24 # symlink is used because create of directory entry and contents
25 # are atomic even over nfs.
25 # are atomic even over nfs.
26
26
27 # old-style lock: symlink to pid
27 # old-style lock: symlink to pid
28 # new-style lock: symlink to hostname:pid
28 # new-style lock: symlink to hostname:pid
29
29
30 _host = None
30 _host = None
31
31
32 def __init__(self, file, timeout=-1, releasefn=None, desc=None):
32 def __init__(self, file, timeout=-1, releasefn=None, desc=None):
33 self.f = file
33 self.f = file
34 self.held = 0
34 self.held = 0
35 self.timeout = timeout
35 self.timeout = timeout
36 self.releasefn = releasefn
36 self.releasefn = releasefn
37 self.desc = desc
37 self.desc = desc
38 self.lock()
38 self.lock()
39
39
40 def __del__(self):
40 def __del__(self):
41 if self.held:
41 if self.held:
42 warnings.warn("use lock.release instead of del lock",
42 warnings.warn("use lock.release instead of del lock",
43 category=DeprecationWarning,
43 category=DeprecationWarning,
44 stacklevel=2)
44 stacklevel=2)
45
45
46 # ensure the lock will be removed
46 # ensure the lock will be removed
47 # even if recursive locking did occur
47 # even if recursive locking did occur
48 self.held = 1
48 self.held = 1
49
49
50 self.release()
50 self.release()
51
51
52 def lock(self):
52 def lock(self):
53 timeout = self.timeout
53 timeout = self.timeout
54 while 1:
54 while True:
55 try:
55 try:
56 self.trylock()
56 self.trylock()
57 return 1
57 return 1
58 except error.LockHeld, inst:
58 except error.LockHeld, inst:
59 if timeout != 0:
59 if timeout != 0:
60 time.sleep(1)
60 time.sleep(1)
61 if timeout > 0:
61 if timeout > 0:
62 timeout -= 1
62 timeout -= 1
63 continue
63 continue
64 raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
64 raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
65 inst.locker)
65 inst.locker)
66
66
67 def trylock(self):
67 def trylock(self):
68 if self.held:
68 if self.held:
69 self.held += 1
69 self.held += 1
70 return
70 return
71 if lock._host is None:
71 if lock._host is None:
72 lock._host = socket.gethostname()
72 lock._host = socket.gethostname()
73 lockname = '%s:%s' % (lock._host, os.getpid())
73 lockname = '%s:%s' % (lock._host, os.getpid())
74 while not self.held:
74 while not self.held:
75 try:
75 try:
76 util.makelock(lockname, self.f)
76 util.makelock(lockname, self.f)
77 self.held = 1
77 self.held = 1
78 except (OSError, IOError), why:
78 except (OSError, IOError), why:
79 if why.errno == errno.EEXIST:
79 if why.errno == errno.EEXIST:
80 locker = self.testlock()
80 locker = self.testlock()
81 if locker is not None:
81 if locker is not None:
82 raise error.LockHeld(errno.EAGAIN, self.f, self.desc,
82 raise error.LockHeld(errno.EAGAIN, self.f, self.desc,
83 locker)
83 locker)
84 else:
84 else:
85 raise error.LockUnavailable(why.errno, why.strerror,
85 raise error.LockUnavailable(why.errno, why.strerror,
86 why.filename, self.desc)
86 why.filename, self.desc)
87
87
88 def testlock(self):
88 def testlock(self):
89 """return id of locker if lock is valid, else None.
89 """return id of locker if lock is valid, else None.
90
90
91 If old-style lock, we cannot tell what machine locker is on.
91 If old-style lock, we cannot tell what machine locker is on.
92 with new-style lock, if locker is on this machine, we can
92 with new-style lock, if locker is on this machine, we can
93 see if locker is alive. If locker is on this machine but
93 see if locker is alive. If locker is on this machine but
94 not alive, we can safely break lock.
94 not alive, we can safely break lock.
95
95
96 The lock file is only deleted when None is returned.
96 The lock file is only deleted when None is returned.
97
97
98 """
98 """
99 locker = util.readlock(self.f)
99 locker = util.readlock(self.f)
100 try:
100 try:
101 host, pid = locker.split(":", 1)
101 host, pid = locker.split(":", 1)
102 except ValueError:
102 except ValueError:
103 return locker
103 return locker
104 if host != lock._host:
104 if host != lock._host:
105 return locker
105 return locker
106 try:
106 try:
107 pid = int(pid)
107 pid = int(pid)
108 except ValueError:
108 except ValueError:
109 return locker
109 return locker
110 if util.testpid(pid):
110 if util.testpid(pid):
111 return locker
111 return locker
112 # if locker dead, break lock. must do this with another lock
112 # if locker dead, break lock. must do this with another lock
113 # held, or can race and break valid lock.
113 # held, or can race and break valid lock.
114 try:
114 try:
115 l = lock(self.f + '.break', timeout=0)
115 l = lock(self.f + '.break', timeout=0)
116 util.unlink(self.f)
116 util.unlink(self.f)
117 l.release()
117 l.release()
118 except error.LockError:
118 except error.LockError:
119 return locker
119 return locker
120
120
121 def release(self):
121 def release(self):
122 if self.held > 1:
122 if self.held > 1:
123 self.held -= 1
123 self.held -= 1
124 elif self.held == 1:
124 elif self.held == 1:
125 self.held = 0
125 self.held = 0
126 if self.releasefn:
126 if self.releasefn:
127 self.releasefn()
127 self.releasefn()
128 try:
128 try:
129 util.unlink(self.f)
129 util.unlink(self.f)
130 except OSError:
130 except OSError:
131 pass
131 pass
132
132
133 def release(*locks):
133 def release(*locks):
134 for lock in locks:
134 for lock in locks:
135 if lock is not None:
135 if lock is not None:
136 lock.release()
136 lock.release()
137
137
@@ -1,1786 +1,1786
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
15
15
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17
17
18 class PatchError(Exception):
18 class PatchError(Exception):
19 pass
19 pass
20
20
21
21
22 # public functions
22 # public functions
23
23
24 def split(stream):
24 def split(stream):
25 '''return an iterator of individual patches from a stream'''
25 '''return an iterator of individual patches from a stream'''
26 def isheader(line, inheader):
26 def isheader(line, inheader):
27 if inheader and line[0] in (' ', '\t'):
27 if inheader and line[0] in (' ', '\t'):
28 # continuation
28 # continuation
29 return True
29 return True
30 if line[0] in (' ', '-', '+'):
30 if line[0] in (' ', '-', '+'):
31 # diff line - don't check for header pattern in there
31 # diff line - don't check for header pattern in there
32 return False
32 return False
33 l = line.split(': ', 1)
33 l = line.split(': ', 1)
34 return len(l) == 2 and ' ' not in l[0]
34 return len(l) == 2 and ' ' not in l[0]
35
35
36 def chunk(lines):
36 def chunk(lines):
37 return cStringIO.StringIO(''.join(lines))
37 return cStringIO.StringIO(''.join(lines))
38
38
39 def hgsplit(stream, cur):
39 def hgsplit(stream, cur):
40 inheader = True
40 inheader = True
41
41
42 for line in stream:
42 for line in stream:
43 if not line.strip():
43 if not line.strip():
44 inheader = False
44 inheader = False
45 if not inheader and line.startswith('# HG changeset patch'):
45 if not inheader and line.startswith('# HG changeset patch'):
46 yield chunk(cur)
46 yield chunk(cur)
47 cur = []
47 cur = []
48 inheader = True
48 inheader = True
49
49
50 cur.append(line)
50 cur.append(line)
51
51
52 if cur:
52 if cur:
53 yield chunk(cur)
53 yield chunk(cur)
54
54
55 def mboxsplit(stream, cur):
55 def mboxsplit(stream, cur):
56 for line in stream:
56 for line in stream:
57 if line.startswith('From '):
57 if line.startswith('From '):
58 for c in split(chunk(cur[1:])):
58 for c in split(chunk(cur[1:])):
59 yield c
59 yield c
60 cur = []
60 cur = []
61
61
62 cur.append(line)
62 cur.append(line)
63
63
64 if cur:
64 if cur:
65 for c in split(chunk(cur[1:])):
65 for c in split(chunk(cur[1:])):
66 yield c
66 yield c
67
67
68 def mimesplit(stream, cur):
68 def mimesplit(stream, cur):
69 def msgfp(m):
69 def msgfp(m):
70 fp = cStringIO.StringIO()
70 fp = cStringIO.StringIO()
71 g = email.Generator.Generator(fp, mangle_from_=False)
71 g = email.Generator.Generator(fp, mangle_from_=False)
72 g.flatten(m)
72 g.flatten(m)
73 fp.seek(0)
73 fp.seek(0)
74 return fp
74 return fp
75
75
76 for line in stream:
76 for line in stream:
77 cur.append(line)
77 cur.append(line)
78 c = chunk(cur)
78 c = chunk(cur)
79
79
80 m = email.Parser.Parser().parse(c)
80 m = email.Parser.Parser().parse(c)
81 if not m.is_multipart():
81 if not m.is_multipart():
82 yield msgfp(m)
82 yield msgfp(m)
83 else:
83 else:
84 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
84 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 for part in m.walk():
85 for part in m.walk():
86 ct = part.get_content_type()
86 ct = part.get_content_type()
87 if ct not in ok_types:
87 if ct not in ok_types:
88 continue
88 continue
89 yield msgfp(part)
89 yield msgfp(part)
90
90
91 def headersplit(stream, cur):
91 def headersplit(stream, cur):
92 inheader = False
92 inheader = False
93
93
94 for line in stream:
94 for line in stream:
95 if not inheader and isheader(line, inheader):
95 if not inheader and isheader(line, inheader):
96 yield chunk(cur)
96 yield chunk(cur)
97 cur = []
97 cur = []
98 inheader = True
98 inheader = True
99 if inheader and not isheader(line, inheader):
99 if inheader and not isheader(line, inheader):
100 inheader = False
100 inheader = False
101
101
102 cur.append(line)
102 cur.append(line)
103
103
104 if cur:
104 if cur:
105 yield chunk(cur)
105 yield chunk(cur)
106
106
107 def remainder(cur):
107 def remainder(cur):
108 yield chunk(cur)
108 yield chunk(cur)
109
109
110 class fiter(object):
110 class fiter(object):
111 def __init__(self, fp):
111 def __init__(self, fp):
112 self.fp = fp
112 self.fp = fp
113
113
114 def __iter__(self):
114 def __iter__(self):
115 return self
115 return self
116
116
117 def next(self):
117 def next(self):
118 l = self.fp.readline()
118 l = self.fp.readline()
119 if not l:
119 if not l:
120 raise StopIteration
120 raise StopIteration
121 return l
121 return l
122
122
123 inheader = False
123 inheader = False
124 cur = []
124 cur = []
125
125
126 mimeheaders = ['content-type']
126 mimeheaders = ['content-type']
127
127
128 if not hasattr(stream, 'next'):
128 if not hasattr(stream, 'next'):
129 # http responses, for example, have readline but not next
129 # http responses, for example, have readline but not next
130 stream = fiter(stream)
130 stream = fiter(stream)
131
131
132 for line in stream:
132 for line in stream:
133 cur.append(line)
133 cur.append(line)
134 if line.startswith('# HG changeset patch'):
134 if line.startswith('# HG changeset patch'):
135 return hgsplit(stream, cur)
135 return hgsplit(stream, cur)
136 elif line.startswith('From '):
136 elif line.startswith('From '):
137 return mboxsplit(stream, cur)
137 return mboxsplit(stream, cur)
138 elif isheader(line, inheader):
138 elif isheader(line, inheader):
139 inheader = True
139 inheader = True
140 if line.split(':', 1)[0].lower() in mimeheaders:
140 if line.split(':', 1)[0].lower() in mimeheaders:
141 # let email parser handle this
141 # let email parser handle this
142 return mimesplit(stream, cur)
142 return mimesplit(stream, cur)
143 elif line.startswith('--- ') and inheader:
143 elif line.startswith('--- ') and inheader:
144 # No evil headers seen by diff start, split by hand
144 # No evil headers seen by diff start, split by hand
145 return headersplit(stream, cur)
145 return headersplit(stream, cur)
146 # Not enough info, keep reading
146 # Not enough info, keep reading
147
147
148 # if we are here, we have a very plain patch
148 # if we are here, we have a very plain patch
149 return remainder(cur)
149 return remainder(cur)
150
150
151 def extract(ui, fileobj):
151 def extract(ui, fileobj):
152 '''extract patch from data read from fileobj.
152 '''extract patch from data read from fileobj.
153
153
154 patch can be a normal patch or contained in an email message.
154 patch can be a normal patch or contained in an email message.
155
155
156 return tuple (filename, message, user, date, branch, node, p1, p2).
156 return tuple (filename, message, user, date, branch, node, p1, p2).
157 Any item in the returned tuple can be None. If filename is None,
157 Any item in the returned tuple can be None. If filename is None,
158 fileobj did not contain a patch. Caller must unlink filename when done.'''
158 fileobj did not contain a patch. Caller must unlink filename when done.'''
159
159
160 # attempt to detect the start of a patch
160 # attempt to detect the start of a patch
161 # (this heuristic is borrowed from quilt)
161 # (this heuristic is borrowed from quilt)
162 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
162 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
163 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 r'---[ \t].*?^\+\+\+[ \t]|'
164 r'---[ \t].*?^\+\+\+[ \t]|'
165 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
165 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166
166
167 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
167 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 tmpfp = os.fdopen(fd, 'w')
168 tmpfp = os.fdopen(fd, 'w')
169 try:
169 try:
170 msg = email.Parser.Parser().parse(fileobj)
170 msg = email.Parser.Parser().parse(fileobj)
171
171
172 subject = msg['Subject']
172 subject = msg['Subject']
173 user = msg['From']
173 user = msg['From']
174 if not subject and not user:
174 if not subject and not user:
175 # Not an email, restore parsed headers if any
175 # Not an email, restore parsed headers if any
176 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
176 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177
177
178 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
178 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 # should try to parse msg['Date']
179 # should try to parse msg['Date']
180 date = None
180 date = None
181 nodeid = None
181 nodeid = None
182 branch = None
182 branch = None
183 parents = []
183 parents = []
184
184
185 if subject:
185 if subject:
186 if subject.startswith('[PATCH'):
186 if subject.startswith('[PATCH'):
187 pend = subject.find(']')
187 pend = subject.find(']')
188 if pend >= 0:
188 if pend >= 0:
189 subject = subject[pend + 1:].lstrip()
189 subject = subject[pend + 1:].lstrip()
190 subject = subject.replace('\n\t', ' ')
190 subject = subject.replace('\n\t', ' ')
191 ui.debug('Subject: %s\n' % subject)
191 ui.debug('Subject: %s\n' % subject)
192 if user:
192 if user:
193 ui.debug('From: %s\n' % user)
193 ui.debug('From: %s\n' % user)
194 diffs_seen = 0
194 diffs_seen = 0
195 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
195 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 message = ''
196 message = ''
197 for part in msg.walk():
197 for part in msg.walk():
198 content_type = part.get_content_type()
198 content_type = part.get_content_type()
199 ui.debug('Content-Type: %s\n' % content_type)
199 ui.debug('Content-Type: %s\n' % content_type)
200 if content_type not in ok_types:
200 if content_type not in ok_types:
201 continue
201 continue
202 payload = part.get_payload(decode=True)
202 payload = part.get_payload(decode=True)
203 m = diffre.search(payload)
203 m = diffre.search(payload)
204 if m:
204 if m:
205 hgpatch = False
205 hgpatch = False
206 hgpatchheader = False
206 hgpatchheader = False
207 ignoretext = False
207 ignoretext = False
208
208
209 ui.debug('found patch at byte %d\n' % m.start(0))
209 ui.debug('found patch at byte %d\n' % m.start(0))
210 diffs_seen += 1
210 diffs_seen += 1
211 cfp = cStringIO.StringIO()
211 cfp = cStringIO.StringIO()
212 for line in payload[:m.start(0)].splitlines():
212 for line in payload[:m.start(0)].splitlines():
213 if line.startswith('# HG changeset patch') and not hgpatch:
213 if line.startswith('# HG changeset patch') and not hgpatch:
214 ui.debug('patch generated by hg export\n')
214 ui.debug('patch generated by hg export\n')
215 hgpatch = True
215 hgpatch = True
216 hgpatchheader = True
216 hgpatchheader = True
217 # drop earlier commit message content
217 # drop earlier commit message content
218 cfp.seek(0)
218 cfp.seek(0)
219 cfp.truncate()
219 cfp.truncate()
220 subject = None
220 subject = None
221 elif hgpatchheader:
221 elif hgpatchheader:
222 if line.startswith('# User '):
222 if line.startswith('# User '):
223 user = line[7:]
223 user = line[7:]
224 ui.debug('From: %s\n' % user)
224 ui.debug('From: %s\n' % user)
225 elif line.startswith("# Date "):
225 elif line.startswith("# Date "):
226 date = line[7:]
226 date = line[7:]
227 elif line.startswith("# Branch "):
227 elif line.startswith("# Branch "):
228 branch = line[9:]
228 branch = line[9:]
229 elif line.startswith("# Node ID "):
229 elif line.startswith("# Node ID "):
230 nodeid = line[10:]
230 nodeid = line[10:]
231 elif line.startswith("# Parent "):
231 elif line.startswith("# Parent "):
232 parents.append(line[10:])
232 parents.append(line[10:])
233 elif not line.startswith("# "):
233 elif not line.startswith("# "):
234 hgpatchheader = False
234 hgpatchheader = False
235 elif line == '---' and gitsendmail:
235 elif line == '---' and gitsendmail:
236 ignoretext = True
236 ignoretext = True
237 if not hgpatchheader and not ignoretext:
237 if not hgpatchheader and not ignoretext:
238 cfp.write(line)
238 cfp.write(line)
239 cfp.write('\n')
239 cfp.write('\n')
240 message = cfp.getvalue()
240 message = cfp.getvalue()
241 if tmpfp:
241 if tmpfp:
242 tmpfp.write(payload)
242 tmpfp.write(payload)
243 if not payload.endswith('\n'):
243 if not payload.endswith('\n'):
244 tmpfp.write('\n')
244 tmpfp.write('\n')
245 elif not diffs_seen and message and content_type == 'text/plain':
245 elif not diffs_seen and message and content_type == 'text/plain':
246 message += '\n' + payload
246 message += '\n' + payload
247 except:
247 except:
248 tmpfp.close()
248 tmpfp.close()
249 os.unlink(tmpname)
249 os.unlink(tmpname)
250 raise
250 raise
251
251
252 if subject and not message.startswith(subject):
252 if subject and not message.startswith(subject):
253 message = '%s\n%s' % (subject, message)
253 message = '%s\n%s' % (subject, message)
254 tmpfp.close()
254 tmpfp.close()
255 if not diffs_seen:
255 if not diffs_seen:
256 os.unlink(tmpname)
256 os.unlink(tmpname)
257 return None, message, user, date, branch, None, None, None
257 return None, message, user, date, branch, None, None, None
258 p1 = parents and parents.pop(0) or None
258 p1 = parents and parents.pop(0) or None
259 p2 = parents and parents.pop(0) or None
259 p2 = parents and parents.pop(0) or None
260 return tmpname, message, user, date, branch, nodeid, p1, p2
260 return tmpname, message, user, date, branch, nodeid, p1, p2
261
261
262 class patchmeta(object):
262 class patchmeta(object):
263 """Patched file metadata
263 """Patched file metadata
264
264
265 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
265 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
266 or COPY. 'path' is patched file path. 'oldpath' is set to the
266 or COPY. 'path' is patched file path. 'oldpath' is set to the
267 origin file when 'op' is either COPY or RENAME, None otherwise. If
267 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 file mode is changed, 'mode' is a tuple (islink, isexec) where
268 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 'islink' is True if the file is a symlink and 'isexec' is True if
269 'islink' is True if the file is a symlink and 'isexec' is True if
270 the file is executable. Otherwise, 'mode' is None.
270 the file is executable. Otherwise, 'mode' is None.
271 """
271 """
272 def __init__(self, path):
272 def __init__(self, path):
273 self.path = path
273 self.path = path
274 self.oldpath = None
274 self.oldpath = None
275 self.mode = None
275 self.mode = None
276 self.op = 'MODIFY'
276 self.op = 'MODIFY'
277 self.binary = False
277 self.binary = False
278
278
279 def setmode(self, mode):
279 def setmode(self, mode):
280 islink = mode & 020000
280 islink = mode & 020000
281 isexec = mode & 0100
281 isexec = mode & 0100
282 self.mode = (islink, isexec)
282 self.mode = (islink, isexec)
283
283
284 def __repr__(self):
284 def __repr__(self):
285 return "<patchmeta %s %r>" % (self.op, self.path)
285 return "<patchmeta %s %r>" % (self.op, self.path)
286
286
287 def readgitpatch(lr):
287 def readgitpatch(lr):
288 """extract git-style metadata about patches from <patchname>"""
288 """extract git-style metadata about patches from <patchname>"""
289
289
290 # Filter patch for git information
290 # Filter patch for git information
291 gp = None
291 gp = None
292 gitpatches = []
292 gitpatches = []
293 for line in lr:
293 for line in lr:
294 line = line.rstrip(' \r\n')
294 line = line.rstrip(' \r\n')
295 if line.startswith('diff --git'):
295 if line.startswith('diff --git'):
296 m = gitre.match(line)
296 m = gitre.match(line)
297 if m:
297 if m:
298 if gp:
298 if gp:
299 gitpatches.append(gp)
299 gitpatches.append(gp)
300 dst = m.group(2)
300 dst = m.group(2)
301 gp = patchmeta(dst)
301 gp = patchmeta(dst)
302 elif gp:
302 elif gp:
303 if line.startswith('--- '):
303 if line.startswith('--- '):
304 gitpatches.append(gp)
304 gitpatches.append(gp)
305 gp = None
305 gp = None
306 continue
306 continue
307 if line.startswith('rename from '):
307 if line.startswith('rename from '):
308 gp.op = 'RENAME'
308 gp.op = 'RENAME'
309 gp.oldpath = line[12:]
309 gp.oldpath = line[12:]
310 elif line.startswith('rename to '):
310 elif line.startswith('rename to '):
311 gp.path = line[10:]
311 gp.path = line[10:]
312 elif line.startswith('copy from '):
312 elif line.startswith('copy from '):
313 gp.op = 'COPY'
313 gp.op = 'COPY'
314 gp.oldpath = line[10:]
314 gp.oldpath = line[10:]
315 elif line.startswith('copy to '):
315 elif line.startswith('copy to '):
316 gp.path = line[8:]
316 gp.path = line[8:]
317 elif line.startswith('deleted file'):
317 elif line.startswith('deleted file'):
318 gp.op = 'DELETE'
318 gp.op = 'DELETE'
319 elif line.startswith('new file mode '):
319 elif line.startswith('new file mode '):
320 gp.op = 'ADD'
320 gp.op = 'ADD'
321 gp.setmode(int(line[-6:], 8))
321 gp.setmode(int(line[-6:], 8))
322 elif line.startswith('new mode '):
322 elif line.startswith('new mode '):
323 gp.setmode(int(line[-6:], 8))
323 gp.setmode(int(line[-6:], 8))
324 elif line.startswith('GIT binary patch'):
324 elif line.startswith('GIT binary patch'):
325 gp.binary = True
325 gp.binary = True
326 if gp:
326 if gp:
327 gitpatches.append(gp)
327 gitpatches.append(gp)
328
328
329 return gitpatches
329 return gitpatches
330
330
331 class linereader(object):
331 class linereader(object):
332 # simple class to allow pushing lines back into the input stream
332 # simple class to allow pushing lines back into the input stream
333 def __init__(self, fp):
333 def __init__(self, fp):
334 self.fp = fp
334 self.fp = fp
335 self.buf = []
335 self.buf = []
336
336
337 def push(self, line):
337 def push(self, line):
338 if line is not None:
338 if line is not None:
339 self.buf.append(line)
339 self.buf.append(line)
340
340
341 def readline(self):
341 def readline(self):
342 if self.buf:
342 if self.buf:
343 l = self.buf[0]
343 l = self.buf[0]
344 del self.buf[0]
344 del self.buf[0]
345 return l
345 return l
346 return self.fp.readline()
346 return self.fp.readline()
347
347
348 def __iter__(self):
348 def __iter__(self):
349 while 1:
349 while True:
350 l = self.readline()
350 l = self.readline()
351 if not l:
351 if not l:
352 break
352 break
353 yield l
353 yield l
354
354
355 class abstractbackend(object):
355 class abstractbackend(object):
356 def __init__(self, ui):
356 def __init__(self, ui):
357 self.ui = ui
357 self.ui = ui
358
358
359 def getfile(self, fname):
359 def getfile(self, fname):
360 """Return target file data and flags as a (data, (islink,
360 """Return target file data and flags as a (data, (islink,
361 isexec)) tuple.
361 isexec)) tuple.
362 """
362 """
363 raise NotImplementedError
363 raise NotImplementedError
364
364
365 def setfile(self, fname, data, mode, copysource):
365 def setfile(self, fname, data, mode, copysource):
366 """Write data to target file fname and set its mode. mode is a
366 """Write data to target file fname and set its mode. mode is a
367 (islink, isexec) tuple. If data is None, the file content should
367 (islink, isexec) tuple. If data is None, the file content should
368 be left unchanged. If the file is modified after being copied,
368 be left unchanged. If the file is modified after being copied,
369 copysource is set to the original file name.
369 copysource is set to the original file name.
370 """
370 """
371 raise NotImplementedError
371 raise NotImplementedError
372
372
373 def unlink(self, fname):
373 def unlink(self, fname):
374 """Unlink target file."""
374 """Unlink target file."""
375 raise NotImplementedError
375 raise NotImplementedError
376
376
377 def writerej(self, fname, failed, total, lines):
377 def writerej(self, fname, failed, total, lines):
378 """Write rejected lines for fname. total is the number of hunks
378 """Write rejected lines for fname. total is the number of hunks
379 which failed to apply and total the total number of hunks for this
379 which failed to apply and total the total number of hunks for this
380 files.
380 files.
381 """
381 """
382 pass
382 pass
383
383
384 def exists(self, fname):
384 def exists(self, fname):
385 raise NotImplementedError
385 raise NotImplementedError
386
386
387 class fsbackend(abstractbackend):
387 class fsbackend(abstractbackend):
388 def __init__(self, ui, basedir):
388 def __init__(self, ui, basedir):
389 super(fsbackend, self).__init__(ui)
389 super(fsbackend, self).__init__(ui)
390 self.opener = scmutil.opener(basedir)
390 self.opener = scmutil.opener(basedir)
391
391
392 def _join(self, f):
392 def _join(self, f):
393 return os.path.join(self.opener.base, f)
393 return os.path.join(self.opener.base, f)
394
394
395 def getfile(self, fname):
395 def getfile(self, fname):
396 path = self._join(fname)
396 path = self._join(fname)
397 if os.path.islink(path):
397 if os.path.islink(path):
398 return (os.readlink(path), (True, False))
398 return (os.readlink(path), (True, False))
399 isexec, islink = False, False
399 isexec, islink = False, False
400 try:
400 try:
401 isexec = os.lstat(path).st_mode & 0100 != 0
401 isexec = os.lstat(path).st_mode & 0100 != 0
402 islink = os.path.islink(path)
402 islink = os.path.islink(path)
403 except OSError, e:
403 except OSError, e:
404 if e.errno != errno.ENOENT:
404 if e.errno != errno.ENOENT:
405 raise
405 raise
406 return (self.opener.read(fname), (islink, isexec))
406 return (self.opener.read(fname), (islink, isexec))
407
407
408 def setfile(self, fname, data, mode, copysource):
408 def setfile(self, fname, data, mode, copysource):
409 islink, isexec = mode
409 islink, isexec = mode
410 if data is None:
410 if data is None:
411 util.setflags(self._join(fname), islink, isexec)
411 util.setflags(self._join(fname), islink, isexec)
412 return
412 return
413 if islink:
413 if islink:
414 self.opener.symlink(data, fname)
414 self.opener.symlink(data, fname)
415 else:
415 else:
416 self.opener.write(fname, data)
416 self.opener.write(fname, data)
417 if isexec:
417 if isexec:
418 util.setflags(self._join(fname), False, True)
418 util.setflags(self._join(fname), False, True)
419
419
420 def unlink(self, fname):
420 def unlink(self, fname):
421 try:
421 try:
422 util.unlinkpath(self._join(fname))
422 util.unlinkpath(self._join(fname))
423 except OSError, inst:
423 except OSError, inst:
424 if inst.errno != errno.ENOENT:
424 if inst.errno != errno.ENOENT:
425 raise
425 raise
426
426
427 def writerej(self, fname, failed, total, lines):
427 def writerej(self, fname, failed, total, lines):
428 fname = fname + ".rej"
428 fname = fname + ".rej"
429 self.ui.warn(
429 self.ui.warn(
430 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
430 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
431 (failed, total, fname))
431 (failed, total, fname))
432 fp = self.opener(fname, 'w')
432 fp = self.opener(fname, 'w')
433 fp.writelines(lines)
433 fp.writelines(lines)
434 fp.close()
434 fp.close()
435
435
436 def exists(self, fname):
436 def exists(self, fname):
437 return os.path.lexists(self._join(fname))
437 return os.path.lexists(self._join(fname))
438
438
439 class workingbackend(fsbackend):
439 class workingbackend(fsbackend):
440 def __init__(self, ui, repo, similarity):
440 def __init__(self, ui, repo, similarity):
441 super(workingbackend, self).__init__(ui, repo.root)
441 super(workingbackend, self).__init__(ui, repo.root)
442 self.repo = repo
442 self.repo = repo
443 self.similarity = similarity
443 self.similarity = similarity
444 self.removed = set()
444 self.removed = set()
445 self.changed = set()
445 self.changed = set()
446 self.copied = []
446 self.copied = []
447
447
448 def _checkknown(self, fname):
448 def _checkknown(self, fname):
449 if self.repo.dirstate[fname] == '?' and self.exists(fname):
449 if self.repo.dirstate[fname] == '?' and self.exists(fname):
450 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
450 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
451
451
452 def setfile(self, fname, data, mode, copysource):
452 def setfile(self, fname, data, mode, copysource):
453 self._checkknown(fname)
453 self._checkknown(fname)
454 super(workingbackend, self).setfile(fname, data, mode, copysource)
454 super(workingbackend, self).setfile(fname, data, mode, copysource)
455 if copysource is not None:
455 if copysource is not None:
456 self.copied.append((copysource, fname))
456 self.copied.append((copysource, fname))
457 self.changed.add(fname)
457 self.changed.add(fname)
458
458
459 def unlink(self, fname):
459 def unlink(self, fname):
460 self._checkknown(fname)
460 self._checkknown(fname)
461 super(workingbackend, self).unlink(fname)
461 super(workingbackend, self).unlink(fname)
462 self.removed.add(fname)
462 self.removed.add(fname)
463 self.changed.add(fname)
463 self.changed.add(fname)
464
464
465 def close(self):
465 def close(self):
466 wctx = self.repo[None]
466 wctx = self.repo[None]
467 addremoved = set(self.changed)
467 addremoved = set(self.changed)
468 for src, dst in self.copied:
468 for src, dst in self.copied:
469 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
469 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
470 addremoved.discard(src)
470 addremoved.discard(src)
471 if (not self.similarity) and self.removed:
471 if (not self.similarity) and self.removed:
472 wctx.forget(sorted(self.removed))
472 wctx.forget(sorted(self.removed))
473 if addremoved:
473 if addremoved:
474 cwd = self.repo.getcwd()
474 cwd = self.repo.getcwd()
475 if cwd:
475 if cwd:
476 addremoved = [util.pathto(self.repo.root, cwd, f)
476 addremoved = [util.pathto(self.repo.root, cwd, f)
477 for f in addremoved]
477 for f in addremoved]
478 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
478 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
479 return sorted(self.changed)
479 return sorted(self.changed)
480
480
481 class filestore(object):
481 class filestore(object):
482 def __init__(self):
482 def __init__(self):
483 self.opener = None
483 self.opener = None
484 self.files = {}
484 self.files = {}
485 self.created = 0
485 self.created = 0
486
486
487 def setfile(self, fname, data, mode):
487 def setfile(self, fname, data, mode):
488 if self.opener is None:
488 if self.opener is None:
489 root = tempfile.mkdtemp(prefix='hg-patch-')
489 root = tempfile.mkdtemp(prefix='hg-patch-')
490 self.opener = scmutil.opener(root)
490 self.opener = scmutil.opener(root)
491 # Avoid filename issues with these simple names
491 # Avoid filename issues with these simple names
492 fn = str(self.created)
492 fn = str(self.created)
493 self.opener.write(fn, data)
493 self.opener.write(fn, data)
494 self.created += 1
494 self.created += 1
495 self.files[fname] = (fn, mode)
495 self.files[fname] = (fn, mode)
496
496
497 def getfile(self, fname):
497 def getfile(self, fname):
498 if fname not in self.files:
498 if fname not in self.files:
499 raise IOError()
499 raise IOError()
500 fn, mode = self.files[fname]
500 fn, mode = self.files[fname]
501 return self.opener.read(fn), mode
501 return self.opener.read(fn), mode
502
502
503 def close(self):
503 def close(self):
504 if self.opener:
504 if self.opener:
505 shutil.rmtree(self.opener.base)
505 shutil.rmtree(self.opener.base)
506
506
507 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
507 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
508 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
508 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
509 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
509 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
510 eolmodes = ['strict', 'crlf', 'lf', 'auto']
510 eolmodes = ['strict', 'crlf', 'lf', 'auto']
511
511
512 class patchfile(object):
512 class patchfile(object):
513 def __init__(self, ui, fname, backend, store, mode, create, remove,
513 def __init__(self, ui, fname, backend, store, mode, create, remove,
514 eolmode='strict', copysource=None):
514 eolmode='strict', copysource=None):
515 self.fname = fname
515 self.fname = fname
516 self.eolmode = eolmode
516 self.eolmode = eolmode
517 self.eol = None
517 self.eol = None
518 self.backend = backend
518 self.backend = backend
519 self.ui = ui
519 self.ui = ui
520 self.lines = []
520 self.lines = []
521 self.exists = False
521 self.exists = False
522 self.missing = True
522 self.missing = True
523 self.mode = mode
523 self.mode = mode
524 self.copysource = copysource
524 self.copysource = copysource
525 self.create = create
525 self.create = create
526 self.remove = remove
526 self.remove = remove
527 try:
527 try:
528 if copysource is None:
528 if copysource is None:
529 data, mode = backend.getfile(fname)
529 data, mode = backend.getfile(fname)
530 self.exists = True
530 self.exists = True
531 else:
531 else:
532 data, mode = store.getfile(copysource)
532 data, mode = store.getfile(copysource)
533 self.exists = backend.exists(fname)
533 self.exists = backend.exists(fname)
534 self.missing = False
534 self.missing = False
535 if data:
535 if data:
536 self.lines = data.splitlines(True)
536 self.lines = data.splitlines(True)
537 if self.mode is None:
537 if self.mode is None:
538 self.mode = mode
538 self.mode = mode
539 if self.lines:
539 if self.lines:
540 # Normalize line endings
540 # Normalize line endings
541 if self.lines[0].endswith('\r\n'):
541 if self.lines[0].endswith('\r\n'):
542 self.eol = '\r\n'
542 self.eol = '\r\n'
543 elif self.lines[0].endswith('\n'):
543 elif self.lines[0].endswith('\n'):
544 self.eol = '\n'
544 self.eol = '\n'
545 if eolmode != 'strict':
545 if eolmode != 'strict':
546 nlines = []
546 nlines = []
547 for l in self.lines:
547 for l in self.lines:
548 if l.endswith('\r\n'):
548 if l.endswith('\r\n'):
549 l = l[:-2] + '\n'
549 l = l[:-2] + '\n'
550 nlines.append(l)
550 nlines.append(l)
551 self.lines = nlines
551 self.lines = nlines
552 except IOError:
552 except IOError:
553 if create:
553 if create:
554 self.missing = False
554 self.missing = False
555 if self.mode is None:
555 if self.mode is None:
556 self.mode = (False, False)
556 self.mode = (False, False)
557 if self.missing:
557 if self.missing:
558 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
558 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
559
559
560 self.hash = {}
560 self.hash = {}
561 self.dirty = 0
561 self.dirty = 0
562 self.offset = 0
562 self.offset = 0
563 self.skew = 0
563 self.skew = 0
564 self.rej = []
564 self.rej = []
565 self.fileprinted = False
565 self.fileprinted = False
566 self.printfile(False)
566 self.printfile(False)
567 self.hunks = 0
567 self.hunks = 0
568
568
569 def writelines(self, fname, lines, mode):
569 def writelines(self, fname, lines, mode):
570 if self.eolmode == 'auto':
570 if self.eolmode == 'auto':
571 eol = self.eol
571 eol = self.eol
572 elif self.eolmode == 'crlf':
572 elif self.eolmode == 'crlf':
573 eol = '\r\n'
573 eol = '\r\n'
574 else:
574 else:
575 eol = '\n'
575 eol = '\n'
576
576
577 if self.eolmode != 'strict' and eol and eol != '\n':
577 if self.eolmode != 'strict' and eol and eol != '\n':
578 rawlines = []
578 rawlines = []
579 for l in lines:
579 for l in lines:
580 if l and l[-1] == '\n':
580 if l and l[-1] == '\n':
581 l = l[:-1] + eol
581 l = l[:-1] + eol
582 rawlines.append(l)
582 rawlines.append(l)
583 lines = rawlines
583 lines = rawlines
584
584
585 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
585 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
586
586
587 def printfile(self, warn):
587 def printfile(self, warn):
588 if self.fileprinted:
588 if self.fileprinted:
589 return
589 return
590 if warn or self.ui.verbose:
590 if warn or self.ui.verbose:
591 self.fileprinted = True
591 self.fileprinted = True
592 s = _("patching file %s\n") % self.fname
592 s = _("patching file %s\n") % self.fname
593 if warn:
593 if warn:
594 self.ui.warn(s)
594 self.ui.warn(s)
595 else:
595 else:
596 self.ui.note(s)
596 self.ui.note(s)
597
597
598
598
599 def findlines(self, l, linenum):
599 def findlines(self, l, linenum):
600 # looks through the hash and finds candidate lines. The
600 # looks through the hash and finds candidate lines. The
601 # result is a list of line numbers sorted based on distance
601 # result is a list of line numbers sorted based on distance
602 # from linenum
602 # from linenum
603
603
604 cand = self.hash.get(l, [])
604 cand = self.hash.get(l, [])
605 if len(cand) > 1:
605 if len(cand) > 1:
606 # resort our list of potentials forward then back.
606 # resort our list of potentials forward then back.
607 cand.sort(key=lambda x: abs(x - linenum))
607 cand.sort(key=lambda x: abs(x - linenum))
608 return cand
608 return cand
609
609
610 def write_rej(self):
610 def write_rej(self):
611 # our rejects are a little different from patch(1). This always
611 # our rejects are a little different from patch(1). This always
612 # creates rejects in the same form as the original patch. A file
612 # creates rejects in the same form as the original patch. A file
613 # header is inserted so that you can run the reject through patch again
613 # header is inserted so that you can run the reject through patch again
614 # without having to type the filename.
614 # without having to type the filename.
615 if not self.rej:
615 if not self.rej:
616 return
616 return
617 base = os.path.basename(self.fname)
617 base = os.path.basename(self.fname)
618 lines = ["--- %s\n+++ %s\n" % (base, base)]
618 lines = ["--- %s\n+++ %s\n" % (base, base)]
619 for x in self.rej:
619 for x in self.rej:
620 for l in x.hunk:
620 for l in x.hunk:
621 lines.append(l)
621 lines.append(l)
622 if l[-1] != '\n':
622 if l[-1] != '\n':
623 lines.append("\n\ No newline at end of file\n")
623 lines.append("\n\ No newline at end of file\n")
624 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
624 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
625
625
626 def apply(self, h):
626 def apply(self, h):
627 if not h.complete():
627 if not h.complete():
628 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
628 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
629 (h.number, h.desc, len(h.a), h.lena, len(h.b),
629 (h.number, h.desc, len(h.a), h.lena, len(h.b),
630 h.lenb))
630 h.lenb))
631
631
632 self.hunks += 1
632 self.hunks += 1
633
633
634 if self.missing:
634 if self.missing:
635 self.rej.append(h)
635 self.rej.append(h)
636 return -1
636 return -1
637
637
638 if self.exists and self.create:
638 if self.exists and self.create:
639 if self.copysource:
639 if self.copysource:
640 self.ui.warn(_("cannot create %s: destination already "
640 self.ui.warn(_("cannot create %s: destination already "
641 "exists\n" % self.fname))
641 "exists\n" % self.fname))
642 else:
642 else:
643 self.ui.warn(_("file %s already exists\n") % self.fname)
643 self.ui.warn(_("file %s already exists\n") % self.fname)
644 self.rej.append(h)
644 self.rej.append(h)
645 return -1
645 return -1
646
646
647 if isinstance(h, binhunk):
647 if isinstance(h, binhunk):
648 if self.remove:
648 if self.remove:
649 self.backend.unlink(self.fname)
649 self.backend.unlink(self.fname)
650 else:
650 else:
651 self.lines[:] = h.new()
651 self.lines[:] = h.new()
652 self.offset += len(h.new())
652 self.offset += len(h.new())
653 self.dirty = True
653 self.dirty = True
654 return 0
654 return 0
655
655
656 horig = h
656 horig = h
657 if (self.eolmode in ('crlf', 'lf')
657 if (self.eolmode in ('crlf', 'lf')
658 or self.eolmode == 'auto' and self.eol):
658 or self.eolmode == 'auto' and self.eol):
659 # If new eols are going to be normalized, then normalize
659 # If new eols are going to be normalized, then normalize
660 # hunk data before patching. Otherwise, preserve input
660 # hunk data before patching. Otherwise, preserve input
661 # line-endings.
661 # line-endings.
662 h = h.getnormalized()
662 h = h.getnormalized()
663
663
664 # fast case first, no offsets, no fuzz
664 # fast case first, no offsets, no fuzz
665 old = h.old()
665 old = h.old()
666 # patch starts counting at 1 unless we are adding the file
666 # patch starts counting at 1 unless we are adding the file
667 if h.starta == 0:
667 if h.starta == 0:
668 start = 0
668 start = 0
669 else:
669 else:
670 start = h.starta + self.offset - 1
670 start = h.starta + self.offset - 1
671 orig_start = start
671 orig_start = start
672 # if there's skew we want to emit the "(offset %d lines)" even
672 # if there's skew we want to emit the "(offset %d lines)" even
673 # when the hunk cleanly applies at start + skew, so skip the
673 # when the hunk cleanly applies at start + skew, so skip the
674 # fast case code
674 # fast case code
675 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
675 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
676 if self.remove:
676 if self.remove:
677 self.backend.unlink(self.fname)
677 self.backend.unlink(self.fname)
678 else:
678 else:
679 self.lines[start : start + h.lena] = h.new()
679 self.lines[start : start + h.lena] = h.new()
680 self.offset += h.lenb - h.lena
680 self.offset += h.lenb - h.lena
681 self.dirty = True
681 self.dirty = True
682 return 0
682 return 0
683
683
684 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
684 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
685 self.hash = {}
685 self.hash = {}
686 for x, s in enumerate(self.lines):
686 for x, s in enumerate(self.lines):
687 self.hash.setdefault(s, []).append(x)
687 self.hash.setdefault(s, []).append(x)
688 if h.hunk[-1][0] != ' ':
688 if h.hunk[-1][0] != ' ':
689 # if the hunk tried to put something at the bottom of the file
689 # if the hunk tried to put something at the bottom of the file
690 # override the start line and use eof here
690 # override the start line and use eof here
691 search_start = len(self.lines)
691 search_start = len(self.lines)
692 else:
692 else:
693 search_start = orig_start + self.skew
693 search_start = orig_start + self.skew
694
694
695 for fuzzlen in xrange(3):
695 for fuzzlen in xrange(3):
696 for toponly in [True, False]:
696 for toponly in [True, False]:
697 old = h.old(fuzzlen, toponly)
697 old = h.old(fuzzlen, toponly)
698
698
699 cand = self.findlines(old[0][1:], search_start)
699 cand = self.findlines(old[0][1:], search_start)
700 for l in cand:
700 for l in cand:
701 if diffhelpers.testhunk(old, self.lines, l) == 0:
701 if diffhelpers.testhunk(old, self.lines, l) == 0:
702 newlines = h.new(fuzzlen, toponly)
702 newlines = h.new(fuzzlen, toponly)
703 self.lines[l : l + len(old)] = newlines
703 self.lines[l : l + len(old)] = newlines
704 self.offset += len(newlines) - len(old)
704 self.offset += len(newlines) - len(old)
705 self.skew = l - orig_start
705 self.skew = l - orig_start
706 self.dirty = True
706 self.dirty = True
707 offset = l - orig_start - fuzzlen
707 offset = l - orig_start - fuzzlen
708 if fuzzlen:
708 if fuzzlen:
709 msg = _("Hunk #%d succeeded at %d "
709 msg = _("Hunk #%d succeeded at %d "
710 "with fuzz %d "
710 "with fuzz %d "
711 "(offset %d lines).\n")
711 "(offset %d lines).\n")
712 self.printfile(True)
712 self.printfile(True)
713 self.ui.warn(msg %
713 self.ui.warn(msg %
714 (h.number, l + 1, fuzzlen, offset))
714 (h.number, l + 1, fuzzlen, offset))
715 else:
715 else:
716 msg = _("Hunk #%d succeeded at %d "
716 msg = _("Hunk #%d succeeded at %d "
717 "(offset %d lines).\n")
717 "(offset %d lines).\n")
718 self.ui.note(msg % (h.number, l + 1, offset))
718 self.ui.note(msg % (h.number, l + 1, offset))
719 return fuzzlen
719 return fuzzlen
720 self.printfile(True)
720 self.printfile(True)
721 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
721 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
722 self.rej.append(horig)
722 self.rej.append(horig)
723 return -1
723 return -1
724
724
725 def close(self):
725 def close(self):
726 if self.dirty:
726 if self.dirty:
727 self.writelines(self.fname, self.lines, self.mode)
727 self.writelines(self.fname, self.lines, self.mode)
728 self.write_rej()
728 self.write_rej()
729 return len(self.rej)
729 return len(self.rej)
730
730
731 class hunk(object):
731 class hunk(object):
732 def __init__(self, desc, num, lr, context):
732 def __init__(self, desc, num, lr, context):
733 self.number = num
733 self.number = num
734 self.desc = desc
734 self.desc = desc
735 self.hunk = [desc]
735 self.hunk = [desc]
736 self.a = []
736 self.a = []
737 self.b = []
737 self.b = []
738 self.starta = self.lena = None
738 self.starta = self.lena = None
739 self.startb = self.lenb = None
739 self.startb = self.lenb = None
740 if lr is not None:
740 if lr is not None:
741 if context:
741 if context:
742 self.read_context_hunk(lr)
742 self.read_context_hunk(lr)
743 else:
743 else:
744 self.read_unified_hunk(lr)
744 self.read_unified_hunk(lr)
745
745
746 def getnormalized(self):
746 def getnormalized(self):
747 """Return a copy with line endings normalized to LF."""
747 """Return a copy with line endings normalized to LF."""
748
748
749 def normalize(lines):
749 def normalize(lines):
750 nlines = []
750 nlines = []
751 for line in lines:
751 for line in lines:
752 if line.endswith('\r\n'):
752 if line.endswith('\r\n'):
753 line = line[:-2] + '\n'
753 line = line[:-2] + '\n'
754 nlines.append(line)
754 nlines.append(line)
755 return nlines
755 return nlines
756
756
757 # Dummy object, it is rebuilt manually
757 # Dummy object, it is rebuilt manually
758 nh = hunk(self.desc, self.number, None, None)
758 nh = hunk(self.desc, self.number, None, None)
759 nh.number = self.number
759 nh.number = self.number
760 nh.desc = self.desc
760 nh.desc = self.desc
761 nh.hunk = self.hunk
761 nh.hunk = self.hunk
762 nh.a = normalize(self.a)
762 nh.a = normalize(self.a)
763 nh.b = normalize(self.b)
763 nh.b = normalize(self.b)
764 nh.starta = self.starta
764 nh.starta = self.starta
765 nh.startb = self.startb
765 nh.startb = self.startb
766 nh.lena = self.lena
766 nh.lena = self.lena
767 nh.lenb = self.lenb
767 nh.lenb = self.lenb
768 return nh
768 return nh
769
769
770 def read_unified_hunk(self, lr):
770 def read_unified_hunk(self, lr):
771 m = unidesc.match(self.desc)
771 m = unidesc.match(self.desc)
772 if not m:
772 if not m:
773 raise PatchError(_("bad hunk #%d") % self.number)
773 raise PatchError(_("bad hunk #%d") % self.number)
774 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
774 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
775 if self.lena is None:
775 if self.lena is None:
776 self.lena = 1
776 self.lena = 1
777 else:
777 else:
778 self.lena = int(self.lena)
778 self.lena = int(self.lena)
779 if self.lenb is None:
779 if self.lenb is None:
780 self.lenb = 1
780 self.lenb = 1
781 else:
781 else:
782 self.lenb = int(self.lenb)
782 self.lenb = int(self.lenb)
783 self.starta = int(self.starta)
783 self.starta = int(self.starta)
784 self.startb = int(self.startb)
784 self.startb = int(self.startb)
785 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
785 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
786 # if we hit eof before finishing out the hunk, the last line will
786 # if we hit eof before finishing out the hunk, the last line will
787 # be zero length. Lets try to fix it up.
787 # be zero length. Lets try to fix it up.
788 while len(self.hunk[-1]) == 0:
788 while len(self.hunk[-1]) == 0:
789 del self.hunk[-1]
789 del self.hunk[-1]
790 del self.a[-1]
790 del self.a[-1]
791 del self.b[-1]
791 del self.b[-1]
792 self.lena -= 1
792 self.lena -= 1
793 self.lenb -= 1
793 self.lenb -= 1
794 self._fixnewline(lr)
794 self._fixnewline(lr)
795
795
796 def read_context_hunk(self, lr):
796 def read_context_hunk(self, lr):
797 self.desc = lr.readline()
797 self.desc = lr.readline()
798 m = contextdesc.match(self.desc)
798 m = contextdesc.match(self.desc)
799 if not m:
799 if not m:
800 raise PatchError(_("bad hunk #%d") % self.number)
800 raise PatchError(_("bad hunk #%d") % self.number)
801 foo, self.starta, foo2, aend, foo3 = m.groups()
801 foo, self.starta, foo2, aend, foo3 = m.groups()
802 self.starta = int(self.starta)
802 self.starta = int(self.starta)
803 if aend is None:
803 if aend is None:
804 aend = self.starta
804 aend = self.starta
805 self.lena = int(aend) - self.starta
805 self.lena = int(aend) - self.starta
806 if self.starta:
806 if self.starta:
807 self.lena += 1
807 self.lena += 1
808 for x in xrange(self.lena):
808 for x in xrange(self.lena):
809 l = lr.readline()
809 l = lr.readline()
810 if l.startswith('---'):
810 if l.startswith('---'):
811 # lines addition, old block is empty
811 # lines addition, old block is empty
812 lr.push(l)
812 lr.push(l)
813 break
813 break
814 s = l[2:]
814 s = l[2:]
815 if l.startswith('- ') or l.startswith('! '):
815 if l.startswith('- ') or l.startswith('! '):
816 u = '-' + s
816 u = '-' + s
817 elif l.startswith(' '):
817 elif l.startswith(' '):
818 u = ' ' + s
818 u = ' ' + s
819 else:
819 else:
820 raise PatchError(_("bad hunk #%d old text line %d") %
820 raise PatchError(_("bad hunk #%d old text line %d") %
821 (self.number, x))
821 (self.number, x))
822 self.a.append(u)
822 self.a.append(u)
823 self.hunk.append(u)
823 self.hunk.append(u)
824
824
825 l = lr.readline()
825 l = lr.readline()
826 if l.startswith('\ '):
826 if l.startswith('\ '):
827 s = self.a[-1][:-1]
827 s = self.a[-1][:-1]
828 self.a[-1] = s
828 self.a[-1] = s
829 self.hunk[-1] = s
829 self.hunk[-1] = s
830 l = lr.readline()
830 l = lr.readline()
831 m = contextdesc.match(l)
831 m = contextdesc.match(l)
832 if not m:
832 if not m:
833 raise PatchError(_("bad hunk #%d") % self.number)
833 raise PatchError(_("bad hunk #%d") % self.number)
834 foo, self.startb, foo2, bend, foo3 = m.groups()
834 foo, self.startb, foo2, bend, foo3 = m.groups()
835 self.startb = int(self.startb)
835 self.startb = int(self.startb)
836 if bend is None:
836 if bend is None:
837 bend = self.startb
837 bend = self.startb
838 self.lenb = int(bend) - self.startb
838 self.lenb = int(bend) - self.startb
839 if self.startb:
839 if self.startb:
840 self.lenb += 1
840 self.lenb += 1
841 hunki = 1
841 hunki = 1
842 for x in xrange(self.lenb):
842 for x in xrange(self.lenb):
843 l = lr.readline()
843 l = lr.readline()
844 if l.startswith('\ '):
844 if l.startswith('\ '):
845 # XXX: the only way to hit this is with an invalid line range.
845 # XXX: the only way to hit this is with an invalid line range.
846 # The no-eol marker is not counted in the line range, but I
846 # The no-eol marker is not counted in the line range, but I
847 # guess there are diff(1) out there which behave differently.
847 # guess there are diff(1) out there which behave differently.
848 s = self.b[-1][:-1]
848 s = self.b[-1][:-1]
849 self.b[-1] = s
849 self.b[-1] = s
850 self.hunk[hunki - 1] = s
850 self.hunk[hunki - 1] = s
851 continue
851 continue
852 if not l:
852 if not l:
853 # line deletions, new block is empty and we hit EOF
853 # line deletions, new block is empty and we hit EOF
854 lr.push(l)
854 lr.push(l)
855 break
855 break
856 s = l[2:]
856 s = l[2:]
857 if l.startswith('+ ') or l.startswith('! '):
857 if l.startswith('+ ') or l.startswith('! '):
858 u = '+' + s
858 u = '+' + s
859 elif l.startswith(' '):
859 elif l.startswith(' '):
860 u = ' ' + s
860 u = ' ' + s
861 elif len(self.b) == 0:
861 elif len(self.b) == 0:
862 # line deletions, new block is empty
862 # line deletions, new block is empty
863 lr.push(l)
863 lr.push(l)
864 break
864 break
865 else:
865 else:
866 raise PatchError(_("bad hunk #%d old text line %d") %
866 raise PatchError(_("bad hunk #%d old text line %d") %
867 (self.number, x))
867 (self.number, x))
868 self.b.append(s)
868 self.b.append(s)
869 while True:
869 while True:
870 if hunki >= len(self.hunk):
870 if hunki >= len(self.hunk):
871 h = ""
871 h = ""
872 else:
872 else:
873 h = self.hunk[hunki]
873 h = self.hunk[hunki]
874 hunki += 1
874 hunki += 1
875 if h == u:
875 if h == u:
876 break
876 break
877 elif h.startswith('-'):
877 elif h.startswith('-'):
878 continue
878 continue
879 else:
879 else:
880 self.hunk.insert(hunki - 1, u)
880 self.hunk.insert(hunki - 1, u)
881 break
881 break
882
882
883 if not self.a:
883 if not self.a:
884 # this happens when lines were only added to the hunk
884 # this happens when lines were only added to the hunk
885 for x in self.hunk:
885 for x in self.hunk:
886 if x.startswith('-') or x.startswith(' '):
886 if x.startswith('-') or x.startswith(' '):
887 self.a.append(x)
887 self.a.append(x)
888 if not self.b:
888 if not self.b:
889 # this happens when lines were only deleted from the hunk
889 # this happens when lines were only deleted from the hunk
890 for x in self.hunk:
890 for x in self.hunk:
891 if x.startswith('+') or x.startswith(' '):
891 if x.startswith('+') or x.startswith(' '):
892 self.b.append(x[1:])
892 self.b.append(x[1:])
893 # @@ -start,len +start,len @@
893 # @@ -start,len +start,len @@
894 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
894 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
895 self.startb, self.lenb)
895 self.startb, self.lenb)
896 self.hunk[0] = self.desc
896 self.hunk[0] = self.desc
897 self._fixnewline(lr)
897 self._fixnewline(lr)
898
898
899 def _fixnewline(self, lr):
899 def _fixnewline(self, lr):
900 l = lr.readline()
900 l = lr.readline()
901 if l.startswith('\ '):
901 if l.startswith('\ '):
902 diffhelpers.fix_newline(self.hunk, self.a, self.b)
902 diffhelpers.fix_newline(self.hunk, self.a, self.b)
903 else:
903 else:
904 lr.push(l)
904 lr.push(l)
905
905
906 def complete(self):
906 def complete(self):
907 return len(self.a) == self.lena and len(self.b) == self.lenb
907 return len(self.a) == self.lena and len(self.b) == self.lenb
908
908
909 def fuzzit(self, l, fuzz, toponly):
909 def fuzzit(self, l, fuzz, toponly):
910 # this removes context lines from the top and bottom of list 'l'. It
910 # this removes context lines from the top and bottom of list 'l'. It
911 # checks the hunk to make sure only context lines are removed, and then
911 # checks the hunk to make sure only context lines are removed, and then
912 # returns a new shortened list of lines.
912 # returns a new shortened list of lines.
913 fuzz = min(fuzz, len(l)-1)
913 fuzz = min(fuzz, len(l)-1)
914 if fuzz:
914 if fuzz:
915 top = 0
915 top = 0
916 bot = 0
916 bot = 0
917 hlen = len(self.hunk)
917 hlen = len(self.hunk)
918 for x in xrange(hlen - 1):
918 for x in xrange(hlen - 1):
919 # the hunk starts with the @@ line, so use x+1
919 # the hunk starts with the @@ line, so use x+1
920 if self.hunk[x + 1][0] == ' ':
920 if self.hunk[x + 1][0] == ' ':
921 top += 1
921 top += 1
922 else:
922 else:
923 break
923 break
924 if not toponly:
924 if not toponly:
925 for x in xrange(hlen - 1):
925 for x in xrange(hlen - 1):
926 if self.hunk[hlen - bot - 1][0] == ' ':
926 if self.hunk[hlen - bot - 1][0] == ' ':
927 bot += 1
927 bot += 1
928 else:
928 else:
929 break
929 break
930
930
931 # top and bot now count context in the hunk
931 # top and bot now count context in the hunk
932 # adjust them if either one is short
932 # adjust them if either one is short
933 context = max(top, bot, 3)
933 context = max(top, bot, 3)
934 if bot < context:
934 if bot < context:
935 bot = max(0, fuzz - (context - bot))
935 bot = max(0, fuzz - (context - bot))
936 else:
936 else:
937 bot = min(fuzz, bot)
937 bot = min(fuzz, bot)
938 if top < context:
938 if top < context:
939 top = max(0, fuzz - (context - top))
939 top = max(0, fuzz - (context - top))
940 else:
940 else:
941 top = min(fuzz, top)
941 top = min(fuzz, top)
942
942
943 return l[top:len(l)-bot]
943 return l[top:len(l)-bot]
944 return l
944 return l
945
945
946 def old(self, fuzz=0, toponly=False):
946 def old(self, fuzz=0, toponly=False):
947 return self.fuzzit(self.a, fuzz, toponly)
947 return self.fuzzit(self.a, fuzz, toponly)
948
948
949 def new(self, fuzz=0, toponly=False):
949 def new(self, fuzz=0, toponly=False):
950 return self.fuzzit(self.b, fuzz, toponly)
950 return self.fuzzit(self.b, fuzz, toponly)
951
951
952 class binhunk:
952 class binhunk:
953 'A binary patch file. Only understands literals so far.'
953 'A binary patch file. Only understands literals so far.'
954 def __init__(self, lr):
954 def __init__(self, lr):
955 self.text = None
955 self.text = None
956 self.hunk = ['GIT binary patch\n']
956 self.hunk = ['GIT binary patch\n']
957 self._read(lr)
957 self._read(lr)
958
958
959 def complete(self):
959 def complete(self):
960 return self.text is not None
960 return self.text is not None
961
961
962 def new(self):
962 def new(self):
963 return [self.text]
963 return [self.text]
964
964
965 def _read(self, lr):
965 def _read(self, lr):
966 line = lr.readline()
966 line = lr.readline()
967 self.hunk.append(line)
967 self.hunk.append(line)
968 while line and not line.startswith('literal '):
968 while line and not line.startswith('literal '):
969 line = lr.readline()
969 line = lr.readline()
970 self.hunk.append(line)
970 self.hunk.append(line)
971 if not line:
971 if not line:
972 raise PatchError(_('could not extract binary patch'))
972 raise PatchError(_('could not extract binary patch'))
973 size = int(line[8:].rstrip())
973 size = int(line[8:].rstrip())
974 dec = []
974 dec = []
975 line = lr.readline()
975 line = lr.readline()
976 self.hunk.append(line)
976 self.hunk.append(line)
977 while len(line) > 1:
977 while len(line) > 1:
978 l = line[0]
978 l = line[0]
979 if l <= 'Z' and l >= 'A':
979 if l <= 'Z' and l >= 'A':
980 l = ord(l) - ord('A') + 1
980 l = ord(l) - ord('A') + 1
981 else:
981 else:
982 l = ord(l) - ord('a') + 27
982 l = ord(l) - ord('a') + 27
983 dec.append(base85.b85decode(line[1:-1])[:l])
983 dec.append(base85.b85decode(line[1:-1])[:l])
984 line = lr.readline()
984 line = lr.readline()
985 self.hunk.append(line)
985 self.hunk.append(line)
986 text = zlib.decompress(''.join(dec))
986 text = zlib.decompress(''.join(dec))
987 if len(text) != size:
987 if len(text) != size:
988 raise PatchError(_('binary patch is %d bytes, not %d') %
988 raise PatchError(_('binary patch is %d bytes, not %d') %
989 len(text), size)
989 len(text), size)
990 self.text = text
990 self.text = text
991
991
992 def parsefilename(str):
992 def parsefilename(str):
993 # --- filename \t|space stuff
993 # --- filename \t|space stuff
994 s = str[4:].rstrip('\r\n')
994 s = str[4:].rstrip('\r\n')
995 i = s.find('\t')
995 i = s.find('\t')
996 if i < 0:
996 if i < 0:
997 i = s.find(' ')
997 i = s.find(' ')
998 if i < 0:
998 if i < 0:
999 return s
999 return s
1000 return s[:i]
1000 return s[:i]
1001
1001
1002 def pathstrip(path, strip):
1002 def pathstrip(path, strip):
1003 pathlen = len(path)
1003 pathlen = len(path)
1004 i = 0
1004 i = 0
1005 if strip == 0:
1005 if strip == 0:
1006 return '', path.rstrip()
1006 return '', path.rstrip()
1007 count = strip
1007 count = strip
1008 while count > 0:
1008 while count > 0:
1009 i = path.find('/', i)
1009 i = path.find('/', i)
1010 if i == -1:
1010 if i == -1:
1011 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1011 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1012 (count, strip, path))
1012 (count, strip, path))
1013 i += 1
1013 i += 1
1014 # consume '//' in the path
1014 # consume '//' in the path
1015 while i < pathlen - 1 and path[i] == '/':
1015 while i < pathlen - 1 and path[i] == '/':
1016 i += 1
1016 i += 1
1017 count -= 1
1017 count -= 1
1018 return path[:i].lstrip(), path[i:].rstrip()
1018 return path[:i].lstrip(), path[i:].rstrip()
1019
1019
def selectfile(backend, afile_orig, bfile_orig, hunk, strip, gp):
    """Decide which file a hunk applies to.

    Returns a (fname, create, remove) triple where create/remove tell
    the caller whether the file is being added or deleted outright.
    Raises PatchError when neither side of the hunk names a usable file.
    """
    if gp:
        # Git patches do not play games. Excluding copies from the
        # following heuristic avoids a lot of confusion
        fname = pathstrip(gp.path, strip - 1)[1]
        create = gp.op in ('ADD', 'COPY', 'RENAME')
        remove = gp.op == 'DELETE'
        # NOTE(review): 'missing' is never used in this branch -- looks
        # like a dead assignment; confirm backend.exists() has no needed
        # side effect before removing it
        missing = not create and not backend.exists(fname)
        return fname, create, remove
    # non-git patch: fall back to heuristics based on the /dev/null
    # markers and zero-length hunk ranges
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    return fname, create, remove
1071
1071
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable stream (e.g. a pipe): buffer the whole input so we
        # can rewind once the scan is done
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller re-reads the patch from where it left off
    fp.seek(pos)
    return gitpatches
1097
1097
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    # state is None until the first file header has been seen
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context is a tri-state: None until the diff style is known, then
    # True for context diffs and False for unified diffs
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        # a hunk header while a target file is selected: parse the hunk
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if gitpatches and gitpatches[-1][0] == bfile:
                gp = gitpatches.pop()[1]
            if x.startswith('GIT binary patch'):
                h = binhunk(lr)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp)
            yield 'hunk', h
        elif x.startswith('diff --git'):
            m = gitre.match(x)
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = [('b/' + gp.path, gp) for gp
                              in scangitpatch(lr, x)]
                yield 'git', [g[1] for g in gitpatches
                              if g[1].op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # emit hunkless entries for the metadata-only patches that
            # were skipped over to reach this file
            while bfile != gitpatches[-1][0]:
                gp = gitpatches.pop()[1]
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
            gp = gitpatches[-1][1]
            # copy/rename + modify should modify target, not source
            if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
                afile = bfile
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # flush any trailing metadata-only git patches
    while gitpatches:
        gp = gitpatches.pop()[1]
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1196
1196
def applydiff(ui, fp, changed, backend, store, strip=1, eolmode='strict'):
    """Read a patch from fp and attempt to apply it with the builtin patcher.

    Every filename touched by the patch is recorded as a key of the
    'changed' dict.  Returns 0 for a clean apply, 1 when any hunk
    applied with fuzz, and -1 when any hunk was rejected.

    With eolmode='strict' the patch content and the patched files are
    handled in binary mode; any other mode ignores line endings while
    matching and renormalizes them afterwards according to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store, changed,
                      strip=strip, eolmode=eolmode)
1210
1210
def _applydiff(ui, fp, patcher, backend, store, changed, strip=1,
               eolmode='strict'):
    # Worker behind applydiff(): drive iterhunks() and dispatch its
    # events.  Returns -1 on any reject, 1 on fuzz, 0 on a clean apply.

    def pstrip(p):
        # git patch paths carry an extra a/ or b/ component
        return pathstrip(p, strip - 1)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            # apply hunks to the most recently selected file
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret >= 0:
                changed.setdefault(current_file.fname, None)
                if ret > 0:
                    err = 1
        elif state == 'file':
            # close the previous target before switching files
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            copysource = None
            if gp:
                # git metadata: handle copies/renames/deletes and
                # mode-only changes before any hunk is applied
                path = pstrip(gp.path)
                if gp.oldpath:
                    copysource = pstrip(gp.oldpath)
                changed[path] = gp
                if gp.op == 'DELETE':
                    backend.unlink(path)
                    continue
                if gp.op == 'RENAME':
                    backend.unlink(copysource)
                if not first_hunk:
                    data, mode = None, None
                    if gp.op in ('RENAME', 'COPY'):
                        data, mode = store.getfile(copysource)
                    if gp.mode:
                        mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                    if data or mode:
                        if (gp.op in ('ADD', 'RENAME', 'COPY')
                            and backend.exists(path)):
                            raise PatchError(_("cannot create %s: destination "
                                               "already exists") % path)
                        backend.setfile(path, data, mode, copysource)
            if not first_hunk:
                continue
            try:
                mode = gp and gp.mode or None
                current_file, create, remove = selectfile(
                    backend, afile, bfile, first_hunk, strip, gp)
                current_file = patcher(ui, current_file, backend, store, mode,
                                       create, remove, eolmode=eolmode,
                                       copysource=copysource)
            except PatchError, inst:
                # a bad file selection rejects the whole file
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # snapshot copy/rename sources before they are modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
1290
1290
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        # scrape the external program's output for touched files, fuzz
        # and failures
        # NOTE(review): 'pf'/'printed_file' are only bound once a
        # 'patching file' line has been seen -- assumes the external
        # patcher always prints it before fuzz/FAILED lines
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.setdefault(pf, None)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            cfiles = list(files)
            cwd = repo.getcwd()
            if cwd:
                # fix: iterate 'cfiles' -- the original referenced the
                # undefined name 'cfile' and raised NameError here
                cfiles = [util.pathto(repo.root, cwd, f)
                          for f in cfiles]
            scmutil.addremove(repo, cfiles, similarity=similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1337
1337
def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
                  similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""

    if files is None:
        files = {}
    if eolmode is None:
        # eolmode=None means: take the mode from the user's configuration
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    backend = workingbackend(ui, repo, similarity)
    try:
        # patchobj may be a file name or an already-open file object
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, files, backend, store, strip=strip,
                        eolmode=eolmode)
    finally:
        # only close the file we opened ourselves
        if fp != patchobj:
            fp.close()
        files.update(dict.fromkeys(backend.close()))
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1368
1368
def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    # a configured external 'ui.patch' program takes precedence over
    # the builtin patcher
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = {}
    try:
        if patcher:
            return _externalpatch(ui, repo, patcher, patchname, strip,
                                  files, similarity)
        return internalpatch(ui, repo, patchname, strip, files, eolmode,
                             similarity)
    except PatchError, err:
        raise util.Abort(str(err))
1393
1393
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file paths touched by the patch at patchpath,
    with 'strip' leading components removed from each path."""
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git metadata names the file(s) directly
                    changed.add(pathstrip(gp.path, strip - 1)[1])
                    if gp.op == 'RENAME':
                        changed.add(pathstrip(gp.oldpath, strip - 1)[1])
                if not first_hunk:
                    continue
                current_file, create, remove = selectfile(
                    backend, afile, bfile, first_hunk, strip, gp)
                changed.add(current_file)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()
1416
1416
def b85diff(to, tn):
    '''print base85-encoded binary diff'''
    def gitindex(text):
        # git blob SHA-1 of text ('blob <len>\0' header plus payload);
        # empty/missing content hashes to the null id
        if not text:
            return hex(nullid)
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    def fmtline(line):
        # prefix each output line with a char encoding the chunk length:
        # 'A'-'Z' for 1-26 bytes, 'a'-'z' beyond that (git binary patches)
        l = len(line)
        if l <= 26:
            l = chr(ord('A') + l - 1)
        else:
            l = chr(l - 26 + ord('a') - 1)
        return '%c%s\n' % (l, base85.b85encode(line, True))

    def chunk(text, csize=52):
        # yield successive csize-byte pieces of text
        l = len(text)
        i = 0
        while i < l:
            yield text[i:i + csize]
            i += csize

    tohash = gitindex(to)
    tnhash = gitindex(tn)
    if tohash == tnhash:
        # contents identical: emit no diff at all
        return ""

    # TODO: deltas
    ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
           (tohash, tnhash, len(tn))]
    for l in chunk(zlib.compress(tn)):
        ret.append(fmtline(l))
    ret.append('\n')
    return ''.join(ret)
1454
1454
class GitDiffRequired(Exception):
    """Raised internally (see diff()) when a change can only be
    represented in git patch format."""
1457
1457
def diffopts(ui, opts=None, untrusted=False):
    """Build an mdiff.diffopts object from command options and config.

    A value given in 'opts' wins; otherwise each setting falls back to
    the 'diff' configuration section.
    """
    def get(key, name=None, getter=ui.configbool):
        # most settings are booleans; 'unified' (context) is read with
        # the plain string getter below
        return ((opts and opts.get(key)) or
                getter('diff', name or key, None, untrusted=untrusted))
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=get('git'),
        nodates=get('nodates'),
        showfunc=get('show_function', 'showfunc'),
        ignorews=get('ignore_all_space', 'ignorews'),
        ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
        context=get('unified', getter=ui.config))
1471
1471
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU (at most 20 entries) of filelogs keyed by filename,
        # shared by all filectx lookups of this diff
        cache = {}
        order = []
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    # revision ids shown in the diff header (suppressed in quiet mode)
    revs = None
    if not repo.ui.quiet:
        hexfunc = repo.ui.debugflag and hex or short
        revs = [hexfunc(node) for node in [node1, node2] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]

    difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
                modified, added, removed, copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        # try a plain diff first; retry in git format if any change
        # cannot be expressed and losedatafn does not accept the loss
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired()
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1546
1546
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # prefix -> label map; order matters ('+++' must be tried before '+')
    labels = [('diff', 'diff.diffline'),
              ('copy', 'diff.extended'),
              ('rename', 'diff.extended'),
              ('old', 'diff.extended'),
              ('new', 'diff.extended'),
              ('deleted', 'diff.extended'),
              ('---', 'diff.file_a'),
              ('+++', 'diff.file_b'),
              ('@@', 'diff.hunk'),
              ('-', 'diff.deleted'),
              ('+', 'diff.inserted')]

    for chunk in func(*args, **kw):
        for idx, full in enumerate(chunk.split('\n')):
            if idx:
                yield ('\n', '')
            head = full
            if full[:1] in ('+', '-'):
                # highlight trailing whitespace, but only in changed lines
                head = full.rstrip()
            for prefix, label in labels:
                if head.startswith(prefix):
                    yield (head, label)
                    break
            else:
                yield (full, '')
            if full != head:
                yield (full[len(head):], 'diff.trailingwhitespace')
1578
1578
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # thin wrapper: feed diff()'s output through difflabel() for coloring
    return difflabel(diff, *args, **kw)
1582
1582
1583
1583
1584 def _addmodehdr(header, omode, nmode):
1584 def _addmodehdr(header, omode, nmode):
1585 if omode != nmode:
1585 if omode != nmode:
1586 header.append('old mode %s\n' % omode)
1586 header.append('old mode %s\n' % omode)
1587 header.append('new mode %s\n' % nmode)
1587 header.append('new mode %s\n' % nmode)
1588
1588
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    '''yield diff text chunks (with optional git headers) for one change

    When opts.git is not set but losedatafn is, losedatafn(f) is invoked
    for every file whose change cannot be faithfully expressed as a plain
    unified diff (mode changes, binary data, copies/renames, empty file
    creation/deletion) so the caller may react, e.g. retry in git mode.
    '''

    def join(f):
        # apply the caller-supplied path prefix to a repo-relative name
        return os.path.join(prefix, f)

    date1 = util.datestr(ctx1.date())
    man1 = ctx1.manifest()

    # rename sources already reported, so each is reported only once
    gone = set()
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # 'copy' maps destination -> source; 'copyto' is the reverse mapping
    copyto = dict([(v, k) for k, v in copy.items()])

    if opts.git:
        revs = None

    for f in sorted(modified + added + removed):
        to = None       # old contents (None if the file did not exist)
        tn = None       # new contents (None if the file was removed)
        dodiff = True
        header = []
        if f in man1:
            to = getfilectx(f, ctx1).data()
        if f not in removed:
            tn = getfilectx(f, ctx2).data()
        a, b = f, f
        if opts.git or losedatafn:
            if f in added:
                mode = gitmode[ctx2.flags(f)]
                if f in copy or f in copyto:
                    if opts.git:
                        if f in copy:
                            a = copy[f]
                        else:
                            a = copyto[f]
                        omode = gitmode[man1.flags(a)]
                        _addmodehdr(header, omode, mode)
                        # call it a rename only the first time its source
                        # turns up among the removed files
                        if a in removed and a not in gone:
                            op = 'rename'
                            gone.add(a)
                        else:
                            op = 'copy'
                        header.append('%s from %s\n' % (op, join(a)))
                        header.append('%s to %s\n' % (op, join(f)))
                        to = getfilectx(a, ctx1).data()
                    else:
                        losedatafn(f)
                else:
                    if opts.git:
                        header.append('new file mode %s\n' % mode)
                    elif ctx2.flags(f):
                        losedatafn(f)
                # In theory, if tn was copied or renamed we should check
                # if the source is binary too but the copy record already
                # forces git mode.
                if util.binary(tn):
                    if opts.git:
                        dodiff = 'binary'
                    else:
                        losedatafn(f)
                if not opts.git and not tn:
                    # regular diffs cannot represent new empty file
                    losedatafn(f)
            elif f in removed:
                if opts.git:
                    # have we already reported a copy above?
                    if ((f in copy and copy[f] in added
                         and copyto[copy[f]] == f) or
                        (f in copyto and copyto[f] in added
                         and copy[copyto[f]] == f)):
                        dodiff = False
                    else:
                        header.append('deleted file mode %s\n' %
                                      gitmode[man1.flags(f)])
                elif not to or util.binary(to):
                    # regular diffs cannot represent empty file deletion
                    losedatafn(f)
            else:
                # file modified in place: emit mode headers in git mode
                oflag = man1.flags(f)
                nflag = ctx2.flags(f)
                binary = util.binary(to) or util.binary(tn)
                if opts.git:
                    _addmodehdr(header, gitmode[oflag], gitmode[nflag])
                    if binary:
                        dodiff = 'binary'
                elif binary or nflag != oflag:
                    losedatafn(f)
        if opts.git:
            header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))

        if dodiff:
            if dodiff == 'binary':
                text = b85diff(to, tn)
            else:
                text = mdiff.unidiff(to, date1,
                                     # ctx2 date may be dynamic
                                     tn, util.datestr(ctx2.date()),
                                     join(a), join(b), revs, opts=opts)
            # suppress a header that carries nothing but the bare diff line
            if header and (text or len(header) > 1):
                yield ''.join(header)
            if text:
                yield text
1692
1692
def diffstatsum(stats):
    """Reduce (filename, adds, removes, isbinary) tuples to overall totals.

    Returns (maxfile, maxtotal, addtotal, removetotal, binary): the widest
    filename in display columns, the largest per-file adds+removes, the
    grand totals, and whether any file was binary.
    """
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        maxfile = max(maxfile, encoding.colwidth(filename))
        maxtotal = max(maxtotal, adds + removes)
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
1703
1703
def diffstatdata(lines):
    """Parse diff text into a list of (filename, adds, removes, isbinary)."""
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes = None, 0, 0

    def addresult():
        # flush counters for the file parsed so far; a file with no
        # +/- lines at all is assumed to be a binary change
        if filename:
            results.append((filename, adds, removes,
                            adds == 0 and removes == 0))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes = 0, 0
        if line.startswith('diff --git'):
            filename = gitre.search(line).group(1)
        elif line.startswith('diff -r'):
            # format: "diff -r ... -r ... filename"
            filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++'):
            adds += 1
        elif line.startswith('-') and not line.startswith('---'):
            removes += 1
    addresult()
    return results
1731
1731
def diffstat(lines, width=80, git=False):
    """Render a diffstat summary of diff text, fitted into width columns."""
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # width of the numeric count column; 'Bin' needs at least 3 characters
    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    # remaining room for the +/- histogram, but never less than 10 columns
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for filename, adds, removes, isbinary in stats:
        if git and isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
1768
1768
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # histogram line: split off the trailing +/- graph and label
            # the inserted and deleted runs separately
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((r'\++', 'diffstat.inserted'),
                                   (r'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        else:
            yield (line, '')
        yield ('\n', '')
@@ -1,1278 +1,1278
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 from node import bin, hex, nullid, nullrev
15 from node import bin, hex, nullid, nullrev
16 from i18n import _
16 from i18n import _
17 import ancestor, mdiff, parsers, error, util, dagutil
17 import ancestor, mdiff, parsers, error, util, dagutil
18 import struct, zlib, errno
18 import struct, zlib, errno
19
19
# module-local aliases
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOGGENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA

# revlog index flags
REVIDX_KNOWN_FLAGS = 0

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

# re-exported error types for revlog users
RevlogError = error.RevlogError
LookupError = error.LookupError
45
45
def getoffset(q):
    """extract the data offset from a packed offset/flags value"""
    return int(q >> 16)

def gettype(q):
    """extract the flag bits from a packed offset/flags value"""
    return int(q & 0xFFFF)

def offset_type(offset, type):
    """combine an offset and flag bits into a single packed value"""
    return long(long(offset) << 16 | type)
54
54
nullhash = _sha(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent node is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = nullhash.copy()
        s.update(p1)
    else:
        # neither parent is nullid: hash the parent pair in sorted order
        pa, pb = sorted([p1, p2])
        s = _sha(pa)
        s.update(pb)
    s.update(text)
    return s.digest()
77
77
def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text:
        return ("", text)
    size = len(text)
    compressed = None
    if size < 44:
        # too small for compression to pay off
        pass
    elif size > 1000000:
        # zlib makes an internal copy, thus doubling memory usage for
        # large files, so lets do this in pieces
        z = zlib.compressobj()
        pieces = []
        pos = 0
        while pos < size:
            end = pos + 2 ** 20
            pieces.append(z.compress(text[pos:end]))
            pos = end
        pieces.append(z.flush())
        if sum(map(len, pieces)) < size:
            compressed = "".join(pieces)
    else:
        compressed = _compress(text)
    if compressed is None or len(compressed) > size:
        # compression did not help: store verbatim; a leading NUL already
        # marks literal data, otherwise tag it with 'u' (uncompressed)
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", compressed)
106
106
def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    marker = bin[0]
    if marker == '\0':
        # leading NUL: data was stored literally
        return bin
    if marker == 'x':
        # zlib stream
        return _decompress(bin)
    if marker == 'u':
        # stored uncompressed with a one-byte tag
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % marker)
119
119
# original (v0) index format: four 32-bit ints followed by the 20-byte
# p1, p2 and node hashes
indexformatv0 = ">4l20s20s20s"
# byte offset of the node hash within a v0 entry (4*4 + 2*20)
v0shaoffset = 56

class revlogoldio(object):
    """index read/write logic for the original (v0) revlog format"""
    def __init__(self):
        # fixed on-disk size of a single v0 index entry
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, data, inline):
        """parse raw v0 index data into (index, nodemap, cache)"""
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        """pack a v1-style index entry back into the v0 on-disk format"""
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)
155
155
# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">I"

class revlogio(object):
    """index read/write logic for the current (RevlogNG) format"""
    def __init__(self):
        # fixed on-disk size of a single index entry
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, None, cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            # the first entry doubles as the file header: splice the
            # version/flags word over its first four bytes
            p = _pack(versionformat, version) + p[4:]
        return p
184
184
185 class revlog(object):
185 class revlog(object):
186 """
186 """
187 the underlying revision storage object
187 the underlying revision storage object
188
188
189 A revlog consists of two parts, an index and the revision data.
189 A revlog consists of two parts, an index and the revision data.
190
190
191 The index is a file with a fixed record size containing
191 The index is a file with a fixed record size containing
192 information on each revision, including its nodeid (hash), the
192 information on each revision, including its nodeid (hash), the
193 nodeids of its parents, the position and offset of its data within
193 nodeids of its parents, the position and offset of its data within
194 the data file, and the revision it's based on. Finally, each entry
194 the data file, and the revision it's based on. Finally, each entry
195 contains a linkrev entry that can serve as a pointer to external
195 contains a linkrev entry that can serve as a pointer to external
196 data.
196 data.
197
197
198 The revision data itself is a linear collection of data chunks.
198 The revision data itself is a linear collection of data chunks.
199 Each chunk represents a revision and is usually represented as a
199 Each chunk represents a revision and is usually represented as a
200 delta against the previous chunk. To bound lookup time, runs of
200 delta against the previous chunk. To bound lookup time, runs of
201 deltas are limited to about 2 times the length of the original
201 deltas are limited to about 2 times the length of the original
202 version data. This makes retrieval of a version proportional to
202 version data. This makes retrieval of a version proportional to
203 its size, or O(1) relative to the number of revisions.
203 its size, or O(1) relative to the number of revisions.
204
204
205 Both pieces of the revlog are written to in an append-only
205 Both pieces of the revlog are written to in an append-only
206 fashion, which means we never need to rewrite a file to insert or
206 fashion, which means we never need to rewrite a file to insert or
207 remove data, and can use some simple techniques to avoid the need
207 remove data, and can use some simple techniques to avoid the need
208 for locking while reading.
208 for locking while reading.
209 """
209 """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        # internal caches, all starting out empty
        self._cache = None
        self._basecache = (0, 0)
        self._chunkcache = (0, '')
        self.index = []
        self._pcache = {}
        self._nodecache = {nullid: nullrev}
        self._nodepos = None

        # pick a format version from the options the opener carries;
        # without a 'revlogv1' option we fall back to the v0 format
        v = REVLOG_DEFAULT_VERSION
        if hasattr(opener, 'options'):
            if 'revlogv1' in opener.options:
                if 'generaldelta' in opener.options:
                    v |= REVLOGGENERALDELTA
            else:
                v = 0

        # read the on-disk index, if any; its header word overrides the
        # version chosen above
        i = ''
        self._initempty = True
        try:
            f = self.opener(self.indexfile)
            i = f.read()
            f.close()
            if len(i) > 0:
                v = struct.unpack(versionformat, i[:4])[0]
                self._initempty = False
        except IOError, inst:
            # a missing index file simply means an empty revlog
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        self._generaldelta = v & REVLOGGENERALDELTA
        # validate the format/flags combination before going further
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(i, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
276
276
277 def tip(self):
277 def tip(self):
278 return self.node(len(self.index) - 2)
278 return self.node(len(self.index) - 2)
279 def __len__(self):
279 def __len__(self):
280 return len(self.index) - 1
280 return len(self.index) - 1
281 def __iter__(self):
281 def __iter__(self):
282 for i in xrange(len(self)):
282 for i in xrange(len(self)):
283 yield i
283 yield i
284
284
285 @util.propertycache
285 @util.propertycache
286 def nodemap(self):
286 def nodemap(self):
287 self.rev(self.node(0))
287 self.rev(self.node(0))
288 return self._nodecache
288 return self._nodecache
289
289
    def rev(self, node):
        """Return the revision number for node.

        Raises LookupError if the node is not in this revlog.
        """
        try:
            return self._nodecache[node]
        except KeyError:
            # cache miss: scan the index backwards from where the last
            # scan stopped, memoizing every node seen along the way
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                # start from the last real revision (skip null sentinel)
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))
306
306
def node(self, rev):
    # index slot 7: node id of the revision
    return self.index[rev][7]
def linkrev(self, rev):
    # index slot 4: revision number this entry is linked to
    return self.index[rev][4]
def parents(self, node):
    # return the two parent node ids of a node
    i = self.index
    d = i[self.rev(node)]
    return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
def parentrevs(self, rev):
    # index slots 5 and 6: the two parent revision numbers
    return self.index[rev][5:7]
def start(self, rev):
    # slot 0 packs the data-file offset (high bits) above 16 flag bits
    return int(self.index[rev][0] >> 16)
def end(self, rev):
    # offset one past the last byte of this revision's on-disk data
    return self.start(rev) + self.length(rev)
def length(self, rev):
    # index slot 1: length of the compressed data on disk
    return self.index[rev][1]
def chainbase(self, rev):
    """Return the base revision of the delta chain containing rev."""
    # follow base pointers (index slot 3) until an entry points at
    # itself, which marks the start of the chain
    entries = self.index
    current = rev
    parent = entries[current][3]
    while parent != current:
        current = parent
        parent = entries[current][3]
    return current
def flags(self, rev):
    """Return the flag field of a revision (low 16 bits of index slot 0)."""
    packed = self.index[rev][0]
    return packed & 0xFFFF
def rawsize(self, rev):
    """return the length of the uncompressed text for a given revision"""
    # index slot 2 caches the uncompressed length; a negative value
    # means "unknown" and forces reconstructing the full text
    l = self.index[rev][2]
    if l >= 0:
        return l

    t = self.revision(self.node(rev))
    return len(t)
# alias: subclasses may override size() separately from rawsize()
size = rawsize
341
341
def reachable(self, node, stop=None):
    """return the set of all nodes ancestral to a given node, including
    the node itself, stopping when stop is matched"""
    reachable = set((node,))
    visit = [node]
    if stop:
        stopn = self.rev(stop)
    else:
        stopn = 0
    # breadth-first walk towards the roots
    while visit:
        n = visit.pop(0)
        if n == stop:
            continue
        if n == nullid:
            continue
        for p in self.parents(n):
            # ignore parents older than the stop revision
            if self.rev(p) < stopn:
                continue
            if p not in reachable:
                reachable.add(p)
                visit.append(p)
    return reachable
364
364
def ancestors(self, *revs):
    """Generate the ancestors of 'revs' in reverse topological order.

    Yield a sequence of revision numbers starting with the parents
    of each revision in revs, i.e., each revision is *not* considered
    an ancestor of itself.  Results are in breadth-first order:
    parents of each rev in revs, then parents of those, etc.  Result
    does not include the null revision."""
    visit = list(revs)
    # seeding 'seen' with nullrev keeps the null revision out of the
    # output and stops the walk at the roots
    seen = set([nullrev])
    while visit:
        for parent in self.parentrevs(visit.pop(0)):
            if parent not in seen:
                visit.append(parent)
                seen.add(parent)
                yield parent
381
381
def descendants(self, *revs):
    """Generate the descendants of 'revs' in revision order.

    Yield a sequence of revision numbers starting with a child of
    some rev in revs, i.e., each revision is *not* considered a
    descendant of itself.  Results are ordered by revision number (a
    topological sort)."""
    first = min(revs)
    if first == nullrev:
        # everything descends from the null revision
        for i in self:
            yield i
        return

    seen = set(revs)
    # a revision is a descendant iff one of its parents is already in
    # 'seen'; scanning in increasing rev order makes this transitive
    for i in xrange(first + 1, len(self)):
        for x in self.parentrevs(i):
            if x != nullrev and x in seen:
                seen.add(i)
                yield i
                break
402
402
def findcommonmissing(self, common=None, heads=None):
    """Return a tuple of the ancestors of common and the ancestors of heads
    that are not ancestors of common.

    More specifically, the second element is a list of nodes N such that
    every N satisfies the following constraints:

      1. N is an ancestor of some node in 'heads'
      2. N is not an ancestor of any node in 'common'

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of node IDs.  If heads is
    not supplied, uses all of the revlog's heads.  If common is not
    supplied, uses nullid."""
    if common is None:
        common = [nullid]
    if heads is None:
        heads = self.heads()

    common = [self.rev(n) for n in common]
    heads = [self.rev(n) for n in heads]

    # we want the ancestors, but inclusive
    has = set(self.ancestors(*common))
    has.add(nullrev)
    has.update(common)

    # take all ancestors from heads that aren't in has
    missing = set()
    visit = [r for r in heads if r not in has]
    while visit:
        r = visit.pop(0)
        if r in missing:
            continue
        missing.add(r)
        for p in self.parentrevs(r):
            if p not in has:
                visit.append(p)
    # sorted by revision number == topologically sorted
    return has, [self.node(r) for r in sorted(missing)]
447
447
def findmissing(self, common=None, heads=None):
    """Return the ancestors of heads that are not ancestors of common.

    More specifically, return a list of nodes N such that every N
    satisfies the following constraints:

      1. N is an ancestor of some node in 'heads'
      2. N is not an ancestor of any node in 'common'

    The list is sorted by revision number, meaning it is
    topologically sorted.

    'heads' and 'common' are both lists of node IDs.  If heads is
    not supplied, uses all of the revlog's heads.  If common is not
    supplied, uses nullid."""
    # delegate to findcommonmissing and keep only the "missing" half
    missing = self.findcommonmissing(common, heads)[1]
    return missing
465
465
def nodesbetween(self, roots=None, heads=None):
    """Return a topological path from 'roots' to 'heads'.

    Return a tuple (nodes, outroots, outheads) where 'nodes' is a
    topologically sorted list of all nodes N that satisfy both of
    these constraints:

      1. N is a descendant of some node in 'roots'
      2. N is an ancestor of some node in 'heads'

    Every node is considered to be both a descendant and an ancestor
    of itself, so every reachable node in 'roots' and 'heads' will be
    included in 'nodes'.

    'outroots' is the list of reachable nodes in 'roots', i.e., the
    subset of 'roots' that is returned in 'nodes'.  Likewise,
    'outheads' is the subset of 'heads' that is also in 'nodes'.

    'roots' and 'heads' are both lists of node IDs.  If 'roots' is
    unspecified, uses nullid as the only root.  If 'heads' is
    unspecified, uses list of all of the revlog's heads."""
    nonodes = ([], [], [])
    if roots is not None:
        roots = list(roots)
        if not roots:
            return nonodes
        lowestrev = min([self.rev(n) for n in roots])
    else:
        roots = [nullid] # Everybody's a descendent of nullid
        lowestrev = nullrev
    if (lowestrev == nullrev) and (heads is None):
        # We want _all_ the nodes!
        return ([self.node(r) for r in self], [nullid], list(self.heads()))
    if heads is None:
        # All nodes are ancestors, so the latest ancestor is the last
        # node.
        highestrev = len(self) - 1
        # Set ancestors to None to signal that every node is an ancestor.
        ancestors = None
        # Set heads to an empty dictionary for later discovery of heads
        heads = {}
    else:
        heads = list(heads)
        if not heads:
            return nonodes
        ancestors = set()
        # Turn heads into a dictionary so we can remove 'fake' heads.
        # Also, later we will be using it to filter out the heads we can't
        # find from roots.
        heads = dict.fromkeys(heads, False)
        # Start at the top and keep marking parents until we're done.
        nodestotag = set(heads)
        # Remember where the top was so we can use it as a limit later.
        highestrev = max([self.rev(n) for n in nodestotag])
        while nodestotag:
            # grab a node to tag
            n = nodestotag.pop()
            # Never tag nullid
            if n == nullid:
                continue
            # A node's revision number represents its place in a
            # topologically sorted list of nodes.
            r = self.rev(n)
            if r >= lowestrev:
                if n not in ancestors:
                    # If we are possibly a descendent of one of the roots
                    # and we haven't already been marked as an ancestor
                    ancestors.add(n) # Mark as ancestor
                    # Add non-nullid parents to list of nodes to tag.
                    nodestotag.update([p for p in self.parents(n) if
                                       p != nullid])
                elif n in heads: # We've seen it before, is it a fake head?
                    # So it is, real heads should not be the ancestors of
                    # any other heads.
                    heads.pop(n)
        if not ancestors:
            return nonodes
        # Now that we have our set of ancestors, we want to remove any
        # roots that are not ancestors.

        # If one of the roots was nullid, everything is included anyway.
        if lowestrev > nullrev:
            # But, since we weren't, let's recompute the lowest rev to not
            # include roots that aren't ancestors.

            # Filter out roots that aren't ancestors of heads
            roots = [n for n in roots if n in ancestors]
            # Recompute the lowest revision
            if roots:
                lowestrev = min([self.rev(n) for n in roots])
            else:
                # No more roots?  Return empty list
                return nonodes
        else:
            # We are descending from nullid, and don't need to care about
            # any other roots.
            lowestrev = nullrev
            roots = [nullid]
    # Transform our roots list into a set.
    descendents = set(roots)
    # Also, keep the original roots so we can filter out roots that aren't
    # 'real' roots (i.e. are descended from other roots).
    roots = descendents.copy()
    # Our topologically sorted list of output nodes.
    orderedout = []
    # Don't start at nullid since we don't want nullid in our output list,
    # and if nullid shows up in descendents, empty parents will look like
    # they're descendents.
    for r in xrange(max(lowestrev, 0), highestrev + 1):
        n = self.node(r)
        isdescendent = False
        if lowestrev == nullrev: # Everybody is a descendent of nullid
            isdescendent = True
        elif n in descendents:
            # n is already a descendent
            isdescendent = True
            # This check only needs to be done here because all the roots
            # will start being marked as descendents before the loop.
            if n in roots:
                # If n was a root, check if it's a 'real' root.
                p = tuple(self.parents(n))
                # If any of its parents are descendents, it's not a root.
                if (p[0] in descendents) or (p[1] in descendents):
                    roots.remove(n)
        else:
            p = tuple(self.parents(n))
            # A node is a descendent if either of its parents are
            # descendents.  (We seeded the dependents list with the roots
            # up there, remember?)
            if (p[0] in descendents) or (p[1] in descendents):
                descendents.add(n)
                isdescendent = True
        if isdescendent and ((ancestors is None) or (n in ancestors)):
            # Only include nodes that are both descendents and ancestors.
            orderedout.append(n)
            if (ancestors is not None) and (n in heads):
                # We're trying to figure out which heads are reachable
                # from roots.
                # Mark this head as having been reached
                heads[n] = True
            elif ancestors is None:
                # Otherwise, we're trying to discover the heads.
                # Assume this is a head because if it isn't, the next step
                # will eventually remove it.
                heads[n] = True
                # But, obviously its parents aren't.
                for p in self.parents(n):
                    heads.pop(p, None)
    heads = [n for n, flag in heads.iteritems() if flag]
    roots = list(roots)
    assert orderedout
    assert roots
    assert heads
    return (orderedout, roots, heads)
620
620
def headrevs(self):
    # compute all head revisions (revisions with no children) by
    # clearing a "head" flag on every revision referenced as a parent
    count = len(self)
    if not count:
        return [nullrev]
    # one extra slot so writes indexed by a nullrev parent land
    # harmlessly past the real entries
    ishead = [1] * (count + 1)
    index = self.index
    for r in xrange(count):
        e = index[r]
        # index slots 5 and 6 hold the parent revisions
        ishead[e[5]] = ishead[e[6]] = 0
    return [r for r in xrange(count) if ishead[r]]
631
631
def heads(self, start=None, stop=None):
    """return the list of all nodes that have no children

    if start is specified, only heads that are descendants of
    start will be returned
    if stop is specified, it will consider all the revs from stop
    as if they had no children
    """
    if start is None and stop is None:
        # fast path covering the whole revlog
        if not len(self):
            return [nullid]
        return [self.node(r) for r in self.headrevs()]

    if start is None:
        start = nullid
    if stop is None:
        stop = []
    stoprevs = set([self.rev(n) for n in stop])
    startrev = self.rev(start)
    reachable = set((startrev,))
    heads = set((startrev,))

    # forward sweep: a reachable revision becomes a head, and its
    # parent stops being one, unless blocked by a stop revision
    parentrevs = self.parentrevs
    for r in xrange(startrev + 1, len(self)):
        for p in parentrevs(r):
            if p in reachable:
                if r not in stoprevs:
                    reachable.add(r)
                    heads.add(r)
                if p in heads and p not in stoprevs:
                    heads.remove(p)

    return [self.node(r) for r in heads]
665
665
def children(self, node):
    """find the children of a given node"""
    c = []
    p = self.rev(node)
    # only revisions after p can have p as a parent
    for r in range(p + 1, len(self)):
        prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
        if prevs:
            for pr in prevs:
                if pr == p:
                    c.append(self.node(r))
        elif p == nullrev:
            # parentless revisions are children of the null revision
            c.append(self.node(r))
    return c
679
679
def descendant(self, start, end):
    """Return True if revision 'end' is a descendant of revision 'start'."""
    # everything descends from the null revision
    if start == nullrev:
        return True
    # descendants() yields revisions in increasing order, so we can
    # stop scanning as soon as we pass 'end'
    for rev in self.descendants(start):
        if rev == end:
            return True
        if rev > end:
            break
    return False
689
689
def ancestor(self, a, b):
    """calculate the least common ancestor of nodes a and b"""

    # fast path, check if it is a descendant
    a, b = self.rev(a), self.rev(b)
    start, end = sorted((a, b))
    if self.descendant(start, end):
        return self.node(start)

    def parents(rev):
        return [p for p in self.parentrevs(rev) if p != nullrev]

    # NOTE: 'ancestor' below resolves to the module-level name (the
    # ancestor module), not to this method
    c = ancestor.ancestor(a, b, parents)
    if c is None:
        return nullid

    return self.node(c)
707
707
def _match(self, id):
    # Resolve 'id' given as a revision number, a binary node, a
    # str(rev), or a full hex node id; return None when nothing matches.
    if isinstance(id, (long, int)):
        # rev
        return self.node(id)
    if len(id) == 20:
        # possibly a binary node
        # odds of a binary node being all hex in ASCII are 1 in 10**25
        try:
            node = id
            self.rev(node) # quick search the index
            return node
        except LookupError:
            pass # may be partial hex id
    try:
        # str(rev)
        rev = int(id)
        if str(rev) != id:
            raise ValueError
        if rev < 0:
            # negative revisions count from the end
            rev = len(self) + rev
        if rev < 0 or rev >= len(self):
            raise ValueError
        return self.node(rev)
    except (ValueError, OverflowError):
        pass
    if len(id) == 40:
        try:
            # a full hex nodeid?
            node = bin(id)
            self.rev(node)
            return node
        except (TypeError, LookupError):
            pass
741
741
def _partialmatch(self, id):
    # Resolve a unique hex-prefix node id, memoizing hits in
    # self._pcache.  Returns None when nothing matches and raises
    # LookupError when the prefix is ambiguous.
    if id in self._pcache:
        return self._pcache[id]

    if len(id) < 40:
        try:
            # hex(node)[:...]
            l = len(id) // 2 # grab an even number of digits
            prefix = bin(id[:l * 2])
            # find nodes whose binary form starts with the prefix, then
            # filter on the full (possibly odd-length) hex id; bin/hex
            # here take node strings, so they are helpers, not builtins
            nl = [e[7] for e in self.index if e[7].startswith(prefix)]
            nl = [n for n in nl if hex(n).startswith(id)]
            if len(nl) > 0:
                if len(nl) == 1:
                    self._pcache[id] = nl[0]
                    return nl[0]
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            return None
        except TypeError:
            pass
762
762
def lookup(self, id):
    """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
    """
    # try an exact match (rev number, binary or full hex node) first
    exact = self._match(id)
    if exact is not None:
        return exact
    # then fall back to a unique hex-prefix match
    partial = self._partialmatch(id)
    if partial:
        return partial

    raise LookupError(id, self.indexfile, _('no match found'))
776
776
def cmp(self, node, text):
    """compare text with a given file revision

    returns True if text is different than what is stored.
    """
    p1, p2 = self.parents(node)
    # 'hash' takes (text, p1, p2), so it is the revlog module's node
    # hash helper, not the builtin hash()
    return hash(text, p1, p2) != node
784
784
def _addchunk(self, offset, data):
    # merge freshly read data into the (offset, data) chunk cache
    o, d = self._chunkcache
    # try to add to existing cache: only when the new data is exactly
    # contiguous and the combined size stays under _chunksize
    if o + len(d) == offset and len(d) + len(data) < _chunksize:
        self._chunkcache = o, d + data
    else:
        # otherwise the new data replaces the cache wholesale
        self._chunkcache = offset, data
792
792
def _loadchunk(self, offset, length):
    # Read 'length' bytes at 'offset' from the data file (or the index
    # file for inline revlogs), reading ahead and caching the surplus.
    if self._inline:
        df = self.opener(self.indexfile)
    else:
        df = self.opener(self.datafile)

    # read at least 64k to amortize the cost of the seek + read
    readahead = max(65536, length)
    df.seek(offset)
    d = df.read(readahead)
    self._addchunk(offset, d)
    if readahead > length:
        # caller only asked for 'length' bytes; cache kept the rest
        return d[:length]
    return d
806
806
def _getchunk(self, offset, length):
    # return 'length' bytes at 'offset', served from the chunk cache
    # when possible
    o, d = self._chunkcache
    l = len(d)

    # is it in the cache?
    cachestart = offset - o
    cacheend = cachestart + length
    if cachestart >= 0 and cacheend <= l:
        if cachestart == 0 and cacheend == l:
            return d # avoid a copy
        return d[cachestart:cacheend]

    # cache miss: go to disk
    return self._loadchunk(offset, length)
820
820
def _chunkraw(self, startrev, endrev):
    # raw (still compressed) data spanning revisions startrev..endrev
    start = self.start(startrev)
    length = self.end(endrev) - start
    if self._inline:
        # inline revlogs interleave index entries with the data;
        # shift past the index records preceding startrev's data
        start += (startrev + 1) * self._io.size
    return self._getchunk(start, length)
827
827
828 def _chunk(self, rev):
828 def _chunk(self, rev):
829 return decompress(self._chunkraw(rev, rev))
829 return decompress(self._chunkraw(rev, rev))
830
830
831 def _chunkbase(self, rev):
831 def _chunkbase(self, rev):
832 return self._chunk(rev)
832 return self._chunk(rev)
833
833
834 def _chunkclear(self):
834 def _chunkclear(self):
835 self._chunkcache = (0, '')
835 self._chunkcache = (0, '')
836
836
837 def deltaparent(self, rev):
837 def deltaparent(self, rev):
838 """return deltaparent of the given revision"""
838 """return deltaparent of the given revision"""
839 base = self.index[rev][3]
839 base = self.index[rev][3]
840 if base == rev:
840 if base == rev:
841 return nullrev
841 return nullrev
842 elif self._generaldelta:
842 elif self._generaldelta:
843 return base
843 return base
844 else:
844 else:
845 return rev - 1
845 return rev - 1
846
846
847 def revdiff(self, rev1, rev2):
847 def revdiff(self, rev1, rev2):
848 """return or calculate a delta between two revisions"""
848 """return or calculate a delta between two revisions"""
849 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
849 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
850 return self._chunk(rev2)
850 return self._chunk(rev2)
851
851
852 return mdiff.textdiff(self.revision(self.node(rev1)),
852 return mdiff.textdiff(self.revision(self.node(rev1)),
853 self.revision(self.node(rev2)))
853 self.revision(self.node(rev2)))
854
854
855 def revision(self, node):
855 def revision(self, node):
856 """return an uncompressed revision of a given node"""
856 """return an uncompressed revision of a given node"""
857 cachedrev = None
857 cachedrev = None
858 if node == nullid:
858 if node == nullid:
859 return ""
859 return ""
860 if self._cache:
860 if self._cache:
861 if self._cache[0] == node:
861 if self._cache[0] == node:
862 return self._cache[2]
862 return self._cache[2]
863 cachedrev = self._cache[1]
863 cachedrev = self._cache[1]
864
864
865 # look up what we need to read
865 # look up what we need to read
866 text = None
866 text = None
867 rev = self.rev(node)
867 rev = self.rev(node)
868
868
869 # check rev flags
869 # check rev flags
870 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
870 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
871 raise RevlogError(_('incompatible revision flag %x') %
871 raise RevlogError(_('incompatible revision flag %x') %
872 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
872 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
873
873
874 # build delta chain
874 # build delta chain
875 chain = []
875 chain = []
876 index = self.index # for performance
876 index = self.index # for performance
877 generaldelta = self._generaldelta
877 generaldelta = self._generaldelta
878 iterrev = rev
878 iterrev = rev
879 e = index[iterrev]
879 e = index[iterrev]
880 while iterrev != e[3] and iterrev != cachedrev:
880 while iterrev != e[3] and iterrev != cachedrev:
881 chain.append(iterrev)
881 chain.append(iterrev)
882 if generaldelta:
882 if generaldelta:
883 iterrev = e[3]
883 iterrev = e[3]
884 else:
884 else:
885 iterrev -= 1
885 iterrev -= 1
886 e = index[iterrev]
886 e = index[iterrev]
887 chain.reverse()
887 chain.reverse()
888 base = iterrev
888 base = iterrev
889
889
890 if iterrev == cachedrev:
890 if iterrev == cachedrev:
891 # cache hit
891 # cache hit
892 text = self._cache[2]
892 text = self._cache[2]
893
893
894 # drop cache to save memory
894 # drop cache to save memory
895 self._cache = None
895 self._cache = None
896
896
897 self._chunkraw(base, rev)
897 self._chunkraw(base, rev)
898 if text is None:
898 if text is None:
899 text = self._chunkbase(base)
899 text = self._chunkbase(base)
900
900
901 bins = [self._chunk(r) for r in chain]
901 bins = [self._chunk(r) for r in chain]
902 text = mdiff.patches(text, bins)
902 text = mdiff.patches(text, bins)
903
903
904 text = self._checkhash(text, node, rev)
904 text = self._checkhash(text, node, rev)
905
905
906 self._cache = (node, rev, text)
906 self._cache = (node, rev, text)
907 return text
907 return text
908
908
909 def _checkhash(self, text, node, rev):
909 def _checkhash(self, text, node, rev):
910 p1, p2 = self.parents(node)
910 p1, p2 = self.parents(node)
911 if node != hash(text, p1, p2):
911 if node != hash(text, p1, p2):
912 raise RevlogError(_("integrity check failed on %s:%d")
912 raise RevlogError(_("integrity check failed on %s:%d")
913 % (self.indexfile, rev))
913 % (self.indexfile, rev))
914 return text
914 return text
915
915
916 def checkinlinesize(self, tr, fp=None):
916 def checkinlinesize(self, tr, fp=None):
917 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
917 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
918 return
918 return
919
919
920 trinfo = tr.find(self.indexfile)
920 trinfo = tr.find(self.indexfile)
921 if trinfo is None:
921 if trinfo is None:
922 raise RevlogError(_("%s not found in the transaction")
922 raise RevlogError(_("%s not found in the transaction")
923 % self.indexfile)
923 % self.indexfile)
924
924
925 trindex = trinfo[2]
925 trindex = trinfo[2]
926 dataoff = self.start(trindex)
926 dataoff = self.start(trindex)
927
927
928 tr.add(self.datafile, dataoff)
928 tr.add(self.datafile, dataoff)
929
929
930 if fp:
930 if fp:
931 fp.flush()
931 fp.flush()
932 fp.close()
932 fp.close()
933
933
934 df = self.opener(self.datafile, 'w')
934 df = self.opener(self.datafile, 'w')
935 try:
935 try:
936 for r in self:
936 for r in self:
937 df.write(self._chunkraw(r, r))
937 df.write(self._chunkraw(r, r))
938 finally:
938 finally:
939 df.close()
939 df.close()
940
940
941 fp = self.opener(self.indexfile, 'w', atomictemp=True)
941 fp = self.opener(self.indexfile, 'w', atomictemp=True)
942 self.version &= ~(REVLOGNGINLINEDATA)
942 self.version &= ~(REVLOGNGINLINEDATA)
943 self._inline = False
943 self._inline = False
944 for i in self:
944 for i in self:
945 e = self._io.packentry(self.index[i], self.node, self.version, i)
945 e = self._io.packentry(self.index[i], self.node, self.version, i)
946 fp.write(e)
946 fp.write(e)
947
947
948 # if we don't call rename, the temp file will never replace the
948 # if we don't call rename, the temp file will never replace the
949 # real index
949 # real index
950 fp.rename()
950 fp.rename()
951
951
952 tr.replace(self.indexfile, trindex * self._io.size)
952 tr.replace(self.indexfile, trindex * self._io.size)
953 self._chunkclear()
953 self._chunkclear()
954
954
955 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
955 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
956 """add a revision to the log
956 """add a revision to the log
957
957
958 text - the revision data to add
958 text - the revision data to add
959 transaction - the transaction object used for rollback
959 transaction - the transaction object used for rollback
960 link - the linkrev data to add
960 link - the linkrev data to add
961 p1, p2 - the parent nodeids of the revision
961 p1, p2 - the parent nodeids of the revision
962 cachedelta - an optional precomputed delta
962 cachedelta - an optional precomputed delta
963 """
963 """
964 node = hash(text, p1, p2)
964 node = hash(text, p1, p2)
965 if node in self.nodemap:
965 if node in self.nodemap:
966 return node
966 return node
967
967
968 dfh = None
968 dfh = None
969 if not self._inline:
969 if not self._inline:
970 dfh = self.opener(self.datafile, "a")
970 dfh = self.opener(self.datafile, "a")
971 ifh = self.opener(self.indexfile, "a+")
971 ifh = self.opener(self.indexfile, "a+")
972 try:
972 try:
973 return self._addrevision(node, text, transaction, link, p1, p2,
973 return self._addrevision(node, text, transaction, link, p1, p2,
974 cachedelta, ifh, dfh)
974 cachedelta, ifh, dfh)
975 finally:
975 finally:
976 if dfh:
976 if dfh:
977 dfh.close()
977 dfh.close()
978 ifh.close()
978 ifh.close()
979
979
980 def _addrevision(self, node, text, transaction, link, p1, p2,
980 def _addrevision(self, node, text, transaction, link, p1, p2,
981 cachedelta, ifh, dfh):
981 cachedelta, ifh, dfh):
982 """internal function to add revisions to the log
982 """internal function to add revisions to the log
983
983
984 see addrevision for argument descriptions.
984 see addrevision for argument descriptions.
985 invariants:
985 invariants:
986 - text is optional (can be None); if not set, cachedelta must be set.
986 - text is optional (can be None); if not set, cachedelta must be set.
987 if both are set, they must correspond to eachother.
987 if both are set, they must correspond to eachother.
988 """
988 """
989 btext = [text]
989 btext = [text]
990 def buildtext():
990 def buildtext():
991 if btext[0] is not None:
991 if btext[0] is not None:
992 return btext[0]
992 return btext[0]
993 # flush any pending writes here so we can read it in revision
993 # flush any pending writes here so we can read it in revision
994 if dfh:
994 if dfh:
995 dfh.flush()
995 dfh.flush()
996 ifh.flush()
996 ifh.flush()
997 basetext = self.revision(self.node(cachedelta[0]))
997 basetext = self.revision(self.node(cachedelta[0]))
998 btext[0] = mdiff.patch(basetext, cachedelta[1])
998 btext[0] = mdiff.patch(basetext, cachedelta[1])
999 chk = hash(btext[0], p1, p2)
999 chk = hash(btext[0], p1, p2)
1000 if chk != node:
1000 if chk != node:
1001 raise RevlogError(_("consistency error in delta"))
1001 raise RevlogError(_("consistency error in delta"))
1002 return btext[0]
1002 return btext[0]
1003
1003
1004 def builddelta(rev):
1004 def builddelta(rev):
1005 # can we use the cached delta?
1005 # can we use the cached delta?
1006 if cachedelta and cachedelta[0] == rev:
1006 if cachedelta and cachedelta[0] == rev:
1007 delta = cachedelta[1]
1007 delta = cachedelta[1]
1008 else:
1008 else:
1009 t = buildtext()
1009 t = buildtext()
1010 ptext = self.revision(self.node(rev))
1010 ptext = self.revision(self.node(rev))
1011 delta = mdiff.textdiff(ptext, t)
1011 delta = mdiff.textdiff(ptext, t)
1012 data = compress(delta)
1012 data = compress(delta)
1013 l = len(data[1]) + len(data[0])
1013 l = len(data[1]) + len(data[0])
1014 if basecache[0] == rev:
1014 if basecache[0] == rev:
1015 chainbase = basecache[1]
1015 chainbase = basecache[1]
1016 else:
1016 else:
1017 chainbase = self.chainbase(rev)
1017 chainbase = self.chainbase(rev)
1018 dist = l + offset - self.start(chainbase)
1018 dist = l + offset - self.start(chainbase)
1019 if self._generaldelta:
1019 if self._generaldelta:
1020 base = rev
1020 base = rev
1021 else:
1021 else:
1022 base = chainbase
1022 base = chainbase
1023 return dist, l, data, base, chainbase
1023 return dist, l, data, base, chainbase
1024
1024
1025 curr = len(self)
1025 curr = len(self)
1026 prev = curr - 1
1026 prev = curr - 1
1027 base = chainbase = curr
1027 base = chainbase = curr
1028 offset = self.end(prev)
1028 offset = self.end(prev)
1029 flags = 0
1029 flags = 0
1030 d = None
1030 d = None
1031 basecache = self._basecache
1031 basecache = self._basecache
1032 p1r, p2r = self.rev(p1), self.rev(p2)
1032 p1r, p2r = self.rev(p1), self.rev(p2)
1033
1033
1034 # should we try to build a delta?
1034 # should we try to build a delta?
1035 if prev != nullrev:
1035 if prev != nullrev:
1036 if self._generaldelta:
1036 if self._generaldelta:
1037 if p1r >= basecache[1]:
1037 if p1r >= basecache[1]:
1038 d = builddelta(p1r)
1038 d = builddelta(p1r)
1039 elif p2r >= basecache[1]:
1039 elif p2r >= basecache[1]:
1040 d = builddelta(p2r)
1040 d = builddelta(p2r)
1041 else:
1041 else:
1042 d = builddelta(prev)
1042 d = builddelta(prev)
1043 else:
1043 else:
1044 d = builddelta(prev)
1044 d = builddelta(prev)
1045 dist, l, data, base, chainbase = d
1045 dist, l, data, base, chainbase = d
1046
1046
1047 # full versions are inserted when the needed deltas
1047 # full versions are inserted when the needed deltas
1048 # become comparable to the uncompressed text
1048 # become comparable to the uncompressed text
1049 if text is None:
1049 if text is None:
1050 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1050 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1051 cachedelta[1])
1051 cachedelta[1])
1052 else:
1052 else:
1053 textlen = len(text)
1053 textlen = len(text)
1054 if d is None or dist > textlen * 2:
1054 if d is None or dist > textlen * 2:
1055 text = buildtext()
1055 text = buildtext()
1056 data = compress(text)
1056 data = compress(text)
1057 l = len(data[1]) + len(data[0])
1057 l = len(data[1]) + len(data[0])
1058 base = chainbase = curr
1058 base = chainbase = curr
1059
1059
1060 e = (offset_type(offset, flags), l, textlen,
1060 e = (offset_type(offset, flags), l, textlen,
1061 base, link, p1r, p2r, node)
1061 base, link, p1r, p2r, node)
1062 self.index.insert(-1, e)
1062 self.index.insert(-1, e)
1063 self.nodemap[node] = curr
1063 self.nodemap[node] = curr
1064
1064
1065 entry = self._io.packentry(e, self.node, self.version, curr)
1065 entry = self._io.packentry(e, self.node, self.version, curr)
1066 if not self._inline:
1066 if not self._inline:
1067 transaction.add(self.datafile, offset)
1067 transaction.add(self.datafile, offset)
1068 transaction.add(self.indexfile, curr * len(entry))
1068 transaction.add(self.indexfile, curr * len(entry))
1069 if data[0]:
1069 if data[0]:
1070 dfh.write(data[0])
1070 dfh.write(data[0])
1071 dfh.write(data[1])
1071 dfh.write(data[1])
1072 dfh.flush()
1072 dfh.flush()
1073 ifh.write(entry)
1073 ifh.write(entry)
1074 else:
1074 else:
1075 offset += curr * self._io.size
1075 offset += curr * self._io.size
1076 transaction.add(self.indexfile, offset, curr)
1076 transaction.add(self.indexfile, offset, curr)
1077 ifh.write(entry)
1077 ifh.write(entry)
1078 ifh.write(data[0])
1078 ifh.write(data[0])
1079 ifh.write(data[1])
1079 ifh.write(data[1])
1080 self.checkinlinesize(transaction, ifh)
1080 self.checkinlinesize(transaction, ifh)
1081
1081
1082 if type(text) == str: # only accept immutable objects
1082 if type(text) == str: # only accept immutable objects
1083 self._cache = (node, curr, text)
1083 self._cache = (node, curr, text)
1084 self._basecache = (curr, chainbase)
1084 self._basecache = (curr, chainbase)
1085 return node
1085 return node
1086
1086
1087 def group(self, nodelist, bundler, reorder=None):
1087 def group(self, nodelist, bundler, reorder=None):
1088 """Calculate a delta group, yielding a sequence of changegroup chunks
1088 """Calculate a delta group, yielding a sequence of changegroup chunks
1089 (strings).
1089 (strings).
1090
1090
1091 Given a list of changeset revs, return a set of deltas and
1091 Given a list of changeset revs, return a set of deltas and
1092 metadata corresponding to nodes. The first delta is
1092 metadata corresponding to nodes. The first delta is
1093 first parent(nodelist[0]) -> nodelist[0], the receiver is
1093 first parent(nodelist[0]) -> nodelist[0], the receiver is
1094 guaranteed to have this parent as it has all history before
1094 guaranteed to have this parent as it has all history before
1095 these changesets. In the case firstparent is nullrev the
1095 these changesets. In the case firstparent is nullrev the
1096 changegroup starts with a full revision.
1096 changegroup starts with a full revision.
1097 """
1097 """
1098
1098
1099 # for generaldelta revlogs, we linearize the revs; this will both be
1099 # for generaldelta revlogs, we linearize the revs; this will both be
1100 # much quicker and generate a much smaller bundle
1100 # much quicker and generate a much smaller bundle
1101 if (self._generaldelta and reorder is not False) or reorder:
1101 if (self._generaldelta and reorder is not False) or reorder:
1102 dag = dagutil.revlogdag(self)
1102 dag = dagutil.revlogdag(self)
1103 revs = set(self.rev(n) for n in nodelist)
1103 revs = set(self.rev(n) for n in nodelist)
1104 revs = dag.linearize(revs)
1104 revs = dag.linearize(revs)
1105 else:
1105 else:
1106 revs = sorted([self.rev(n) for n in nodelist])
1106 revs = sorted([self.rev(n) for n in nodelist])
1107
1107
1108 # if we don't have any revisions touched by these changesets, bail
1108 # if we don't have any revisions touched by these changesets, bail
1109 if not revs:
1109 if not revs:
1110 yield bundler.close()
1110 yield bundler.close()
1111 return
1111 return
1112
1112
1113 # add the parent of the first rev
1113 # add the parent of the first rev
1114 p = self.parentrevs(revs[0])[0]
1114 p = self.parentrevs(revs[0])[0]
1115 revs.insert(0, p)
1115 revs.insert(0, p)
1116
1116
1117 # build deltas
1117 # build deltas
1118 for r in xrange(len(revs) - 1):
1118 for r in xrange(len(revs) - 1):
1119 prev, curr = revs[r], revs[r + 1]
1119 prev, curr = revs[r], revs[r + 1]
1120 for c in bundler.revchunk(self, curr, prev):
1120 for c in bundler.revchunk(self, curr, prev):
1121 yield c
1121 yield c
1122
1122
1123 yield bundler.close()
1123 yield bundler.close()
1124
1124
1125 def addgroup(self, bundle, linkmapper, transaction):
1125 def addgroup(self, bundle, linkmapper, transaction):
1126 """
1126 """
1127 add a delta group
1127 add a delta group
1128
1128
1129 given a set of deltas, add them to the revision log. the
1129 given a set of deltas, add them to the revision log. the
1130 first delta is against its parent, which should be in our
1130 first delta is against its parent, which should be in our
1131 log, the rest are against the previous delta.
1131 log, the rest are against the previous delta.
1132 """
1132 """
1133
1133
1134 # track the base of the current delta log
1134 # track the base of the current delta log
1135 node = None
1135 node = None
1136
1136
1137 r = len(self)
1137 r = len(self)
1138 end = 0
1138 end = 0
1139 if r:
1139 if r:
1140 end = self.end(r - 1)
1140 end = self.end(r - 1)
1141 ifh = self.opener(self.indexfile, "a+")
1141 ifh = self.opener(self.indexfile, "a+")
1142 isize = r * self._io.size
1142 isize = r * self._io.size
1143 if self._inline:
1143 if self._inline:
1144 transaction.add(self.indexfile, end + isize, r)
1144 transaction.add(self.indexfile, end + isize, r)
1145 dfh = None
1145 dfh = None
1146 else:
1146 else:
1147 transaction.add(self.indexfile, isize, r)
1147 transaction.add(self.indexfile, isize, r)
1148 transaction.add(self.datafile, end)
1148 transaction.add(self.datafile, end)
1149 dfh = self.opener(self.datafile, "a")
1149 dfh = self.opener(self.datafile, "a")
1150
1150
1151 try:
1151 try:
1152 # loop through our set of deltas
1152 # loop through our set of deltas
1153 chain = None
1153 chain = None
1154 while 1:
1154 while True:
1155 chunkdata = bundle.deltachunk(chain)
1155 chunkdata = bundle.deltachunk(chain)
1156 if not chunkdata:
1156 if not chunkdata:
1157 break
1157 break
1158 node = chunkdata['node']
1158 node = chunkdata['node']
1159 p1 = chunkdata['p1']
1159 p1 = chunkdata['p1']
1160 p2 = chunkdata['p2']
1160 p2 = chunkdata['p2']
1161 cs = chunkdata['cs']
1161 cs = chunkdata['cs']
1162 deltabase = chunkdata['deltabase']
1162 deltabase = chunkdata['deltabase']
1163 delta = chunkdata['delta']
1163 delta = chunkdata['delta']
1164
1164
1165 link = linkmapper(cs)
1165 link = linkmapper(cs)
1166 if node in self.nodemap:
1166 if node in self.nodemap:
1167 # this can happen if two branches make the same change
1167 # this can happen if two branches make the same change
1168 chain = node
1168 chain = node
1169 continue
1169 continue
1170
1170
1171 for p in (p1, p2):
1171 for p in (p1, p2):
1172 if not p in self.nodemap:
1172 if not p in self.nodemap:
1173 raise LookupError(p, self.indexfile,
1173 raise LookupError(p, self.indexfile,
1174 _('unknown parent'))
1174 _('unknown parent'))
1175
1175
1176 if deltabase not in self.nodemap:
1176 if deltabase not in self.nodemap:
1177 raise LookupError(deltabase, self.indexfile,
1177 raise LookupError(deltabase, self.indexfile,
1178 _('unknown delta base'))
1178 _('unknown delta base'))
1179
1179
1180 baserev = self.rev(deltabase)
1180 baserev = self.rev(deltabase)
1181 chain = self._addrevision(node, None, transaction, link,
1181 chain = self._addrevision(node, None, transaction, link,
1182 p1, p2, (baserev, delta), ifh, dfh)
1182 p1, p2, (baserev, delta), ifh, dfh)
1183 if not dfh and not self._inline:
1183 if not dfh and not self._inline:
1184 # addrevision switched from inline to conventional
1184 # addrevision switched from inline to conventional
1185 # reopen the index
1185 # reopen the index
1186 ifh.close()
1186 ifh.close()
1187 dfh = self.opener(self.datafile, "a")
1187 dfh = self.opener(self.datafile, "a")
1188 ifh = self.opener(self.indexfile, "a")
1188 ifh = self.opener(self.indexfile, "a")
1189 finally:
1189 finally:
1190 if dfh:
1190 if dfh:
1191 dfh.close()
1191 dfh.close()
1192 ifh.close()
1192 ifh.close()
1193
1193
1194 return node
1194 return node
1195
1195
1196 def strip(self, minlink, transaction):
1196 def strip(self, minlink, transaction):
1197 """truncate the revlog on the first revision with a linkrev >= minlink
1197 """truncate the revlog on the first revision with a linkrev >= minlink
1198
1198
1199 This function is called when we're stripping revision minlink and
1199 This function is called when we're stripping revision minlink and
1200 its descendants from the repository.
1200 its descendants from the repository.
1201
1201
1202 We have to remove all revisions with linkrev >= minlink, because
1202 We have to remove all revisions with linkrev >= minlink, because
1203 the equivalent changelog revisions will be renumbered after the
1203 the equivalent changelog revisions will be renumbered after the
1204 strip.
1204 strip.
1205
1205
1206 So we truncate the revlog on the first of these revisions, and
1206 So we truncate the revlog on the first of these revisions, and
1207 trust that the caller has saved the revisions that shouldn't be
1207 trust that the caller has saved the revisions that shouldn't be
1208 removed and that it'll readd them after this truncation.
1208 removed and that it'll readd them after this truncation.
1209 """
1209 """
1210 if len(self) == 0:
1210 if len(self) == 0:
1211 return
1211 return
1212
1212
1213 for rev in self:
1213 for rev in self:
1214 if self.index[rev][4] >= minlink:
1214 if self.index[rev][4] >= minlink:
1215 break
1215 break
1216 else:
1216 else:
1217 return
1217 return
1218
1218
1219 # first truncate the files on disk
1219 # first truncate the files on disk
1220 end = self.start(rev)
1220 end = self.start(rev)
1221 if not self._inline:
1221 if not self._inline:
1222 transaction.add(self.datafile, end)
1222 transaction.add(self.datafile, end)
1223 end = rev * self._io.size
1223 end = rev * self._io.size
1224 else:
1224 else:
1225 end += rev * self._io.size
1225 end += rev * self._io.size
1226
1226
1227 transaction.add(self.indexfile, end)
1227 transaction.add(self.indexfile, end)
1228
1228
1229 # then reset internal state in memory to forget those revisions
1229 # then reset internal state in memory to forget those revisions
1230 self._cache = None
1230 self._cache = None
1231 self._chunkclear()
1231 self._chunkclear()
1232 for x in xrange(rev, len(self)):
1232 for x in xrange(rev, len(self)):
1233 del self.nodemap[self.node(x)]
1233 del self.nodemap[self.node(x)]
1234
1234
1235 del self.index[rev:-1]
1235 del self.index[rev:-1]
1236
1236
1237 def checksize(self):
1237 def checksize(self):
1238 expected = 0
1238 expected = 0
1239 if len(self):
1239 if len(self):
1240 expected = max(0, self.end(len(self) - 1))
1240 expected = max(0, self.end(len(self) - 1))
1241
1241
1242 try:
1242 try:
1243 f = self.opener(self.datafile)
1243 f = self.opener(self.datafile)
1244 f.seek(0, 2)
1244 f.seek(0, 2)
1245 actual = f.tell()
1245 actual = f.tell()
1246 f.close()
1246 f.close()
1247 dd = actual - expected
1247 dd = actual - expected
1248 except IOError, inst:
1248 except IOError, inst:
1249 if inst.errno != errno.ENOENT:
1249 if inst.errno != errno.ENOENT:
1250 raise
1250 raise
1251 dd = 0
1251 dd = 0
1252
1252
1253 try:
1253 try:
1254 f = self.opener(self.indexfile)
1254 f = self.opener(self.indexfile)
1255 f.seek(0, 2)
1255 f.seek(0, 2)
1256 actual = f.tell()
1256 actual = f.tell()
1257 f.close()
1257 f.close()
1258 s = self._io.size
1258 s = self._io.size
1259 i = max(0, actual // s)
1259 i = max(0, actual // s)
1260 di = actual - (i * s)
1260 di = actual - (i * s)
1261 if self._inline:
1261 if self._inline:
1262 databytes = 0
1262 databytes = 0
1263 for r in self:
1263 for r in self:
1264 databytes += max(0, self.length(r))
1264 databytes += max(0, self.length(r))
1265 dd = 0
1265 dd = 0
1266 di = actual - len(self) * s - databytes
1266 di = actual - len(self) * s - databytes
1267 except IOError, inst:
1267 except IOError, inst:
1268 if inst.errno != errno.ENOENT:
1268 if inst.errno != errno.ENOENT:
1269 raise
1269 raise
1270 di = 0
1270 di = 0
1271
1271
1272 return (dd, di)
1272 return (dd, di)
1273
1273
1274 def files(self):
1274 def files(self):
1275 res = [self.indexfile]
1275 res = [self.indexfile]
1276 if not self._inline:
1276 if not self._inline:
1277 res.append(self.datafile)
1277 res.append(self.datafile)
1278 return res
1278 return res
@@ -1,214 +1,214
1 # sshrepo.py - ssh repository proxy class for mercurial
1 # sshrepo.py - ssh repository proxy class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import util, error, wireproto
9 import util, error, wireproto
10
10
11 class remotelock(object):
11 class remotelock(object):
12 def __init__(self, repo):
12 def __init__(self, repo):
13 self.repo = repo
13 self.repo = repo
14 def release(self):
14 def release(self):
15 self.repo.unlock()
15 self.repo.unlock()
16 self.repo = None
16 self.repo = None
17 def __del__(self):
17 def __del__(self):
18 if self.repo:
18 if self.repo:
19 self.release()
19 self.release()
20
20
21 class sshrepository(wireproto.wirerepository):
21 class sshrepository(wireproto.wirerepository):
22 def __init__(self, ui, path, create=False):
22 def __init__(self, ui, path, create=False):
23 self._url = path
23 self._url = path
24 self.ui = ui
24 self.ui = ui
25
25
26 u = util.url(path, parsequery=False, parsefragment=False)
26 u = util.url(path, parsequery=False, parsefragment=False)
27 if u.scheme != 'ssh' or not u.host or u.path is None:
27 if u.scheme != 'ssh' or not u.host or u.path is None:
28 self._abort(error.RepoError(_("couldn't parse location %s") % path))
28 self._abort(error.RepoError(_("couldn't parse location %s") % path))
29
29
30 self.user = u.user
30 self.user = u.user
31 if u.passwd is not None:
31 if u.passwd is not None:
32 self._abort(error.RepoError(_("password in URL not supported")))
32 self._abort(error.RepoError(_("password in URL not supported")))
33 self.host = u.host
33 self.host = u.host
34 self.port = u.port
34 self.port = u.port
35 self.path = u.path or "."
35 self.path = u.path or "."
36
36
37 sshcmd = self.ui.config("ui", "ssh", "ssh")
37 sshcmd = self.ui.config("ui", "ssh", "ssh")
38 remotecmd = self.ui.config("ui", "remotecmd", "hg")
38 remotecmd = self.ui.config("ui", "remotecmd", "hg")
39
39
40 args = util.sshargs(sshcmd, self.host, self.user, self.port)
40 args = util.sshargs(sshcmd, self.host, self.user, self.port)
41
41
42 if create:
42 if create:
43 cmd = '%s %s "%s init %s"'
43 cmd = '%s %s "%s init %s"'
44 cmd = cmd % (sshcmd, args, remotecmd, self.path)
44 cmd = cmd % (sshcmd, args, remotecmd, self.path)
45
45
46 ui.note(_('running %s\n') % cmd)
46 ui.note(_('running %s\n') % cmd)
47 res = util.system(cmd)
47 res = util.system(cmd)
48 if res != 0:
48 if res != 0:
49 self._abort(error.RepoError(_("could not create remote repo")))
49 self._abort(error.RepoError(_("could not create remote repo")))
50
50
51 self.validate_repo(ui, sshcmd, args, remotecmd)
51 self.validate_repo(ui, sshcmd, args, remotecmd)
52
52
53 def url(self):
53 def url(self):
54 return self._url
54 return self._url
55
55
56 def validate_repo(self, ui, sshcmd, args, remotecmd):
56 def validate_repo(self, ui, sshcmd, args, remotecmd):
57 # cleanup up previous run
57 # cleanup up previous run
58 self.cleanup()
58 self.cleanup()
59
59
60 cmd = '%s %s "%s -R %s serve --stdio"'
60 cmd = '%s %s "%s -R %s serve --stdio"'
61 cmd = cmd % (sshcmd, args, remotecmd, self.path)
61 cmd = cmd % (sshcmd, args, remotecmd, self.path)
62
62
63 cmd = util.quotecommand(cmd)
63 cmd = util.quotecommand(cmd)
64 ui.note(_('running %s\n') % cmd)
64 ui.note(_('running %s\n') % cmd)
65 self.pipeo, self.pipei, self.pipee = util.popen3(cmd)
65 self.pipeo, self.pipei, self.pipee = util.popen3(cmd)
66
66
67 # skip any noise generated by remote shell
67 # skip any noise generated by remote shell
68 self._callstream("hello")
68 self._callstream("hello")
69 r = self._callstream("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
69 r = self._callstream("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
70 lines = ["", "dummy"]
70 lines = ["", "dummy"]
71 max_noise = 500
71 max_noise = 500
72 while lines[-1] and max_noise:
72 while lines[-1] and max_noise:
73 l = r.readline()
73 l = r.readline()
74 self.readerr()
74 self.readerr()
75 if lines[-1] == "1\n" and l == "\n":
75 if lines[-1] == "1\n" and l == "\n":
76 break
76 break
77 if l:
77 if l:
78 ui.debug("remote: ", l)
78 ui.debug("remote: ", l)
79 lines.append(l)
79 lines.append(l)
80 max_noise -= 1
80 max_noise -= 1
81 else:
81 else:
82 self._abort(error.RepoError(_("no suitable response from remote hg")))
82 self._abort(error.RepoError(_("no suitable response from remote hg")))
83
83
84 self.capabilities = set()
84 self.capabilities = set()
85 for l in reversed(lines):
85 for l in reversed(lines):
86 if l.startswith("capabilities:"):
86 if l.startswith("capabilities:"):
87 self.capabilities.update(l[:-1].split(":")[1].split())
87 self.capabilities.update(l[:-1].split(":")[1].split())
88 break
88 break
89
89
90 def readerr(self):
90 def readerr(self):
91 while 1:
91 while True:
92 size = util.fstat(self.pipee).st_size
92 size = util.fstat(self.pipee).st_size
93 if size == 0:
93 if size == 0:
94 break
94 break
95 s = self.pipee.read(size)
95 s = self.pipee.read(size)
96 if not s:
96 if not s:
97 break
97 break
98 for l in s.splitlines():
98 for l in s.splitlines():
99 self.ui.status(_("remote: "), l, '\n')
99 self.ui.status(_("remote: "), l, '\n')
100
100
101 def _abort(self, exception):
101 def _abort(self, exception):
102 self.cleanup()
102 self.cleanup()
103 raise exception
103 raise exception
104
104
105 def cleanup(self):
105 def cleanup(self):
106 try:
106 try:
107 self.pipeo.close()
107 self.pipeo.close()
108 self.pipei.close()
108 self.pipei.close()
109 # read the error descriptor until EOF
109 # read the error descriptor until EOF
110 for l in self.pipee:
110 for l in self.pipee:
111 self.ui.status(_("remote: "), l)
111 self.ui.status(_("remote: "), l)
112 self.pipee.close()
112 self.pipee.close()
113 except:
113 except:
114 pass
114 pass
115
115
116 __del__ = cleanup
116 __del__ = cleanup
117
117
118 def _callstream(self, cmd, **args):
118 def _callstream(self, cmd, **args):
119 self.ui.debug("sending %s command\n" % cmd)
119 self.ui.debug("sending %s command\n" % cmd)
120 self.pipeo.write("%s\n" % cmd)
120 self.pipeo.write("%s\n" % cmd)
121 _func, names = wireproto.commands[cmd]
121 _func, names = wireproto.commands[cmd]
122 keys = names.split()
122 keys = names.split()
123 wireargs = {}
123 wireargs = {}
124 for k in keys:
124 for k in keys:
125 if k == '*':
125 if k == '*':
126 wireargs['*'] = args
126 wireargs['*'] = args
127 break
127 break
128 else:
128 else:
129 wireargs[k] = args[k]
129 wireargs[k] = args[k]
130 del args[k]
130 del args[k]
131 for k, v in sorted(wireargs.iteritems()):
131 for k, v in sorted(wireargs.iteritems()):
132 self.pipeo.write("%s %d\n" % (k, len(v)))
132 self.pipeo.write("%s %d\n" % (k, len(v)))
133 if isinstance(v, dict):
133 if isinstance(v, dict):
134 for dk, dv in v.iteritems():
134 for dk, dv in v.iteritems():
135 self.pipeo.write("%s %d\n" % (dk, len(dv)))
135 self.pipeo.write("%s %d\n" % (dk, len(dv)))
136 self.pipeo.write(dv)
136 self.pipeo.write(dv)
137 else:
137 else:
138 self.pipeo.write(v)
138 self.pipeo.write(v)
139 self.pipeo.flush()
139 self.pipeo.flush()
140
140
141 return self.pipei
141 return self.pipei
142
142
143 def _call(self, cmd, **args):
143 def _call(self, cmd, **args):
144 self._callstream(cmd, **args)
144 self._callstream(cmd, **args)
145 return self._recv()
145 return self._recv()
146
146
147 def _callpush(self, cmd, fp, **args):
147 def _callpush(self, cmd, fp, **args):
148 r = self._call(cmd, **args)
148 r = self._call(cmd, **args)
149 if r:
149 if r:
150 return '', r
150 return '', r
151 while 1:
151 while True:
152 d = fp.read(4096)
152 d = fp.read(4096)
153 if not d:
153 if not d:
154 break
154 break
155 self._send(d)
155 self._send(d)
156 self._send("", flush=True)
156 self._send("", flush=True)
157 r = self._recv()
157 r = self._recv()
158 if r:
158 if r:
159 return '', r
159 return '', r
160 return self._recv(), ''
160 return self._recv(), ''
161
161
162 def _decompress(self, stream):
162 def _decompress(self, stream):
163 return stream
163 return stream
164
164
165 def _recv(self):
165 def _recv(self):
166 l = self.pipei.readline()
166 l = self.pipei.readline()
167 self.readerr()
167 self.readerr()
168 try:
168 try:
169 l = int(l)
169 l = int(l)
170 except ValueError:
170 except ValueError:
171 self._abort(error.ResponseError(_("unexpected response:"), l))
171 self._abort(error.ResponseError(_("unexpected response:"), l))
172 return self.pipei.read(l)
172 return self.pipei.read(l)
173
173
174 def _send(self, data, flush=False):
174 def _send(self, data, flush=False):
175 self.pipeo.write("%d\n" % len(data))
175 self.pipeo.write("%d\n" % len(data))
176 if data:
176 if data:
177 self.pipeo.write(data)
177 self.pipeo.write(data)
178 if flush:
178 if flush:
179 self.pipeo.flush()
179 self.pipeo.flush()
180 self.readerr()
180 self.readerr()
181
181
182 def lock(self):
182 def lock(self):
183 self._call("lock")
183 self._call("lock")
184 return remotelock(self)
184 return remotelock(self)
185
185
186 def unlock(self):
186 def unlock(self):
187 self._call("unlock")
187 self._call("unlock")
188
188
189 def addchangegroup(self, cg, source, url):
189 def addchangegroup(self, cg, source, url):
190 '''Send a changegroup to the remote server. Return an integer
190 '''Send a changegroup to the remote server. Return an integer
191 similar to unbundle(). DEPRECATED, since it requires locking the
191 similar to unbundle(). DEPRECATED, since it requires locking the
192 remote.'''
192 remote.'''
193 d = self._call("addchangegroup")
193 d = self._call("addchangegroup")
194 if d:
194 if d:
195 self._abort(error.RepoError(_("push refused: %s") % d))
195 self._abort(error.RepoError(_("push refused: %s") % d))
196 while 1:
196 while True:
197 d = cg.read(4096)
197 d = cg.read(4096)
198 if not d:
198 if not d:
199 break
199 break
200 self.pipeo.write(d)
200 self.pipeo.write(d)
201 self.readerr()
201 self.readerr()
202
202
203 self.pipeo.flush()
203 self.pipeo.flush()
204
204
205 self.readerr()
205 self.readerr()
206 r = self._recv()
206 r = self._recv()
207 if not r:
207 if not r:
208 return 1
208 return 1
209 try:
209 try:
210 return int(r)
210 return int(r)
211 except ValueError:
211 except ValueError:
212 self._abort(error.ResponseError(_("unexpected response:"), r))
212 self._abort(error.ResponseError(_("unexpected response:"), r))
213
213
214 instance = sshrepository
214 instance = sshrepository
@@ -1,42 +1,42
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # Based on python's Tools/scripts/md5sum.py
3 # Based on python's Tools/scripts/md5sum.py
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2, which is
6 # of the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2, which is
7 # GPL-compatible.
7 # GPL-compatible.
8
8
9 import sys, os
9 import sys, os
10
10
11 try:
11 try:
12 from hashlib import md5
12 from hashlib import md5
13 except ImportError:
13 except ImportError:
14 from md5 import md5
14 from md5 import md5
15
15
16 try:
16 try:
17 import msvcrt
17 import msvcrt
18 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
18 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
19 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
19 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
20 except ImportError:
20 except ImportError:
21 pass
21 pass
22
22
# For every file named on the command line, print "<hexdigest> <filename>";
# exit with status 1 on the first I/O problem, 0 otherwise.
for filename in sys.argv[1:]:
    try:
        # Binary mode so the digest is identical across platforms.
        fp = open(filename, 'rb')
    except IOError, msg:
        sys.stderr.write('%s: Can\'t open: %s\n' % (filename, msg))
        sys.exit(1)

    m = md5()
    try:
        # Hash in fixed-size chunks so arbitrarily large files fit in memory.
        while True:
            data = fp.read(8192)
            if not data:
                break
            m.update(data)
    except IOError, msg:
        sys.stderr.write('%s: I/O error: %s\n' % (filename, msg))
        sys.exit(1)
    sys.stdout.write('%s %s\n' % (m.hexdigest(), filename))

sys.exit(0)
@@ -1,141 +1,141
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2
2
3 __doc__ = """Tiny HTTP Proxy.
3 __doc__ = """Tiny HTTP Proxy.
4
4
5 This module implements GET, HEAD, POST, PUT and DELETE methods
5 This module implements GET, HEAD, POST, PUT and DELETE methods
6 on BaseHTTPServer, and behaves as an HTTP proxy. The CONNECT
6 on BaseHTTPServer, and behaves as an HTTP proxy. The CONNECT
7 method is also implemented experimentally, but has not been
7 method is also implemented experimentally, but has not been
8 tested yet.
8 tested yet.
9
9
10 Any help will be greatly appreciated. SUZUKI Hisao
10 Any help will be greatly appreciated. SUZUKI Hisao
11 """
11 """
12
12
13 __version__ = "0.2.1"
13 __version__ = "0.2.1"
14
14
15 import BaseHTTPServer, select, socket, SocketServer, urlparse
15 import BaseHTTPServer, select, socket, SocketServer, urlparse
16
16
class ProxyHandler (BaseHTTPServer.BaseHTTPRequestHandler):
    """Request handler implementing a minimal forwarding HTTP proxy.

    GET/HEAD/POST/PUT/DELETE are forwarded to the origin server;
    CONNECT opens a raw byte tunnel (used for e.g. https).
    """
    __base = BaseHTTPServer.BaseHTTPRequestHandler
    __base_handle = __base.handle

    server_version = "TinyHTTPProxy/" + __version__
    rbufsize = 0                       # keep self.rfile unbuffered

    def handle(self):
        # If a whitelist is configured (allowed_clients attribute) and
        # the peer is not on it, parse the request line just enough to
        # send a 403; otherwise defer to the stock dispatch.
        (ip, port) = self.client_address
        if hasattr(self, 'allowed_clients') and ip not in self.allowed_clients:
            self.raw_requestline = self.rfile.readline()
            if self.parse_request():
                self.send_error(403)
        else:
            self.__base_handle()

    def log_request(self, code='-', size='-'):
        # Append any "x-*" request headers to the standard access-log line.
        xheaders = [h for h in self.headers.items() if h[0].startswith('x-')]
        self.log_message('"%s" %s %s%s',
                         self.requestline, str(code), str(size),
                         ''.join([' %s:%s' % h for h in sorted(xheaders)]))

    def _connect_to(self, netloc, soc):
        # Split an optional ":port" suffix off netloc; default to port 80.
        # Returns 1 on success, 0 after sending a 404 to the client.
        i = netloc.find(':')
        if i >= 0:
            host_port = netloc[:i], int(netloc[i + 1:])
        else:
            host_port = netloc, 80
        print "\t" "connect to %s:%d" % host_port
        try: soc.connect(host_port)
        except socket.error, arg:
            # socket.error may or may not carry an (errno, string) pair.
            try: msg = arg[1]
            except: msg = arg
            self.send_error(404, msg)
            return 0
        return 1

    def do_CONNECT(self):
        # Tunnel raw bytes between client and origin for CONNECT requests.
        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            if self._connect_to(self.path, soc):
                self.log_request(200)
                self.wfile.write(self.protocol_version +
                                 " 200 Connection established\r\n")
                self.wfile.write("Proxy-agent: %s\r\n" % self.version_string())
                self.wfile.write("\r\n")
                # Longer idle allowance (300 ticks) than plain requests.
                self._read_write(soc, 300)
        finally:
            print "\t" "bye"
            soc.close()
            self.connection.close()

    def do_GET(self):
        # Forward an absolute-URI request to the origin server.
        (scm, netloc, path, params, query, fragment) = urlparse.urlparse(
            self.path, 'http')
        if scm != 'http' or fragment or not netloc:
            self.send_error(400, "bad url %s" % self.path)
            return
        soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            if self._connect_to(netloc, soc):
                self.log_request()
                # Re-issue the request line with an origin-form URL
                # (scheme and netloc stripped).
                soc.send("%s %s %s\r\n" % (
                    self.command,
                    urlparse.urlunparse(('', '', path, params, query, '')),
                    self.request_version))
                # Force a one-shot connection and drop the hop-by-hop header.
                self.headers['Connection'] = 'close'
                del self.headers['Proxy-Connection']
                for key_val in self.headers.items():
                    soc.send("%s: %s\r\n" % key_val)
                soc.send("\r\n")
                self._read_write(soc)
        finally:
            print "\t" "bye"
            soc.close()
            self.connection.close()

    def _read_write(self, soc, max_idling=20):
        # Shuttle bytes between the client connection and soc until one
        # side reports an exceptional condition, a peer closes (empty
        # recv), or the link stays idle for max_idling select timeouts
        # (3 seconds each).
        iw = [self.connection, soc]
        ow = []
        count = 0
        while True:
            count += 1
            (ins, _, exs) = select.select(iw, ow, iw, 3)
            if exs:
                break
            if ins:
                for i in ins:
                    # Whatever arrives on one socket goes out the other.
                    if i is soc:
                        out = self.connection
                    else:
                        out = soc
                    data = i.recv(8192)
                    if data:
                        out.send(data)
                        count = 0
            else:
                print "\t" "idle", count
            if count == max_idling:
                break

    # All ordinary methods share the same forwarding logic.
    do_HEAD = do_GET
    do_POST = do_GET
    do_PUT = do_GET
    do_DELETE = do_GET
122
122
class ThreadingHTTPServer(SocketServer.ThreadingMixIn,
                          BaseHTTPServer.HTTPServer):
    """HTTP server variant that services each request on its own thread."""
    pass
125
125
126 if __name__ == '__main__':
126 if __name__ == '__main__':
127 from sys import argv
127 from sys import argv
128 if argv[1:] and argv[1] in ('-h', '--help'):
128 if argv[1:] and argv[1] in ('-h', '--help'):
129 print argv[0], "[port [allowed_client_name ...]]"
129 print argv[0], "[port [allowed_client_name ...]]"
130 else:
130 else:
131 if argv[2:]:
131 if argv[2:]:
132 allowed = []
132 allowed = []
133 for name in argv[2:]:
133 for name in argv[2:]:
134 client = socket.gethostbyname(name)
134 client = socket.gethostbyname(name)
135 allowed.append(client)
135 allowed.append(client)
136 print "Accept: %s (%s)" % (client, name)
136 print "Accept: %s (%s)" % (client, name)
137 ProxyHandler.allowed_clients = allowed
137 ProxyHandler.allowed_clients = allowed
138 del argv[2:]
138 del argv[2:]
139 else:
139 else:
140 print "Any clients will be served..."
140 print "Any clients will be served..."
141 BaseHTTPServer.test(ProxyHandler, ThreadingHTTPServer)
141 BaseHTTPServer.test(ProxyHandler, ThreadingHTTPServer)
General Comments 0
You need to be logged in to leave comments. Login now