##// END OF EJS Templates
check-code: flag 0/1 used as constant Boolean expression
Martin Geisler -
r14494:1ffeeb91 default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,377 +1,379
1 1 #!/usr/bin/env python
2 2 #
3 3 # check-code - a style and portability checker for Mercurial
4 4 #
5 5 # Copyright 2010 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 import re, glob, os, sys
11 11 import keyword
12 12 import optparse
13 13
def repquote(m):
    """Blank out the contents of a quoted string matched by *m*.

    Word characters become 'x' and any other non-space character becomes
    'o', so string contents can no longer trigger the style patterns while
    line lengths and column positions stay unchanged.
    """
    quote = m.group('quote')
    masked = re.sub(r"[^\sx]", "o", re.sub(r"\w", "x", m.group('text')))
    return quote + masked + quote
18 18
def reppython(m):
    """Mask Python source matched by the pyfilters pattern.

    A comment is replaced by an equal-length run of '#'; a quoted string is
    blanked the same way repquote does it (word chars -> 'x', other
    non-space chars -> 'o'), keeping the quote characters.
    """
    c = m.group('comment')
    if c:
        return "#" * len(c)
    body = re.sub(r"[^\sx]", "o", re.sub(r"\w", "x", m.group('text')))
    return m.group('quote') + body + m.group('quote')
24 24
def repcomment(m):
    """Replace a '#' comment with same-length '#' padding, keeping the
    leading whitespace captured in group 1."""
    indent, body = m.group(1), m.group(2)
    return indent + "#" * len(body)
27 27
def repccomment(m):
    """Blank out a C block comment while preserving its line layout.

    Non-space characters become 'x'; a single space right after a newline
    (the usual ' * ' continuation indent) is also masked so it cannot trip
    the indentation checks.
    """
    masked = re.sub(r"((?<=\n) )|\S", "x", m.group(2))
    return "%s%s*/" % (m.group(1), masked)
31 31
def repcallspaces(m):
    """Strip indentation from continuation lines inside a parenthesised
    argument list, so wrapped calls don't trigger whitespace checks."""
    args = re.sub(r"\n\s+", "\n", m.group(2))
    return "%s%s" % (m.group(1), args)
35 35
def repinclude(m):
    """Replace an #include'd header name with a fixed placeholder so the
    name itself cannot match any style pattern."""
    return "%s<foo>" % m.group(1)
38 38
def rephere(m):
    """Blank out the body of a shell here-document, keeping whitespace."""
    masked = re.sub(r"\S", "x", m.group(2))
    return "%s%s" % (m.group(1), masked)
42 42
43 43
# Patterns for shell-based test scripts.  Each table is a pair of lists:
# [0] = errors, [1] = warnings; each entry is (regex, message).
testpats = [
  [
    (r'(pushd|popd)', "don't use 'pushd' or 'popd', use 'cd'"),
    (r'\W\$?\(\([^\)]*\)\)', "don't use (()) or $(()), use 'expr'"),
    (r'^function', "don't use 'function', use old style"),
    (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"),
    (r'echo.*\\n', "don't use 'echo \\n', use printf"),
    (r'echo -n', "don't use 'echo -n', use printf"),
    (r'^diff.*-\w*N', "don't use 'diff -N'"),
    (r'(^| )wc[^|]*$', "filter wc output"),
    (r'head -c', "don't use 'head -c', use 'dd'"),
    (r'ls.*-\w*R', "don't use 'ls -R', use 'find'"),
    (r'printf.*\\\d\d\d', "don't use 'printf \NNN', use Python"),
    (r'printf.*\\x', "don't use printf \\x, use Python"),
    (r'\$\(.*\)', "don't use $(expr), use `expr`"),
    (r'rm -rf \*', "don't use naked rm -rf, target a directory"),
    (r'(^|\|\s*)grep (-\w\s+)*[^|]*[(|]\w',
     "use egrep for extended grep syntax"),
    (r'/bin/', "don't use explicit paths for tools"),
    (r'\$PWD', "don't use $PWD, use `pwd`"),
    (r'[^\n]\Z', "no trailing newline"),
    (r'export.*=', "don't export and assign at once"),
    ('^([^"\']|("[^"]*")|(\'[^\']*\'))*\\^', "^ must be quoted"),
    (r'^source\b', "don't use 'source', use '.'"),
    (r'touch -d', "don't use 'touch -d', use 'touch -t' instead"),
    (r'ls\s+[^|-]+\s+-', "options to 'ls' must come before filenames"),
    (r'[^>]>\s*\$HGRCPATH', "don't overwrite $HGRCPATH, append to it"),
  ],
  # warnings
  []
]

# Filters applied to test scripts before pattern matching: comments and
# here-document bodies are blanked so their contents cannot match.
testfilters = [
  (r"( *)(#([^\n]*\S)?)", repcomment),
  (r"<<(\S+)((.|\n)*?\n\1)", rephere),
]

# Unified tests (.t files) prefix commands with "  $ " and continuation
# lines with "  > "; the shell patterns above are re-anchored accordingly.
uprefix = r"^  \$ "
uprefixc = r"^  > "
utestpats = [
  [
    (r'^(\S|  $ ).*(\S\s+|^\s+)\n', "trailing whitespace on non-output"),
    (uprefix + r'.*\|\s*sed', "use regex test output patterns instead of sed"),
    (uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
    (uprefix + r'.*\$\?', "explicit exit code checks unnecessary"),
    (uprefix + r'.*\|\| echo.*(fail|error)',
     "explicit exit code checks unnecessary"),
    (uprefix + r'set -e', "don't use set -e"),
    (uprefixc + r'( *)\t', "don't use tabs to indent"),
  ],
  # warnings
  []
]

# Re-anchor every shell test pattern behind the "  $ " command prefix so
# they apply to the command lines of unified tests as well.
for i in [0, 1]:
    for p, m in testpats[i]:
        if p.startswith('^'):
            p = uprefix + p[1:]
        else:
            p = uprefix + p
        utestpats[i].append((p, m))

utestfilters = [
  (r"( *)(#([^\n]*\S)?)", repcomment),
]
109 109
# Python source checks: portability (Python 2.4 floor, Python 3 future)
# and house style.  [0] = errors, [1] = warnings.
pypats = [
  [
    (r'^\s*def\s*\w+\s*\(.*,\s*\(',
     "tuple parameter unpacking not available in Python 3+"),
    (r'lambda\s*\(.*,.*\)',
     "tuple parameter unpacking not available in Python 3+"),
    (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"),
    (r'\breduce\s*\(.*', "reduce is not available in Python 3+"),
    (r'\.has_key\b', "dict.has_key is not available in Python 3+"),
    (r'^\s*\t', "don't use tabs"),
    (r'\S;\s*\n', "semicolon"),
    (r'\w,\w', "missing whitespace after ,"),
    (r'\w[+/*\-<>]\w', "missing whitespace in expression"),
    (r'^\s+\w+=\w+[^,)]$', "missing whitespace in assignment"),
    (r'.{85}', "line too long"),
    (r'[^\n]\Z', "no trailing newline"),
    (r'(\S\s+|^\s+)\n', "trailing whitespace"),
#    (r'^\s+[^_ ][^_. ]+_[^_]+\s*=', "don't use underbars in identifiers"),
#    (r'\w*[a-z][A-Z]\w*\s*=', "don't use camelcase in identifiers"),
    (r'^\s*(if|while|def|class|except|try)\s[^[]*:\s*[^\]#\s]+',
     "linebreak after :"),
    (r'class\s[^(]:', "old-style class, use class foo(object)"),
    (r'\b(%s)\(' % '|'.join(keyword.kwlist),
     "Python keyword is not a function"),
    (r',]', "unneeded trailing ',' in list"),
#    (r'class\s[A-Z][^\(]*\((?!Exception)',
#     "don't capitalize non-exception classes"),
#    (r'in range\(', "use xrange"),
#    (r'^\s*print\s+', "avoid using print in core and extensions"),
    (r'[\x80-\xff]', "non-ASCII character literal"),
    (r'("\')\.format\(', "str.format() not available in Python 2.4"),
    (r'^\s*with\s+', "with not available in Python 2.4"),
    (r'\.isdisjoint\(', "set.isdisjoint not available in Python 2.4"),
    (r'^\s*except.* as .*:', "except as not available in Python 2.4"),
    (r'^\s*os\.path\.relpath', "relpath not available in Python 2.4"),
    (r'(?<!def)\s+(any|all|format)\(',
     "any/all/format not available in Python 2.4"),
    (r'(?<!def)\s+(callable)\(',
     "callable not available in Python 3, use hasattr(f, '__call__')"),
    (r'if\s.*\selse', "if ... else form not available in Python 2.4"),
    (r'^\s*(%s)\s\s' % '|'.join(keyword.kwlist),
     "gratuitous whitespace after Python keyword"),
    (r'([\(\[]\s\S)|(\S\s[\)\]])', "gratuitous whitespace in () or []"),
#    (r'\s\s=', "gratuitous whitespace before ="),
    (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=)\S',
     "missing whitespace around operator"),
    (r'[^>< ](\+=|-=|!=|<>|<=|>=|<<=|>>=)\s',
     "missing whitespace around operator"),
    (r'\s(\+=|-=|!=|<>|<=|>=|<<=|>>=)\S',
     "missing whitespace around operator"),
    (r'[^+=*/!<>&| -](\s=|=\s)[^= ]',
     "wrong whitespace around ="),
    (r'raise Exception', "don't raise generic exceptions"),
    (r' is\s+(not\s+)?["\'0-9-]', "object comparison with literal"),
    (r' [=!]=\s+(True|False|None)',
     "comparison with singleton, use 'is' or 'is not' instead"),
    (r'^\s*(while|if) [01]:',
     "use True/False for constant Boolean expression"),
    (r'opener\([^)]*\).read\(',
     "use opener.read() instead"),
    (r'opener\([^)]*\).write\(',
     "use opener.write() instead"),
    (r'[\s\(](open|file)\([^)]*\)\.read\(',
     "use util.readfile() instead"),
    (r'[\s\(](open|file)\([^)]*\)\.write\(',
     "use util.readfile() instead"),
    (r'^[\s\(]*(open(er)?|file)\([^)]*\)',
     "always assign an opened file to a variable, and close it afterwards"),
    (r'[\s\(](open|file)\([^)]*\)\.',
     "always assign an opened file to a variable, and close it afterwards"),
  ],
  # warnings
  [
    (r'.{81}', "warning: line over 80 characters"),
    (r'^\s*except:$', "warning: naked except clause"),
    (r'ui\.(status|progress|write|note|warn)\([\'\"]x',
     "warning: unwrapped ui message"),
  ]
]

# Blank out Python comments and string literals (single, double, triple
# quoted) before matching, via reppython.
pyfilters = [
  (r"""(?msx)(?P<comment>\#.*?$)|
     ((?P<quote>('''|\"\"\"|(?<!')'(?!')|(?<!")"(?!")))
      (?P<text>(([^\\]|\\.)*?))
      (?P=quote))""", reppython),
]

# C source checks.
cpats = [
  [
    (r'//', "don't use //-style comments"),
    (r'^ ', "don't use spaces to indent"),
    (r'\S\t', "don't use tabs except for indent"),
    (r'(\S\s+|^\s+)\n', "trailing whitespace"),
    (r'.{85}', "line too long"),
    (r'(while|if|do|for)\(', "use space after while/if/do/for"),
    (r'return\(', "return is not a function"),
    (r' ;', "no space before ;"),
    (r'\w+\* \w+', "use int *foo, not int* foo"),
    (r'\([^\)]+\) \w+', "use (int)foo, not (int) foo"),
    (r'\S+ (\+\+|--)', "use foo++, not foo ++"),
    (r'\w,\w', "missing whitespace after ,"),
    (r'^[^#]\w[+/*]\w', "missing whitespace in expression"),
    (r'^#\s+\w', "use #foo, not # foo"),
    (r'[^\n]\Z', "no trailing newline"),
    (r'^\s*#import\b', "use only #include in standard C code"),
  ],
  # warnings
  []
]

# Blank out C comments, string literals, include names and wrapped call
# arguments before matching.
cfilters = [
  (r'(/\*)(((\*(?!/))|[^*])*)\*/', repccomment),
  (r'''(?P<quote>(?<!")")(?P<text>([^"]|\\")+)"(?!")''', repquote),
  (r'''(#\s*include\s+<)([^>]+)>''', repinclude),
  (r'(\()([^)]+\))', repcallspaces),
]

# Layering checks: util must not depend on ui, revlog must not depend on
# the repo object.
inutilpats = [
  [
    (r'\bui\.', "don't use ui in util"),
  ],
  # warnings
  []
]

inrevlogpats = [
  [
    (r'\brepo\.', "don't use repo in revlog"),
  ],
  # warnings
  []
]

# Dispatch table: (check name, filename regex, filters, pattern tables).
# A file may match several entries (e.g. revlog.py is checked both as
# generic Python and for layering violations).
checks = [
  ('python', r'.*\.(py|cgi)$', pyfilters, pypats),
  ('test script', r'(.*/)?test-[^.~]*$', testfilters, testpats),
  ('c', r'.*\.c$', cfilters, cpats),
  ('unified test', r'.*\.t$', utestfilters, utestpats),
  ('layering violation repo in revlog', r'mercurial/revlog\.py', pyfilters,
   inrevlogpats),
  ('layering violation ui in util', r'mercurial/util\.py', pyfilters,
   inutilpats),
]
251 253
class norepeatlogger(object):
    # Error reporter that prints each offending source line only once,
    # even when several patterns match it.
    def __init__(self):
        # (fname, lineno, line) of the last reported location
        self._lastseen = None

    def log(self, fname, lineno, line, msg, blame):
        """print error related a to given line of a given file.

        The faulty line will also be printed but only once in the case
        of multiple errors.

        :fname: filename
        :lineno: line number
        :line: actual content of the line
        :msg: error message
        :blame: blame information ("user@rev") or empty string
        """
        msgid = fname, lineno, line
        if msgid != self._lastseen:
            # first message for this line: print the location header and
            # the line itself; subsequent messages only append below it
            if blame:
                print "%s:%d (%s):" % (fname, lineno, blame)
            else:
                print "%s:%d:" % (fname, lineno)
            print " > %s" % line
            self._lastseen = msgid
        print " " + msg
276 278
# shared logger instance used as the default reporter by checkfile()
_defaultlogger = norepeatlogger()
278 280
def getblame(f):
    """Return [(line, user, rev), ...] for file *f* using 'hg annotate'.

    Each annotate output line looks like "user rev: content"; the prefix
    before the first ':' carries user and revision.
    """
    annotated = []
    for raw in os.popen('hg annotate -un %s' % f):
        prefix, content = raw.split(':', 1)
        user, rev = prefix.split()
        # drop the single space after ':' and the trailing newline
        annotated.append((content[1:-1], user, rev))
    return annotated
286 288
def checkfile(f, logfunc=_defaultlogger.log, maxerr=None, warnings=False,
              blame=False, debug=False):
    """checks style and portability of a given file

    :f: filepath
    :logfunc: function used to report error
              logfunc(filename, linenumber, linecontent, errormessage)
    :maxerr: number of errors to display before aborting.
             Set to None (default) to report all errors
    :warnings: also apply warning-level patterns
    :blame: annotate each error with 'hg annotate' information
    :debug: print progress information about the checks being run

    return True if no error is found, False otherwise.
    """
    blamecache = None
    result = True
    for name, match, filters, pats in checks:
        if debug:
            print name, f
        fc = 0
        if not re.match(match, f):
            if debug:
                print "Skipping %s for %s it doesn't match %s" % (
                       name, match, f)
            continue
        fp = open(f)
        # pre keeps the original text (for display and ignore markers);
        # post is the filtered copy the patterns actually run against
        pre = post = fp.read()
        fp.close()
        # the marker string is split so this file doesn't match itself
        if "no-" + "check-code" in pre:
            if debug:
                print "Skipping %s for %s it has no- and check-code" % (
                       name, f)
            break
        for p, r in filters:
            post = re.sub(p, r, post)
        if warnings:
            pats = pats[0] + pats[1]
        else:
            pats = pats[0]
        # print post # uncomment to show filtered version
        # pair up original and filtered lines; keepends=True on post so
        # end-of-line patterns like [^\n]\Z still work
        z = enumerate(zip(pre.splitlines(), post.splitlines(True)))
        if debug:
            print "Checking %s for %s" % (name, f)
        for n, l in z:
            if "check-code" + "-ignore" in l[0]:
                if debug:
                    print "Skipping %s for %s:%s (check-code -ignore)" % (
                           name, f, n)
                continue
            for p, msg in pats:
                # match against the filtered line, report the original one
                if re.search(p, l[1]):
                    bd = ""
                    if blame:
                        bd = 'working directory'
                        if not blamecache:
                            blamecache = getblame(f)
                        if n < len(blamecache):
                            bl, bu, br = blamecache[n]
                            # only trust blame if the line is unmodified
                            if bl == l[0]:
                                bd = '%s@%s' % (bu, br)
                    logfunc(f, n + 1, l[0], msg, bd)
                    fc += 1
                    result = False
            if maxerr is not None and fc >= maxerr:
                print " (too many errors, giving up)"
                break
    return result
352 354
if __name__ == "__main__":
    parser = optparse.OptionParser("%prog [options] [files]")
    parser.add_option("-w", "--warnings", action="store_true",
                      help="include warning-level checks")
    parser.add_option("-p", "--per-file", type="int",
                      help="max warnings per file")
    parser.add_option("-b", "--blame", action="store_true",
                      help="use annotate to generate blame info")
    parser.add_option("", "--debug", action="store_true",
                      help="show debug information")

    parser.set_defaults(per_file=15, warnings=False, blame=False, debug=False)
    (options, args) = parser.parse_args()

    # with no arguments, check everything in the current directory
    if len(args) == 0:
        check = glob.glob("*")
    else:
        check = args

    # exit non-zero if ANY file fails; ret must be initialized before the
    # loop — resetting it per file would forget earlier failures and let
    # the script exit 0 whenever the last file happens to be clean
    ret = 0
    for f in check:
        if not checkfile(f, maxerr=options.per_file, warnings=options.warnings,
                         blame=options.blame, debug=options.debug):
            ret = 1
    sys.exit(ret)
@@ -1,166 +1,166
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 from mercurial import cmdutil, scmutil, match, commands
5 5 import time, os, sys
6 6
def timer(func, title=None):
    """Call func() repeatedly and report the best timing to stderr.

    Runs until at least 3 seconds and 100 iterations have elapsed, but
    gives up after 10 seconds once 3 iterations are done.  Reports wall,
    combined, user and system time of the fastest run.
    """
    samples = []
    begin = time.time()
    count = 0
    while True:
        ostart = os.times()
        cstart = time.time()
        r = func()
        cstop = time.time()
        ostop = os.times()
        count += 1
        samples.append((cstop - cstart,
                        ostop[0] - ostart[0],
                        ostop[1] - ostart[1]))
        elapsed = cstop - begin
        if (elapsed > 3 and count >= 100) or (elapsed > 10 and count >= 3):
            break
    if title:
        sys.stderr.write("! %s\n" % title)
    if r:
        sys.stderr.write("! result: %s\n" % r)
    best = min(samples)
    sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
                     % (best[0], best[1] + best[2], best[1], best[2], count))
31 31
def perfwalk(ui, repo, *pats):
    """Time a dirstate walk, falling back through older hg APIs.

    The nested fallbacks exist so this extension works against several
    Mercurial versions; `except Exception` (not a bare `except:`) keeps
    that best-effort behavior without swallowing KeyboardInterrupt or
    SystemExit.
    """
    try:
        m = scmutil.match(repo, pats, {})
        timer(lambda: len(list(repo.dirstate.walk(m, [], True, False))))
    except Exception:
        # scmutil.match / dirstate.walk signature not available: older hg
        try:
            m = scmutil.match(repo, pats, {})
            timer(lambda: len([b for a, b, c in repo.dirstate.statwalk([], m)]))
        except Exception:
            # oldest supported API
            timer(lambda: len(list(cmdutil.walk(repo, pats, {}))))
42 42
def perfstatus(ui, repo, *pats):
    """Time a full repository status computation.

    (A dirstate-only variant via match.always/dirstate.status is possible
    but intentionally not used here.)
    """
    def run():
        return sum(len(group) for group in repo.status())
    timer(run)
47 47
def perfheads(ui, repo):
    """Time computing the changelog head nodes."""
    def run():
        return len(repo.changelog.heads())
    timer(run)
50 50
def perftags(ui, repo):
    """Time reading tags from scratch on every iteration."""
    import mercurial.changelog, mercurial.manifest
    def rebuild():
        # throw away cached changelog, manifest and tag state so the
        # whole tag computation is repeated each time
        repo.changelog = mercurial.changelog.changelog(repo.sopener)
        repo.manifest = mercurial.manifest.manifest(repo.sopener)
        repo._tags = None
        return len(repo.tags())
    timer(rebuild)
59 59
def perfdirstate(ui, repo):
    """Time loading the dirstate after invalidation."""
    "a" in repo.dirstate  # prime the dirstate once before timing
    def probe():
        repo.dirstate.invalidate()
        "a" in repo.dirstate
    timer(probe)
66 66
def perfdirstatedirs(ui, repo):
    """Time rebuilding the dirstate directory map."""
    "a" in repo.dirstate  # make sure the dirstate itself is loaded
    def rebuild():
        "a" in repo.dirstate._dirs
        del repo.dirstate._dirs  # force a rebuild on the next iteration
    timer(rebuild)
73 73
def perfmanifest(ui, repo):
    """Time reading the tip manifest with caches cleared each run."""
    def read():
        tip = repo.manifest.tip()
        repo.manifest.read(tip)
        # clear both old- and new-style manifest caches
        repo.manifest.mapcache = None
        repo.manifest._cache = None
    timer(read)
81 81
def perfindex(ui, repo):
    """Time parsing the changelog index (node lookup after invalidation)."""
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    tip = repo["tip"].node()
    def lookup():
        repo.invalidate()
        repo[tip]
    timer(lookup)
90 90
def perfstartup(ui, repo):
    """Time bare hg startup cost (interpreter + version command)."""
    cmd = sys.argv[0]
    def launch():
        os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
    timer(launch)
96 96
def perfparents(ui, repo):
    """Time parent lookups for the first 1000 changelog nodes."""
    nodes = [repo.changelog.node(i) for i in xrange(1000)]
    def scan():
        for node in nodes:
            repo.changelog.parents(node)
    timer(scan)
103 103
def perflookup(ui, repo, rev):
    """Time resolving a revision identifier to a node."""
    def resolve():
        return len(repo.lookup(rev))
    timer(resolve)
106 106
def perflog(ui, repo, **opts):
    """Time running 'hg log'; output is buffered and discarded."""
    def run():
        commands.log(ui, repo, rev=[], date='', user='',
                     copies=opts.get('rename'))
    ui.pushbuffer()
    timer(run)
    ui.popbuffer()
112 112
def perftemplating(ui, repo):
    """Time 'hg log' with a custom template; output is discarded."""
    def run():
        commands.log(ui, repo, rev=[], date='', user='',
                     template='{date|shortdate} [{rev}:{node|short}]'
                     ' {author|person}: {desc|firstline}\n')
    ui.pushbuffer()
    timer(run)
    ui.popbuffer()
119 119
def perfdiffwd(ui, repo):
    """Profile diff of working directory changes"""
    # map of single-letter diffopt flags to the diff keyword arguments
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = dict((flagnames[c], '1') for c in diffopt)
        def run(opts=opts):  # bind opts now; run is invoked by timer below
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()
        if diffopt:
            title = 'diffopts: %s' % ('-' + diffopt)
        else:
            title = 'diffopts: none'
        timer(run, title)
136 136
def perfrevlog(ui, repo, file_, **opts):
    """Time reconstructing every 'dist'-th revision of a revlog file."""
    from mercurial import revlog
    dist = opts['dist']
    def read():
        r = revlog.revlog(lambda fn: open(fn, 'rb'), file_)
        for rev in xrange(0, len(r), dist):
            r.revision(r.node(rev))

    timer(read)
146 146
# Command table: each perf* function becomes an hg command of the same
# name.  Note: each entry must point at its own implementation — binding
# 'perfdirstatedirs' to perfdirstate (as before) silently ran the wrong
# benchmark.
cmdtable = {
    'perflookup': (perflookup, []),
    'perfparents': (perfparents, []),
    'perfstartup': (perfstartup, []),
    'perfstatus': (perfstatus, []),
    'perfwalk': (perfwalk, []),
    'perfmanifest': (perfmanifest, []),
    'perfindex': (perfindex, []),
    'perfheads': (perfheads, []),
    'perftags': (perftags, []),
    'perfdirstate': (perfdirstate, []),
    'perfdirstatedirs': (perfdirstatedirs, []),
    'perflog': (perflog,
                [('', 'rename', False, 'ask log to follow renames')]),
    'perftemplating': (perftemplating, []),
    'perfdiffwd': (perfdiffwd, []),
    'perfrevlog': (perfrevlog,
                   [('d', 'dist', 100, 'distance between the revisions')],
                   "[INDEXFILE]"),
}
@@ -1,271 +1,271
1 1 # cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import os, re, socket, errno
9 9 from cStringIO import StringIO
10 10 from mercurial import encoding, util
11 11 from mercurial.i18n import _
12 12
13 13 from common import NoRepo, commit, converter_source, checktool
14 14 import cvsps
15 15
class convert_cvs(converter_source):
    """Convert source reading history from a CVS working-copy checkout.

    Changesets are reconstructed from per-file CVS history with cvsps;
    file contents are fetched over the CVS client/server protocol
    (pserver, :local:, or rsh/ssh).
    """
    def __init__(self, ui, path, rev=None):
        super(convert_cvs, self).__init__(ui, path, rev=rev)

        cvs = os.path.join(path, "CVS")
        if not os.path.exists(cvs):
            raise NoRepo(_("%s does not look like a CVS checkout") % path)

        checktool('cvs')

        self.changeset = None
        self.files = {}
        self.tags = {}
        self.lastbranch = {}
        self.socket = None
        # CVS/Root and CVS/Repository end with a newline; strip it
        self.cvsroot = open(os.path.join(cvs, "Root")).read()[:-1]
        self.cvsrepo = open(os.path.join(cvs, "Repository")).read()[:-1]
        self.encoding = encoding.encoding

        self._connect()

    def _parse(self):
        """Build self.changeset/self.files/self.tags/self.heads from the
        cvsps changeset log.  Idempotent: does nothing once parsed."""
        if self.changeset is not None:
            return
        self.changeset = {}

        maxrev = 0
        if self.rev:
            # TODO: handle tags
            try:
                # patchset number?
                maxrev = int(self.rev)
            except ValueError:
                raise util.Abort(_('revision %s is not a patchset number')
                                 % self.rev)

        d = os.getcwd()
        try:
            # cvsps works relative to the checkout directory
            os.chdir(self.path)
            id = None

            cache = 'update'
            if not self.ui.configbool('convert', 'cvsps.cache', True):
                cache = None
            db = cvsps.createlog(self.ui, cache=cache)
            db = cvsps.createchangeset(self.ui, db,
                fuzz=int(self.ui.config('convert', 'cvsps.fuzz', 60)),
                mergeto=self.ui.config('convert', 'cvsps.mergeto', None),
                mergefrom=self.ui.config('convert', 'cvsps.mergefrom', None))

            for cs in db:
                # stop once past the requested patchset number, if any
                if maxrev and cs.id > maxrev:
                    break
                id = str(cs.id)
                cs.author = self.recode(cs.author)
                self.lastbranch[cs.branch] = id
                cs.comment = self.recode(cs.comment)
                date = util.datestr(cs.date)
                self.tags.update(dict.fromkeys(cs.tags, id))

                files = {}
                for f in cs.entries:
                    # value is "<dotted revision>" plus "(DEAD)" marker
                    # for removed files
                    files[f.file] = "%s%s" % ('.'.join([str(x)
                                                        for x in f.revision]),
                                              ['', '(DEAD)'][f.dead])

                # add current commit to set
                c = commit(author=cs.author, date=date,
                           parents=[str(p.id) for p in cs.parents],
                           desc=cs.comment, branch=cs.branch or '')
                self.changeset[id] = c
                self.files[id] = files

            # one head per branch: the last changeset seen on it
            self.heads = self.lastbranch.values()
        finally:
            os.chdir(d)

    def _connect(self):
        """Open self.writep/self.readp to a CVS server, handling the
        :pserver:, :local: and rsh/ssh (:ext:) connection types."""
        root = self.cvsroot
        conntype = None
        user, host = None, None
        cmd = ['cvs', 'server']

        self.ui.status(_("connecting to %s\n") % root)

        if root.startswith(":pserver:"):
            root = root[9:]
            # [user[:password]@]host[:port]/path
            m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
                         root)
            if m:
                conntype = "pserver"
                user, passw, serv, port, root = m.groups()
                if not user:
                    user = "anonymous"
                if not port:
                    port = 2401
                else:
                    port = int(port)
                # the two key formats found in ~/.cvspass entries
                format0 = ":pserver:%s@%s:%s" % (user, serv, root)
                format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)

                if not passw:
                    # no password given: look one up in ~/.cvspass
                    passw = "A"
                    cvspass = os.path.expanduser("~/.cvspass")
                    try:
                        pf = open(cvspass)
                        for line in pf.read().splitlines():
                            part1, part2 = line.split(' ', 1)
                            if part1 == '/1':
                                # /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
                                part1, part2 = part2.split(' ', 1)
                                format = format1
                            else:
                                # :pserver:user@example.com:/cvsroot/foo Ah<Z
                                format = format0
                            if part1 == format:
                                passw = part2
                                break
                        pf.close()
                    except IOError, inst:
                        # a missing .cvspass is fine; anything else is not
                        if inst.errno != errno.ENOENT:
                            if not getattr(inst, 'filename', None):
                                inst.filename = cvspass
                            raise

                sck = socket.socket()
                sck.connect((serv, port))
                sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
                                    "END AUTH REQUEST", ""]))
                # literal pserver success banner
                if sck.recv(128) != "I LOVE YOU\n":
                    raise util.Abort(_("CVS pserver authentication failed"))

                self.writep = self.readp = sck.makefile('r+')

        if not conntype and root.startswith(":local:"):
            conntype = "local"
            root = root[7:]

        if not conntype:
            # :ext:user@host/home/user/path/to/cvsroot
            if root.startswith(":ext:"):
                root = root[5:]
            m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
            # Do not take Windows path "c:\foo\bar" for a connection strings
            if os.path.isdir(root) or not m:
                conntype = "local"
            else:
                conntype = "rsh"
                user, host, root = m.group(1), m.group(2), m.group(3)

        if conntype != "pserver":
            # local and rsh both spawn a 'cvs server' child process
            if conntype == "rsh":
                rsh = os.environ.get("CVS_RSH") or "ssh"
                if user:
                    cmd = [rsh, '-l', user, host] + cmd
                else:
                    cmd = [rsh, host] + cmd

            # popen2 does not support argument lists under Windows
            cmd = [util.shellquote(arg) for arg in cmd]
            cmd = util.quotecommand(' '.join(cmd))
            self.writep, self.readp = util.popen2(cmd)

        self.realroot = root

        # protocol handshake: declare root and the responses we handle
        self.writep.write("Root %s\n" % root)
        self.writep.write("Valid-responses ok error Valid-requests Mode"
                          " M Mbinary E Checked-in Created Updated"
                          " Merged Removed\n")
        self.writep.write("valid-requests\n")
        self.writep.flush()
        r = self.readp.readline()
        if not r.startswith("Valid-requests"):
            raise util.Abort(_('unexpected response from CVS server '
                               '(expected "Valid-requests", but got %r)')
                             % r)
        if "UseUnchanged" in r:
            self.writep.write("UseUnchanged\n")
            self.writep.flush()
            r = self.readp.readline()

    def getheads(self):
        """Return the list of branch head changeset ids."""
        self._parse()
        return self.heads

    def getfile(self, name, rev):
        """Fetch (data, mode) of *name* at *rev* from the CVS server.

        Raises IOError for dead (removed) revisions; mode is "x" when the
        server reports an executable bit, "" otherwise.
        """

        def chunkedread(fp, count):
            # file-objects returned by socked.makefile() do not handle
            # large read() requests very well.
            chunksize = 65536
            output = StringIO()
            while count > 0:
                data = fp.read(min(count, chunksize))
                if not data:
                    raise util.Abort(_("%d bytes missing from remote file")
                                     % count)
                count -= len(data)
                output.write(data)
            return output.getvalue()

        self._parse()
        if rev.endswith("(DEAD)"):
            raise IOError

        args = ("-N -P -kk -r %s --" % rev).split()
        args.append(self.cvsrepo + '/' + name)
        for x in args:
            self.writep.write("Argument %s\n" % x)
        self.writep.write("Directory .\n%s\nco\n" % self.realroot)
        self.writep.flush()

        data = ""
        mode = None
        while True:
            line = self.readp.readline()
            if line.startswith("Created ") or line.startswith("Updated "):
                self.readp.readline() # path
                self.readp.readline() # entries
                mode = self.readp.readline()[:-1]
                count = int(self.readp.readline()[:-1])
                data = chunkedread(self.readp, count)
            elif line.startswith(" "):
                # continuation data line: leading space is protocol framing
                data += line[1:]
            elif line.startswith("M "):
                pass
            elif line.startswith("Mbinary "):
                count = int(self.readp.readline()[:-1])
                data = chunkedread(self.readp, count)
            else:
                if line == "ok\n":
                    if mode is None:
                        raise util.Abort(_('malformed response from CVS'))
                    return (data, "x" in mode and "x" or "")
                elif line.startswith("E "):
                    self.ui.warn(_("cvs server: %s\n") % line[2:])
                elif line.startswith("Remove"):
                    self.readp.readline()
                else:
                    raise util.Abort(_("unknown CVS response: %s") % line)

    def getchanges(self, rev):
        """Return (sorted list of (file, version), {}) for changeset *rev*."""
        self._parse()
        return sorted(self.files[rev].iteritems()), {}

    def getcommit(self, rev):
        """Return the commit object for changeset id *rev*."""
        self._parse()
        return self.changeset[rev]

    def gettags(self):
        """Return the {tag: changeset id} mapping."""
        self._parse()
        return self.tags

    def getchangedfiles(self, rev, i):
        """Return the sorted list of files touched by changeset *rev*."""
        self._parse()
        return sorted(self.files[rev])
@@ -1,1582 +1,1582
1 1 """ Multicast DNS Service Discovery for Python, v0.12
2 2 Copyright (C) 2003, Paul Scott-Murphy
3 3
4 4 This module provides a framework for the use of DNS Service Discovery
5 5 using IP multicast. It has been tested against the JRendezvous
6 6 implementation from <a href="http://strangeberry.com">StrangeBerry</a>,
7 7 and against the mDNSResponder from Mac OS X 10.3.8.
8 8
9 9 This library is free software; you can redistribute it and/or
10 10 modify it under the terms of the GNU Lesser General Public
11 11 License as published by the Free Software Foundation; either
12 12 version 2.1 of the License, or (at your option) any later version.
13 13
14 14 This library is distributed in the hope that it will be useful,
15 15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 17 Lesser General Public License for more details.
18 18
19 19 You should have received a copy of the GNU Lesser General Public
20 20 License along with this library; if not, write to the Free Software
21 21 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 22
23 23 """
24 24
25 25 """0.12 update - allow selection of binding interface
26 26 typo fix - Thanks A. M. Kuchlingi
27 27 removed all use of word 'Rendezvous' - this is an API change"""
28 28
29 29 """0.11 update - correction to comments for addListener method
30 30 support for new record types seen from OS X
31 31 - IPv6 address
32 32 - hostinfo
33 33 ignore unknown DNS record types
34 34 fixes to name decoding
35 35 works alongside other processes using port 5353 (e.g. on Mac OS X)
36 36 tested against Mac OS X 10.3.2's mDNSResponder
37 37 corrections to removal of list entries for service browser"""
38 38
39 39 """0.10 update - Jonathon Paisley contributed these corrections:
40 40 always multicast replies, even when query is unicast
41 41 correct a pointer encoding problem
42 42 can now write records in any order
43 43 traceback shown on failure
44 44 better TXT record parsing
45 45 server is now separate from name
46 46 can cancel a service browser
47 47
48 48 modified some unit tests to accommodate these changes"""
49 49
50 50 """0.09 update - remove all records on service unregistration
51 51 fix DOS security problem with readName"""
52 52
53 53 """0.08 update - changed licensing to LGPL"""
54 54
55 55 """0.07 update - faster shutdown on engine
56 56 pointer encoding of outgoing names
57 57 ServiceBrowser now works
58 58 new unit tests"""
59 59
60 60 """0.06 update - small improvements with unit tests
61 61 added defined exception types
62 62 new style objects
63 63 fixed hostname/interface problem
64 64 fixed socket timeout problem
65 65 fixed addServiceListener() typo bug
66 66 using select() for socket reads
67 67 tested on Debian unstable with Python 2.2.2"""
68 68
69 69 """0.05 update - ensure case insensitivity on domain names
70 70 support for unicast DNS queries"""
71 71
72 72 """0.04 update - added some unit tests
73 73 added __ne__ adjuncts where required
74 74 ensure names end in '.local.'
75 75 timeout on receiving socket for clean shutdown"""
76 76
77 77 __author__ = "Paul Scott-Murphy"
78 78 __email__ = "paul at scott dash murphy dot com"
79 79 __version__ = "0.12"
80 80
81 81 import string
82 82 import time
83 83 import struct
84 84 import socket
85 85 import threading
86 86 import select
87 87 import traceback
88 88
89 89 __all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
90 90
91 91 # hook for threads
92 92
93 93 globals()['_GLOBAL_DONE'] = 0
94 94
95 95 # Some timing constants
96 96
97 97 _UNREGISTER_TIME = 125
98 98 _CHECK_TIME = 175
99 99 _REGISTER_TIME = 225
100 100 _LISTENER_TIME = 200
101 101 _BROWSER_TIME = 500
102 102
103 103 # Some DNS constants
104 104
105 105 _MDNS_ADDR = '224.0.0.251'
106 106 _MDNS_PORT = 5353;
107 107 _DNS_PORT = 53;
108 108 _DNS_TTL = 60 * 60; # one hour default TTL
109 109
110 110 _MAX_MSG_TYPICAL = 1460 # unused
111 111 _MAX_MSG_ABSOLUTE = 8972
112 112
113 113 _FLAGS_QR_MASK = 0x8000 # query response mask
114 114 _FLAGS_QR_QUERY = 0x0000 # query
115 115 _FLAGS_QR_RESPONSE = 0x8000 # response
116 116
117 117 _FLAGS_AA = 0x0400 # Authorative answer
118 118 _FLAGS_TC = 0x0200 # Truncated
119 119 _FLAGS_RD = 0x0100 # Recursion desired
120 120 _FLAGS_RA = 0x8000 # Recursion available
121 121
122 122 _FLAGS_Z = 0x0040 # Zero
123 123 _FLAGS_AD = 0x0020 # Authentic data
124 124 _FLAGS_CD = 0x0010 # Checking disabled
125 125
126 126 _CLASS_IN = 1
127 127 _CLASS_CS = 2
128 128 _CLASS_CH = 3
129 129 _CLASS_HS = 4
130 130 _CLASS_NONE = 254
131 131 _CLASS_ANY = 255
132 132 _CLASS_MASK = 0x7FFF
133 133 _CLASS_UNIQUE = 0x8000
134 134
135 135 _TYPE_A = 1
136 136 _TYPE_NS = 2
137 137 _TYPE_MD = 3
138 138 _TYPE_MF = 4
139 139 _TYPE_CNAME = 5
140 140 _TYPE_SOA = 6
141 141 _TYPE_MB = 7
142 142 _TYPE_MG = 8
143 143 _TYPE_MR = 9
144 144 _TYPE_NULL = 10
145 145 _TYPE_WKS = 11
146 146 _TYPE_PTR = 12
147 147 _TYPE_HINFO = 13
148 148 _TYPE_MINFO = 14
149 149 _TYPE_MX = 15
150 150 _TYPE_TXT = 16
151 151 _TYPE_AAAA = 28
152 152 _TYPE_SRV = 33
153 153 _TYPE_ANY = 255
154 154
155 155 # Mapping constants to names
156 156
157 157 _CLASSES = { _CLASS_IN : "in",
158 158 _CLASS_CS : "cs",
159 159 _CLASS_CH : "ch",
160 160 _CLASS_HS : "hs",
161 161 _CLASS_NONE : "none",
162 162 _CLASS_ANY : "any" }
163 163
164 164 _TYPES = { _TYPE_A : "a",
165 165 _TYPE_NS : "ns",
166 166 _TYPE_MD : "md",
167 167 _TYPE_MF : "mf",
168 168 _TYPE_CNAME : "cname",
169 169 _TYPE_SOA : "soa",
170 170 _TYPE_MB : "mb",
171 171 _TYPE_MG : "mg",
172 172 _TYPE_MR : "mr",
173 173 _TYPE_NULL : "null",
174 174 _TYPE_WKS : "wks",
175 175 _TYPE_PTR : "ptr",
176 176 _TYPE_HINFO : "hinfo",
177 177 _TYPE_MINFO : "minfo",
178 178 _TYPE_MX : "mx",
179 179 _TYPE_TXT : "txt",
180 180 _TYPE_AAAA : "quada",
181 181 _TYPE_SRV : "srv",
182 182 _TYPE_ANY : "any" }
183 183
184 184 # utility functions
185 185
def currentTimeMillis():
    """Return the current system time in milliseconds since the epoch."""
    return 1000 * time.time()
189 189
190 190 # Exceptions
191 191
class NonLocalNameException(Exception):
    """Raised for a query about a name outside the .local. domain."""

class NonUniqueNameException(Exception):
    """Raised when a name we want to claim is already in use."""

class NamePartTooLongException(Exception):
    """Raised when one label of a name exceeds the 64-byte limit."""

class AbstractMethodException(Exception):
    """Raised when an abstract method is invoked directly."""

class BadTypeInNameException(Exception):
    """Raised when a service name embeds an unexpected type."""

class BadDomainName(Exception):
    """Raised when a packet contains an unparseable domain name."""
    def __init__(self, pos):
        Exception.__init__(self, "at position %s" % pos)

class BadDomainNameCircular(BadDomainName):
    """Raised when domain-name compression pointers form a loop."""
213 213
214 214 # implementation classes
215 215
class DNSEntry(object):
    """A DNS entry: a (name, type, class) triple with a lowercase key."""

    def __init__(self, name, type, clazz):
        self.key = name.lower()
        self.name = name
        self.type = type
        self.clazz = clazz & _CLASS_MASK
        # The top bit of the class field is the unique (cache-flush) flag.
        self.unique = (clazz & _CLASS_UNIQUE) != 0

    def __eq__(self, other):
        """Equality test on name, type, and class"""
        if not isinstance(other, DNSEntry):
            return 0
        return (self.name == other.name
                and self.type == other.type
                and self.clazz == other.clazz)

    def __ne__(self, other):
        """Non-equality test"""
        return not self.__eq__(other)

    def getClazz(self, clazz):
        """Return the printable name of a DNS class, or ?(n) if unknown."""
        try:
            return _CLASSES[clazz]
        except KeyError:
            return "?(%s)" % clazz

    def getType(self, type):
        """Return the printable name of a DNS type, or ?(n) if unknown."""
        try:
            return _TYPES[type]
        except KeyError:
            return "?(%s)" % type

    def toString(self, hdr, other):
        """String representation with additional information"""
        result = "%s[%s,%s" % (hdr, self.getType(self.type),
                               self.getClazz(self.clazz))
        if self.unique:
            result += "-unique,"
        else:
            result += ","
        result += self.name
        if other is None:
            result += "]"
        else:
            result += ",%s]" % other
        return result
263 263
class DNSQuestion(DNSEntry):
    """A DNS question entry; only .local. names are accepted."""

    def __init__(self, name, type, clazz):
        # Reject queries outside the mDNS domain before doing any work.
        if not name.endswith(".local."):
            raise NonLocalNameException(name)
        DNSEntry.__init__(self, name, type, clazz)

    def answeredBy(self, rec):
        """Returns true if the question is answered by the record"""
        typematch = self.type == rec.type or self.type == _TYPE_ANY
        return self.clazz == rec.clazz and typematch and self.name == rec.name

    def __repr__(self):
        """String representation"""
        return DNSEntry.toString(self, "question", None)
279 279
280 280
class DNSRecord(DNSEntry):
    """A DNS record - like a DNS entry, but has a TTL and creation time."""

    def __init__(self, name, type, clazz, ttl):
        DNSEntry.__init__(self, name, type, clazz)
        self.ttl = ttl
        self.created = currentTimeMillis()

    def __eq__(self, other):
        """Tests equality as per DNSRecord (TTL is ignored)"""
        if not isinstance(other, DNSRecord):
            return 0
        return DNSEntry.__eq__(self, other)

    def suppressedBy(self, msg):
        """Returns true if any answer in a message can suffice for the
        information held in this record."""
        for answer in msg.answers:
            if self.suppressedByAnswer(answer):
                return 1
        return 0

    def suppressedByAnswer(self, other):
        """Returns true if another record has same name, type and class,
        and if its TTL is at least half of this record's."""
        if self == other and other.ttl > (self.ttl / 2):
            return 1
        return 0

    def getExpirationTime(self, percent):
        """Returns the time at which this record will have expired
        by a certain percentage."""
        # ttl is in seconds; created in ms, so percent * ttl * 10 is ms.
        return self.created + (percent * self.ttl * 10)

    def getRemainingTTL(self, now):
        """Returns the remaining TTL in seconds."""
        remaining = (self.getExpirationTime(100) - now) / 1000
        return max(0, remaining)

    def isExpired(self, now):
        """Returns true if this record has expired."""
        return self.getExpirationTime(100) <= now

    def isStale(self, now):
        """Returns true if this record is at least half way expired."""
        return self.getExpirationTime(50) <= now

    def resetTTL(self, other):
        """Sets this record's TTL and created time to those of
        another record."""
        self.created = other.created
        self.ttl = other.ttl

    def write(self, out):
        """Abstract method"""
        raise AbstractMethodException

    def toString(self, other):
        """String representation with additional information"""
        arg = "%s/%s,%s" % (self.ttl,
                            self.getRemainingTTL(currentTimeMillis()), other)
        return DNSEntry.toString(self, "record", arg)
341 341
class DNSAddress(DNSRecord):
    """A DNS address (A/AAAA) record holding a packed binary address."""

    def __init__(self, name, type, clazz, ttl, address):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.address = address

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeString(self.address, len(self.address))

    def __eq__(self, other):
        """Tests equality on address"""
        if not isinstance(other, DNSAddress):
            return 0
        return self.address == other.address

    def __repr__(self):
        """String representation"""
        try:
            return socket.inet_ntoa(self.address)
        except Exception:
            # Not a 4-byte IPv4 address (e.g. AAAA); show the raw bytes.
            return self.address
365 365
class DNSHinfo(DNSRecord):
    """A DNS host information record"""

    def __init__(self, name, type, clazz, ttl, cpu, os):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.cpu = cpu
        self.os = os

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeString(self.cpu, len(self.cpu))
        out.writeString(self.os, len(self.os))

    def __eq__(self, other):
        """Tests equality on cpu and os"""
        if not isinstance(other, DNSHinfo):
            return 0
        return self.cpu == other.cpu and self.os == other.os

    def __repr__(self):
        """String representation"""
        return "%s %s" % (self.cpu, self.os)
388 388
class DNSPointer(DNSRecord):
    """A DNS pointer (PTR) record naming an alias."""

    def __init__(self, name, type, clazz, ttl, alias):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.alias = alias

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeName(self.alias)

    def __eq__(self, other):
        """Tests equality on alias"""
        if not isinstance(other, DNSPointer):
            return 0
        return self.alias == other.alias

    def __repr__(self):
        """String representation"""
        return self.toString(self.alias)
409 409
class DNSText(DNSRecord):
    """A DNS text (TXT) record."""

    def __init__(self, name, type, clazz, ttl, text):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.text = text

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeString(self.text, len(self.text))

    def __eq__(self, other):
        """Tests equality on text"""
        if not isinstance(other, DNSText):
            return 0
        return self.text == other.text

    def __repr__(self):
        """String representation; long payloads are elided."""
        if len(self.text) <= 10:
            return self.toString(self.text)
        return self.toString(self.text[:7] + "...")
433 433
class DNSService(DNSRecord):
    """A DNS service (SRV) record."""

    def __init__(self, name, type, clazz, ttl, priority, weight, port, server):
        DNSRecord.__init__(self, name, type, clazz, ttl)
        self.priority = priority
        self.weight = weight
        self.port = port
        self.server = server

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.writeShort(self.priority)
        out.writeShort(self.weight)
        out.writeShort(self.port)
        out.writeName(self.server)

    def __eq__(self, other):
        """Tests equality on priority, weight, port and server"""
        if not isinstance(other, DNSService):
            return 0
        return (self.priority == other.priority
                and self.weight == other.weight
                and self.port == other.port
                and self.server == other.server)

    def __repr__(self):
        """String representation"""
        return self.toString("%s:%s" % (self.server, self.port))
460 460
class DNSIncoming(object):
    """Object representation of an incoming DNS packet"""

    def __init__(self, data):
        """Constructor from string holding bytes of packet"""
        self.offset = 0
        self.data = data
        self.questions = []
        self.answers = []
        self.numQuestions = 0
        self.numAnswers = 0
        self.numAuthorities = 0
        self.numAdditionals = 0

        self.readHeader()
        self.readQuestions()
        self.readOthers()

    def _unpack(self, fmt):
        """Unpack one struct of format *fmt* at the current offset and
        advance the offset past it."""
        length = struct.calcsize(fmt)
        info = struct.unpack(fmt, self.data[self.offset:self.offset + length])
        self.offset += length
        return info

    def readHeader(self):
        """Reads header portion of packet"""
        (self.id, self.flags, self.numQuestions, self.numAnswers,
         self.numAuthorities, self.numAdditionals) = self._unpack('!HHHHHH')

    def readQuestions(self):
        """Reads questions section of packet"""
        for _ in range(0, self.numQuestions):
            name = self.readName()
            qtype, qclazz = self._unpack('!HH')
            try:
                self.questions.append(DNSQuestion(name, qtype, qclazz))
            except NonLocalNameException:
                # Silently drop questions for names outside .local.
                pass

    def readInt(self):
        """Reads an integer from the packet"""
        return self._unpack('!I')[0]

    def readCharacterString(self):
        """Reads a length-prefixed character string from the packet"""
        length = ord(self.data[self.offset])
        self.offset += 1
        return self.readString(length)

    def readString(self, len):
        """Reads a string of a given length from the packet"""
        return self._unpack('!' + str(len) + 's')[0]

    def readUnsignedShort(self):
        """Reads an unsigned short from the packet"""
        return self._unpack('!H')[0]

    def readOthers(self):
        """Reads the answers, authorities and additionals section of the
        packet"""
        n = self.numAnswers + self.numAuthorities + self.numAdditionals
        for _ in range(0, n):
            domain = self.readName()
            rtype, rclazz, ttl, length = self._unpack('!HHiH')

            rec = None
            if rtype == _TYPE_A:
                rec = DNSAddress(domain, rtype, rclazz, ttl,
                                 self.readString(4))
            elif rtype == _TYPE_CNAME or rtype == _TYPE_PTR:
                rec = DNSPointer(domain, rtype, rclazz, ttl, self.readName())
            elif rtype == _TYPE_TXT:
                rec = DNSText(domain, rtype, rclazz, ttl,
                              self.readString(length))
            elif rtype == _TYPE_SRV:
                rec = DNSService(domain, rtype, rclazz, ttl,
                                 self.readUnsignedShort(),
                                 self.readUnsignedShort(),
                                 self.readUnsignedShort(),
                                 self.readName())
            elif rtype == _TYPE_HINFO:
                rec = DNSHinfo(domain, rtype, rclazz, ttl,
                               self.readCharacterString(),
                               self.readCharacterString())
            elif rtype == _TYPE_AAAA:
                rec = DNSAddress(domain, rtype, rclazz, ttl,
                                 self.readString(16))
            else:
                # Skip the payload of record types we don't know about;
                # new types encountered need to be parsed properly here.
                self.offset += length

            if rec is not None:
                self.answers.append(rec)

    def isQuery(self):
        """Returns true if this is a query"""
        return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY

    def isResponse(self):
        """Returns true if this is a response"""
        return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE

    def readUTF(self, offset, len):
        """Reads a UTF-8 string of a given length from the packet"""
        return self.data[offset:offset + len].decode('utf-8')

    def readName(self):
        """Reads a domain name from the packet, following compression
        pointers and raising on malformed or circular names."""
        result = ''
        off = self.offset
        next = -1
        first = off

        while True:
            len = ord(self.data[off])
            off += 1
            if len == 0:
                break
            t = len & 0xC0
            if t == 0x00:
                # Plain label: append it and move past its bytes.
                result = ''.join((result, self.readUTF(off, len) + '.'))
                off += len
            elif t == 0xC0:
                # Compression pointer: remember where to resume, then
                # jump.  Pointers must always move strictly backwards.
                if next < 0:
                    next = off + 1
                off = ((len & 0x3F) << 8) | ord(self.data[off])
                if off >= first:
                    raise BadDomainNameCircular(off)
                first = off
            else:
                raise BadDomainName(off)

        if next >= 0:
            self.offset = next
        else:
            self.offset = off

        return result
619 619
620 620
class DNSOutgoing(object):
    """Object representation of an outgoing DNS packet"""

    def __init__(self, flags, multicast=1):
        self.finished = 0
        self.id = 0
        self.multicast = multicast
        self.flags = flags
        self.names = {}   # name -> offset, for compression pointers
        self.data = []    # list of packed fragments
        self.size = 12    # header is 12 bytes

        self.questions = []
        self.answers = []
        self.authorities = []
        self.additionals = []

    def addQuestion(self, record):
        """Adds a question"""
        self.questions.append(record)

    def addAnswer(self, inp, record):
        """Adds an answer, unless *inp* already suppresses it"""
        if record.suppressedBy(inp):
            return
        self.addAnswerAtTime(record, 0)

    def addAnswerAtTime(self, record, now):
        """Adds an answer if it does not expire by a certain time"""
        if record is None:
            return
        if now == 0 or not record.isExpired(now):
            self.answers.append((record, now))

    def addAuthorativeAnswer(self, record):
        """Adds an authoritative answer"""
        self.authorities.append(record)

    def addAdditionalAnswer(self, record):
        """Adds an additional answer"""
        self.additionals.append(record)

    def writeByte(self, value):
        """Writes a single byte to the packet"""
        self.data.append(struct.pack('!c', chr(value)))
        self.size += 1

    def insertShort(self, index, value):
        """Inserts an unsigned short at a certain position in the packet"""
        self.data.insert(index, struct.pack('!H', value))
        self.size += 2

    def writeShort(self, value):
        """Writes an unsigned short to the packet"""
        self.data.append(struct.pack('!H', value))
        self.size += 2

    def writeInt(self, value):
        """Writes an unsigned integer to the packet"""
        self.data.append(struct.pack('!I', int(value)))
        self.size += 4

    def writeString(self, value, length):
        """Writes a string to the packet"""
        self.data.append(struct.pack('!' + str(length) + 's', value))
        self.size += length

    def writeUTF(self, s):
        """Writes a length-prefixed UTF-8 string to the packet"""
        utfstr = s.encode('utf-8')
        length = len(utfstr)
        if length > 64:
            raise NamePartTooLongException
        self.writeByte(length)
        self.writeString(utfstr, length)

    def writeName(self, name):
        """Writes a domain name to the packet, reusing a previous
        occurrence via a compression pointer when possible"""
        if name in self.names:
            # An earlier instance exists; write a pointer to it.
            index = self.names[name]
            self.writeByte((index >> 8) | 0xC0)
            self.writeByte(index)
            return

        # No record of this name yet, so write it out as normal,
        # recording the location of the name for future pointers to it.
        self.names[name] = self.size
        parts = name.split('.')
        if parts[-1] == '':
            parts = parts[:-1]
        for part in parts:
            self.writeUTF(part)
        self.writeByte(0)

    def writeQuestion(self, question):
        """Writes a question to the packet"""
        self.writeName(question.name)
        self.writeShort(question.type)
        self.writeShort(question.clazz)

    def writeRecord(self, record, now):
        """Writes a record (answer, authoritative answer, additional) to
        the packet"""
        self.writeName(record.name)
        self.writeShort(record.type)
        if record.unique and self.multicast:
            self.writeShort(record.clazz | _CLASS_UNIQUE)
        else:
            self.writeShort(record.clazz)
        if now == 0:
            self.writeInt(record.ttl)
        else:
            self.writeInt(record.getRemainingTTL(now))
        index = len(self.data)
        # Adjust size for the length short we will insert before this
        # record's data.
        self.size += 2
        record.write(self)
        self.size -= 2

        length = len(''.join(self.data[index:]))
        self.insertShort(index, length) # Here is the short we adjusted for

    def packet(self):
        """Returns a string containing the packet's bytes

        No further parts should be added to the packet once this
        is done."""
        if not self.finished:
            self.finished = 1
            for question in self.questions:
                self.writeQuestion(question)
            for answer, atime in self.answers:
                self.writeRecord(answer, atime)
            for authority in self.authorities:
                self.writeRecord(authority, 0)
            for additional in self.additionals:
                self.writeRecord(additional, 0)

            # Prepend the header fields in reverse order so the packet
            # starts with id/flags followed by the four section counts.
            self.insertShort(0, len(self.additionals))
            self.insertShort(0, len(self.authorities))
            self.insertShort(0, len(self.answers))
            self.insertShort(0, len(self.questions))
            self.insertShort(0, self.flags)
            if self.multicast:
                self.insertShort(0, 0)
            else:
                self.insertShort(0, self.id)
        return ''.join(self.data)
781 781
782 782
class DNSCache(object):
    """A cache of DNS entries, bucketed by entry key."""

    def __init__(self):
        self.cache = {}

    def add(self, entry):
        """Adds an entry"""
        self.cache.setdefault(entry.key, []).append(entry)

    def remove(self, entry):
        """Removes an entry"""
        try:
            self.cache[entry.key].remove(entry)
        except KeyError:
            pass

    def get(self, entry):
        """Gets an entry by key.  Will return None if there is no
        matching entry."""
        try:
            matches = self.cache[entry.key]
            return matches[matches.index(entry)]
        except (KeyError, ValueError):
            return None

    def getByDetails(self, name, type, clazz):
        """Gets an entry by details.  Will return None if there is
        no matching entry."""
        return self.get(DNSEntry(name, type, clazz))

    def entriesWithName(self, name):
        """Returns a list of entries whose key matches the name."""
        return self.cache.get(name, [])

    def entries(self):
        """Returns a list of all entries"""
        try:
            return reduce(lambda x, y: x + y, self.cache.values())
        except Exception:
            # reduce() over an empty cache raises; report no entries.
            return []
834 834
835 835
class Engine(threading.Thread):
    """An engine wraps read access to sockets, allowing objects that
    need to receive data from sockets to be called back when the
    sockets are ready.

    A reader needs a handle_read() method, which is called when the socket
    it is interested in is ready for reading.

    Writers are not implemented here, because we only send short
    packets.
    """

    def __init__(self, zeroconf):
        threading.Thread.__init__(self)
        self.zeroconf = zeroconf
        self.readers = {} # maps socket to reader
        self.timeout = 5
        self.condition = threading.Condition()
        self.start()

    def run(self):
        while not globals()['_GLOBAL_DONE']:
            rs = self.getReaders()
            if not rs:
                # No sockets to manage, but we wait for the timeout
                # or addition of a socket
                self.condition.acquire()
                self.condition.wait(self.timeout)
                self.condition.release()
                continue
            try:
                rr, wr, er = select.select(rs, [], [], self.timeout)
                for sock in rr:
                    try:
                        self.readers[sock].handle_read()
                    except Exception:
                        if not globals()['_GLOBAL_DONE']:
                            traceback.print_exc()
            except Exception:
                # select() failures (e.g. closed sockets) are ignored;
                # we simply retry on the next iteration.
                pass

    def getReaders(self):
        """Return the registered reader sockets (under the lock)."""
        self.condition.acquire()
        result = self.readers.keys()
        self.condition.release()
        return result

    def addReader(self, reader, socket):
        """Register *reader* for *socket* and wake the select loop."""
        self.condition.acquire()
        self.readers[socket] = reader
        self.condition.notify()
        self.condition.release()

    def delReader(self, socket):
        """Unregister the reader for *socket* and wake the select loop."""
        self.condition.acquire()
        del self.readers[socket]
        self.condition.notify()
        self.condition.release()

    def notify(self):
        """Wake the select loop (e.g. for shutdown)."""
        self.condition.acquire()
        self.condition.notify()
        self.condition.release()
900 900
class Listener(object):
    """A Listener is used by this module to listen on the multicast
    group to which DNS messages are sent, allowing the implementation
    to cache information as it arrives.

    It requires registration with an Engine object in order to have
    the read() method called when a socket is available for reading."""

    def __init__(self, zeroconf):
        self.zeroconf = zeroconf
        self.zeroconf.engine.addReader(self, self.zeroconf.socket)

    def handle_read(self):
        """Read one datagram and dispatch it as a query or response."""
        data, (addr, port) = self.zeroconf.socket.recvfrom(_MAX_MSG_ABSOLUTE)
        self.data = data
        msg = DNSIncoming(data)
        if not msg.isQuery():
            self.zeroconf.handleResponse(msg)
            return
        # Always multicast responses
        if port == _MDNS_PORT:
            self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
        # If it's not a multicast query, reply via unicast
        # and multicast
        elif port == _DNS_PORT:
            self.zeroconf.handleQuery(msg, addr, port)
            self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
930 930
931 931
class Reaper(threading.Thread):
    """A Reaper is used by this module to remove cache entries that
    have expired."""

    def __init__(self, zeroconf):
        threading.Thread.__init__(self)
        self.zeroconf = zeroconf
        self.start()

    def run(self):
        while True:
            # Wake up every ten seconds (or when notified) and sweep.
            self.zeroconf.wait(10 * 1000)
            if globals()['_GLOBAL_DONE']:
                return
            now = currentTimeMillis()
            for record in self.zeroconf.cache.entries():
                if not record.isExpired(now):
                    continue
                self.zeroconf.updateRecord(now, record)
                self.zeroconf.cache.remove(record)
951 951
952 952
953 953 class ServiceBrowser(threading.Thread):
954 954 """Used to browse for a service of a specific type.
955 955
956 956 The listener object will have its addService() and
957 957 removeService() methods called when this browser
958 958 discovers changes in the services availability."""
959 959
960 960 def __init__(self, zeroconf, type, listener):
961 961 """Creates a browser for a specific type"""
962 962 threading.Thread.__init__(self)
963 963 self.zeroconf = zeroconf
964 964 self.type = type
965 965 self.listener = listener
966 966 self.services = {}
967 967 self.nextTime = currentTimeMillis()
968 968 self.delay = _BROWSER_TIME
969 969 self.list = []
970 970
971 971 self.done = 0
972 972
973 973 self.zeroconf.addListener(self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
974 974 self.start()
975 975
976 976 def updateRecord(self, zeroconf, now, record):
977 977 """Callback invoked by Zeroconf when new information arrives.
978 978
979 979 Updates information required by browser in the Zeroconf cache."""
980 980 if record.type == _TYPE_PTR and record.name == self.type:
981 981 expired = record.isExpired(now)
982 982 try:
983 983 oldrecord = self.services[record.alias.lower()]
984 984 if not expired:
985 985 oldrecord.resetTTL(record)
986 986 else:
987 987 del(self.services[record.alias.lower()])
988 988 callback = lambda x: self.listener.removeService(x, self.type, record.alias)
989 989 self.list.append(callback)
990 990 return
991 991 except Exception:
992 992 if not expired:
993 993 self.services[record.alias.lower()] = record
994 994 callback = lambda x: self.listener.addService(x, self.type, record.alias)
995 995 self.list.append(callback)
996 996
997 997 expires = record.getExpirationTime(75)
998 998 if expires < self.nextTime:
999 999 self.nextTime = expires
1000 1000
1001 1001 def cancel(self):
1002 1002 self.done = 1
1003 1003 self.zeroconf.notifyAll()
1004 1004
1005 1005 def run(self):
1006 while 1:
1006 while True:
1007 1007 event = None
1008 1008 now = currentTimeMillis()
1009 1009 if len(self.list) == 0 and self.nextTime > now:
1010 1010 self.zeroconf.wait(self.nextTime - now)
1011 1011 if globals()['_GLOBAL_DONE'] or self.done:
1012 1012 return
1013 1013 now = currentTimeMillis()
1014 1014
1015 1015 if self.nextTime <= now:
1016 1016 out = DNSOutgoing(_FLAGS_QR_QUERY)
1017 1017 out.addQuestion(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
1018 1018 for record in self.services.values():
1019 1019 if not record.isExpired(now):
1020 1020 out.addAnswerAtTime(record, now)
1021 1021 self.zeroconf.send(out)
1022 1022 self.nextTime = now + self.delay
1023 1023 self.delay = min(20 * 1000, self.delay * 2)
1024 1024
1025 1025 if len(self.list) > 0:
1026 1026 event = self.list.pop(0)
1027 1027
1028 1028 if event is not None:
1029 1029 event(self.zeroconf)
1030 1030
1031 1031
1032 1032 class ServiceInfo(object):
1033 1033 """Service information"""
1034 1034
1035 1035 def __init__(self, type, name, address=None, port=None, weight=0, priority=0, properties=None, server=None):
1036 1036 """Create a service description.
1037 1037
1038 1038 type: fully qualified service type name
1039 1039 name: fully qualified service name
1040 1040 address: IP address as unsigned short, network byte order
1041 1041 port: port that the service runs on
1042 1042 weight: weight of the service
1043 1043 priority: priority of the service
1044 1044 properties: dictionary of properties (or a string holding the bytes for the text field)
1045 1045 server: fully qualified name for service host (defaults to name)"""
1046 1046
1047 1047 if not name.endswith(type):
1048 1048 raise BadTypeInNameException
1049 1049 self.type = type
1050 1050 self.name = name
1051 1051 self.address = address
1052 1052 self.port = port
1053 1053 self.weight = weight
1054 1054 self.priority = priority
1055 1055 if server:
1056 1056 self.server = server
1057 1057 else:
1058 1058 self.server = name
1059 1059 self.setProperties(properties)
1060 1060
1061 1061 def setProperties(self, properties):
1062 1062 """Sets properties and text of this info from a dictionary"""
1063 1063 if isinstance(properties, dict):
1064 1064 self.properties = properties
1065 1065 list = []
1066 1066 result = ''
1067 1067 for key in properties:
1068 1068 value = properties[key]
1069 1069 if value is None:
1070 1070 suffix = ''
1071 1071 elif isinstance(value, str):
1072 1072 suffix = value
1073 1073 elif isinstance(value, int):
1074 1074 if value:
1075 1075 suffix = 'true'
1076 1076 else:
1077 1077 suffix = 'false'
1078 1078 else:
1079 1079 suffix = ''
1080 1080 list.append('='.join((key, suffix)))
1081 1081 for item in list:
1082 1082 result = ''.join((result, struct.pack('!c', chr(len(item))), item))
1083 1083 self.text = result
1084 1084 else:
1085 1085 self.text = properties
1086 1086
1087 1087 def setText(self, text):
1088 1088 """Sets properties and text given a text field"""
1089 1089 self.text = text
1090 1090 try:
1091 1091 result = {}
1092 1092 end = len(text)
1093 1093 index = 0
1094 1094 strs = []
1095 1095 while index < end:
1096 1096 length = ord(text[index])
1097 1097 index += 1
1098 1098 strs.append(text[index:index+length])
1099 1099 index += length
1100 1100
1101 1101 for s in strs:
1102 1102 eindex = s.find('=')
1103 1103 if eindex == -1:
1104 1104 # No equals sign at all
1105 1105 key = s
1106 1106 value = 0
1107 1107 else:
1108 1108 key = s[:eindex]
1109 1109 value = s[eindex+1:]
1110 1110 if value == 'true':
1111 1111 value = 1
1112 1112 elif value == 'false' or not value:
1113 1113 value = 0
1114 1114
1115 1115 # Only update non-existent properties
1116 1116 if key and result.get(key) == None:
1117 1117 result[key] = value
1118 1118
1119 1119 self.properties = result
1120 1120 except Exception:
1121 1121 traceback.print_exc()
1122 1122 self.properties = None
1123 1123
1124 1124 def getType(self):
1125 1125 """Type accessor"""
1126 1126 return self.type
1127 1127
1128 1128 def getName(self):
1129 1129 """Name accessor"""
1130 1130 if self.type is not None and self.name.endswith("." + self.type):
1131 1131 return self.name[:len(self.name) - len(self.type) - 1]
1132 1132 return self.name
1133 1133
1134 1134 def getAddress(self):
1135 1135 """Address accessor"""
1136 1136 return self.address
1137 1137
1138 1138 def getPort(self):
1139 1139 """Port accessor"""
1140 1140 return self.port
1141 1141
1142 1142 def getPriority(self):
1143 1143 """Pirority accessor"""
1144 1144 return self.priority
1145 1145
1146 1146 def getWeight(self):
1147 1147 """Weight accessor"""
1148 1148 return self.weight
1149 1149
1150 1150 def getProperties(self):
1151 1151 """Properties accessor"""
1152 1152 return self.properties
1153 1153
1154 1154 def getText(self):
1155 1155 """Text accessor"""
1156 1156 return self.text
1157 1157
1158 1158 def getServer(self):
1159 1159 """Server accessor"""
1160 1160 return self.server
1161 1161
1162 1162 def updateRecord(self, zeroconf, now, record):
1163 1163 """Updates service information from a DNS record"""
1164 1164 if record is not None and not record.isExpired(now):
1165 1165 if record.type == _TYPE_A:
1166 1166 #if record.name == self.name:
1167 1167 if record.name == self.server:
1168 1168 self.address = record.address
1169 1169 elif record.type == _TYPE_SRV:
1170 1170 if record.name == self.name:
1171 1171 self.server = record.server
1172 1172 self.port = record.port
1173 1173 self.weight = record.weight
1174 1174 self.priority = record.priority
1175 1175 #self.address = None
1176 1176 self.updateRecord(zeroconf, now, zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN))
1177 1177 elif record.type == _TYPE_TXT:
1178 1178 if record.name == self.name:
1179 1179 self.setText(record.text)
1180 1180
1181 1181 def request(self, zeroconf, timeout):
1182 1182 """Returns true if the service could be discovered on the
1183 1183 network, and updates this object with details discovered.
1184 1184 """
1185 1185 now = currentTimeMillis()
1186 1186 delay = _LISTENER_TIME
1187 1187 next = now + delay
1188 1188 last = now + timeout
1189 1189 result = 0
1190 1190 try:
1191 1191 zeroconf.addListener(self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN))
1192 1192 while self.server is None or self.address is None or self.text is None:
1193 1193 if last <= now:
1194 1194 return 0
1195 1195 if next <= now:
1196 1196 out = DNSOutgoing(_FLAGS_QR_QUERY)
1197 1197 out.addQuestion(DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN))
1198 1198 out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_SRV, _CLASS_IN), now)
1199 1199 out.addQuestion(DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN))
1200 1200 out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_TXT, _CLASS_IN), now)
1201 1201 if self.server is not None:
1202 1202 out.addQuestion(DNSQuestion(self.server, _TYPE_A, _CLASS_IN))
1203 1203 out.addAnswerAtTime(zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN), now)
1204 1204 zeroconf.send(out)
1205 1205 next = now + delay
1206 1206 delay = delay * 2
1207 1207
1208 1208 zeroconf.wait(min(next, last) - now)
1209 1209 now = currentTimeMillis()
1210 1210 result = 1
1211 1211 finally:
1212 1212 zeroconf.removeListener(self)
1213 1213
1214 1214 return result
1215 1215
1216 1216 def __eq__(self, other):
1217 1217 """Tests equality of service name"""
1218 1218 if isinstance(other, ServiceInfo):
1219 1219 return other.name == self.name
1220 1220 return 0
1221 1221
1222 1222 def __ne__(self, other):
1223 1223 """Non-equality test"""
1224 1224 return not self.__eq__(other)
1225 1225
1226 1226 def __repr__(self):
1227 1227 """String representation"""
1228 1228 result = "service[%s,%s:%s," % (self.name, socket.inet_ntoa(self.getAddress()), self.port)
1229 1229 if self.text is None:
1230 1230 result += "None"
1231 1231 else:
1232 1232 if len(self.text) < 20:
1233 1233 result += self.text
1234 1234 else:
1235 1235 result += self.text[:17] + "..."
1236 1236 result += "]"
1237 1237 return result
1238 1238
1239 1239
1240 1240 class Zeroconf(object):
1241 1241 """Implementation of Zeroconf Multicast DNS Service Discovery
1242 1242
1243 1243 Supports registration, unregistration, queries and browsing.
1244 1244 """
1245 1245 def __init__(self, bindaddress=None):
1246 1246 """Creates an instance of the Zeroconf class, establishing
1247 1247 multicast communications, listening and reaping threads."""
1248 1248 globals()['_GLOBAL_DONE'] = 0
1249 1249 if bindaddress is None:
1250 1250 self.intf = socket.gethostbyname(socket.gethostname())
1251 1251 else:
1252 1252 self.intf = bindaddress
1253 1253 self.group = ('', _MDNS_PORT)
1254 1254 self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
1255 1255 try:
1256 1256 self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
1257 1257 self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
1258 1258 except Exception:
1259 1259 # SO_REUSEADDR should be equivalent to SO_REUSEPORT for
1260 1260 # multicast UDP sockets (p 731, "TCP/IP Illustrated,
1261 1261 # Volume 2"), but some BSD-derived systems require
1262 1262 # SO_REUSEPORT to be specified explicity. Also, not all
1263 1263 # versions of Python have SO_REUSEPORT available. So
1264 1264 # if you're on a BSD-based system, and haven't upgraded
1265 1265 # to Python 2.3 yet, you may find this library doesn't
1266 1266 # work as expected.
1267 1267 #
1268 1268 pass
1269 1269 self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
1270 1270 self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
1271 1271 try:
1272 1272 self.socket.bind(self.group)
1273 1273 except Exception:
1274 1274 # Some versions of linux raise an exception even though
1275 1275 # the SO_REUSE* options have been set, so ignore it
1276 1276 #
1277 1277 pass
1278 1278 #self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.intf) + socket.inet_aton('0.0.0.0'))
1279 1279 self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
1280 1280
1281 1281 self.listeners = []
1282 1282 self.browsers = []
1283 1283 self.services = {}
1284 1284 self.servicetypes = {}
1285 1285
1286 1286 self.cache = DNSCache()
1287 1287
1288 1288 self.condition = threading.Condition()
1289 1289
1290 1290 self.engine = Engine(self)
1291 1291 self.listener = Listener(self)
1292 1292 self.reaper = Reaper(self)
1293 1293
1294 1294 def isLoopback(self):
1295 1295 return self.intf.startswith("127.0.0.1")
1296 1296
1297 1297 def isLinklocal(self):
1298 1298 return self.intf.startswith("169.254.")
1299 1299
1300 1300 def wait(self, timeout):
1301 1301 """Calling thread waits for a given number of milliseconds or
1302 1302 until notified."""
1303 1303 self.condition.acquire()
1304 1304 self.condition.wait(timeout/1000)
1305 1305 self.condition.release()
1306 1306
1307 1307 def notifyAll(self):
1308 1308 """Notifies all waiting threads"""
1309 1309 self.condition.acquire()
1310 1310 self.condition.notifyAll()
1311 1311 self.condition.release()
1312 1312
1313 1313 def getServiceInfo(self, type, name, timeout=3000):
1314 1314 """Returns network's service information for a particular
1315 1315 name and type, or None if no service matches by the timeout,
1316 1316 which defaults to 3 seconds."""
1317 1317 info = ServiceInfo(type, name)
1318 1318 if info.request(self, timeout):
1319 1319 return info
1320 1320 return None
1321 1321
1322 1322 def addServiceListener(self, type, listener):
1323 1323 """Adds a listener for a particular service type. This object
1324 1324 will then have its updateRecord method called when information
1325 1325 arrives for that type."""
1326 1326 self.removeServiceListener(listener)
1327 1327 self.browsers.append(ServiceBrowser(self, type, listener))
1328 1328
1329 1329 def removeServiceListener(self, listener):
1330 1330 """Removes a listener from the set that is currently listening."""
1331 1331 for browser in self.browsers:
1332 1332 if browser.listener == listener:
1333 1333 browser.cancel()
1334 1334 del(browser)
1335 1335
1336 1336 def registerService(self, info, ttl=_DNS_TTL):
1337 1337 """Registers service information to the network with a default TTL
1338 1338 of 60 seconds. Zeroconf will then respond to requests for
1339 1339 information for that service. The name of the service may be
1340 1340 changed if needed to make it unique on the network."""
1341 1341 self.checkService(info)
1342 1342 self.services[info.name.lower()] = info
1343 1343 if self.servicetypes.has_key(info.type):
1344 1344 self.servicetypes[info.type]+=1
1345 1345 else:
1346 1346 self.servicetypes[info.type]=1
1347 1347 now = currentTimeMillis()
1348 1348 nextTime = now
1349 1349 i = 0
1350 1350 while i < 3:
1351 1351 if now < nextTime:
1352 1352 self.wait(nextTime - now)
1353 1353 now = currentTimeMillis()
1354 1354 continue
1355 1355 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
1356 1356 out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, ttl, info.name), 0)
1357 1357 out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, ttl, info.priority, info.weight, info.port, info.server), 0)
1358 1358 out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text), 0)
1359 1359 if info.address:
1360 1360 out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, ttl, info.address), 0)
1361 1361 self.send(out)
1362 1362 i += 1
1363 1363 nextTime += _REGISTER_TIME
1364 1364
1365 1365 def unregisterService(self, info):
1366 1366 """Unregister a service."""
1367 1367 try:
1368 1368 del(self.services[info.name.lower()])
1369 1369 if self.servicetypes[info.type]>1:
1370 1370 self.servicetypes[info.type]-=1
1371 1371 else:
1372 1372 del self.servicetypes[info.type]
1373 1373 except KeyError:
1374 1374 pass
1375 1375 now = currentTimeMillis()
1376 1376 nextTime = now
1377 1377 i = 0
1378 1378 while i < 3:
1379 1379 if now < nextTime:
1380 1380 self.wait(nextTime - now)
1381 1381 now = currentTimeMillis()
1382 1382 continue
1383 1383 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
1384 1384 out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
1385 1385 out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.name), 0)
1386 1386 out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
1387 1387 if info.address:
1388 1388 out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0)
1389 1389 self.send(out)
1390 1390 i += 1
1391 1391 nextTime += _UNREGISTER_TIME
1392 1392
1393 1393 def unregisterAllServices(self):
1394 1394 """Unregister all registered services."""
1395 1395 if len(self.services) > 0:
1396 1396 now = currentTimeMillis()
1397 1397 nextTime = now
1398 1398 i = 0
1399 1399 while i < 3:
1400 1400 if now < nextTime:
1401 1401 self.wait(nextTime - now)
1402 1402 now = currentTimeMillis()
1403 1403 continue
1404 1404 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
1405 1405 for info in self.services.values():
1406 1406 out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
1407 1407 out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.server), 0)
1408 1408 out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
1409 1409 if info.address:
1410 1410 out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0)
1411 1411 self.send(out)
1412 1412 i += 1
1413 1413 nextTime += _UNREGISTER_TIME
1414 1414
1415 1415 def checkService(self, info):
1416 1416 """Checks the network for a unique service name, modifying the
1417 1417 ServiceInfo passed in if it is not unique."""
1418 1418 now = currentTimeMillis()
1419 1419 nextTime = now
1420 1420 i = 0
1421 1421 while i < 3:
1422 1422 for record in self.cache.entriesWithName(info.type):
1423 1423 if record.type == _TYPE_PTR and not record.isExpired(now) and record.alias == info.name:
1424 1424 if (info.name.find('.') < 0):
1425 1425 info.name = info.name + ".[" + info.address + ":" + info.port + "]." + info.type
1426 1426 self.checkService(info)
1427 1427 return
1428 1428 raise NonUniqueNameException
1429 1429 if now < nextTime:
1430 1430 self.wait(nextTime - now)
1431 1431 now = currentTimeMillis()
1432 1432 continue
1433 1433 out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
1434 1434 self.debug = out
1435 1435 out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
1436 1436 out.addAuthorativeAnswer(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, info.name))
1437 1437 self.send(out)
1438 1438 i += 1
1439 1439 nextTime += _CHECK_TIME
1440 1440
1441 1441 def addListener(self, listener, question):
1442 1442 """Adds a listener for a given question. The listener will have
1443 1443 its updateRecord method called when information is available to
1444 1444 answer the question."""
1445 1445 now = currentTimeMillis()
1446 1446 self.listeners.append(listener)
1447 1447 if question is not None:
1448 1448 for record in self.cache.entriesWithName(question.name):
1449 1449 if question.answeredBy(record) and not record.isExpired(now):
1450 1450 listener.updateRecord(self, now, record)
1451 1451 self.notifyAll()
1452 1452
1453 1453 def removeListener(self, listener):
1454 1454 """Removes a listener."""
1455 1455 try:
1456 1456 self.listeners.remove(listener)
1457 1457 self.notifyAll()
1458 1458 except Exception:
1459 1459 pass
1460 1460
1461 1461 def updateRecord(self, now, rec):
1462 1462 """Used to notify listeners of new information that has updated
1463 1463 a record."""
1464 1464 for listener in self.listeners:
1465 1465 listener.updateRecord(self, now, rec)
1466 1466 self.notifyAll()
1467 1467
1468 1468 def handleResponse(self, msg):
1469 1469 """Deal with incoming response packets. All answers
1470 1470 are held in the cache, and listeners are notified."""
1471 1471 now = currentTimeMillis()
1472 1472 for record in msg.answers:
1473 1473 expired = record.isExpired(now)
1474 1474 if record in self.cache.entries():
1475 1475 if expired:
1476 1476 self.cache.remove(record)
1477 1477 else:
1478 1478 entry = self.cache.get(record)
1479 1479 if entry is not None:
1480 1480 entry.resetTTL(record)
1481 1481 record = entry
1482 1482 else:
1483 1483 self.cache.add(record)
1484 1484
1485 1485 self.updateRecord(now, record)
1486 1486
1487 1487 def handleQuery(self, msg, addr, port):
1488 1488 """Deal with incoming query packets. Provides a response if
1489 1489 possible."""
1490 1490 out = None
1491 1491
1492 1492 # Support unicast client responses
1493 1493 #
1494 1494 if port != _MDNS_PORT:
1495 1495 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0)
1496 1496 for question in msg.questions:
1497 1497 out.addQuestion(question)
1498 1498
1499 1499 for question in msg.questions:
1500 1500 if question.type == _TYPE_PTR:
1501 1501 if question.name == "_services._dns-sd._udp.local.":
1502 1502 for stype in self.servicetypes.keys():
1503 1503 if out is None:
1504 1504 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
1505 1505 out.addAnswer(msg, DNSPointer("_services._dns-sd._udp.local.", _TYPE_PTR, _CLASS_IN, _DNS_TTL, stype))
1506 1506 for service in self.services.values():
1507 1507 if question.name == service.type:
1508 1508 if out is None:
1509 1509 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
1510 1510 out.addAnswer(msg, DNSPointer(service.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, service.name))
1511 1511 else:
1512 1512 try:
1513 1513 if out is None:
1514 1514 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
1515 1515
1516 1516 # Answer A record queries for any service addresses we know
1517 1517 if question.type == _TYPE_A or question.type == _TYPE_ANY:
1518 1518 for service in self.services.values():
1519 1519 if service.server == question.name.lower():
1520 1520 out.addAnswer(msg, DNSAddress(question.name, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
1521 1521
1522 1522 service = self.services.get(question.name.lower(), None)
1523 1523 if not service: continue
1524 1524
1525 1525 if question.type == _TYPE_SRV or question.type == _TYPE_ANY:
1526 1526 out.addAnswer(msg, DNSService(question.name, _TYPE_SRV, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.priority, service.weight, service.port, service.server))
1527 1527 if question.type == _TYPE_TXT or question.type == _TYPE_ANY:
1528 1528 out.addAnswer(msg, DNSText(question.name, _TYPE_TXT, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.text))
1529 1529 if question.type == _TYPE_SRV:
1530 1530 out.addAdditionalAnswer(DNSAddress(service.server, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
1531 1531 except Exception:
1532 1532 traceback.print_exc()
1533 1533
1534 1534 if out is not None and out.answers:
1535 1535 out.id = msg.id
1536 1536 self.send(out, addr, port)
1537 1537
1538 1538 def send(self, out, addr = _MDNS_ADDR, port = _MDNS_PORT):
1539 1539 """Sends an outgoing packet."""
1540 1540 # This is a quick test to see if we can parse the packets we generate
1541 1541 #temp = DNSIncoming(out.packet())
1542 1542 try:
1543 1543 self.socket.sendto(out.packet(), 0, (addr, port))
1544 1544 except Exception:
1545 1545 # Ignore this, it may be a temporary loss of network connection
1546 1546 pass
1547 1547
1548 1548 def close(self):
1549 1549 """Ends the background threads, and prevent this instance from
1550 1550 servicing further queries."""
1551 1551 if globals()['_GLOBAL_DONE'] == 0:
1552 1552 globals()['_GLOBAL_DONE'] = 1
1553 1553 self.notifyAll()
1554 1554 self.engine.notify()
1555 1555 self.unregisterAllServices()
1556 1556 self.socket.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
1557 1557 self.socket.close()
1558 1558
1559 1559 # Test a few module features, including service registration, service
1560 1560 # query (for Zoe), and service unregistration.
1561 1561
1562 1562 if __name__ == '__main__':
1563 1563 print "Multicast DNS Service Discovery for Python, version", __version__
1564 1564 r = Zeroconf()
1565 1565 print "1. Testing registration of a service..."
1566 1566 desc = {'version':'0.10','a':'test value', 'b':'another value'}
1567 1567 info = ServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local.", socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc)
1568 1568 print " Registering service..."
1569 1569 r.registerService(info)
1570 1570 print " Registration done."
1571 1571 print "2. Testing query of service information..."
1572 1572 print " Getting ZOE service:", str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local."))
1573 1573 print " Query done."
1574 1574 print "3. Testing query of own service..."
1575 1575 print " Getting self:", str(r.getServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local."))
1576 1576 print " Query done."
1577 1577 print "4. Testing unregister of service information..."
1578 1578 r.unregisterService(info)
1579 1579 print " Unregister done."
1580 1580 r.close()
1581 1581
1582 1582 # no-check-code
@@ -1,91 +1,91
1 1 # ancestor.py - generic DAG ancestor algorithm for mercurial
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import heapq
9 9
10 10 def ancestor(a, b, pfunc):
11 11 """
12 12 Returns the common ancestor of a and b that is furthest from a
13 13 root (as measured by longest path) or None if no ancestor is
14 14 found. If there are multiple common ancestors at the same
15 15 distance, the first one found is returned.
16 16
17 17 pfunc must return a list of parent vertices for a given vertex
18 18 """
19 19
20 20 if a == b:
21 21 return a
22 22
23 23 a, b = sorted([a, b])
24 24
25 25 # find depth from root of all ancestors
26 26 # depth is stored as a negative for heapq
27 27 parentcache = {}
28 28 visit = [a, b]
29 29 depth = {}
30 30 while visit:
31 31 vertex = visit[-1]
32 32 pl = pfunc(vertex)
33 33 parentcache[vertex] = pl
34 34 if not pl:
35 35 depth[vertex] = 0
36 36 visit.pop()
37 37 else:
38 38 for p in pl:
39 39 if p == a or p == b: # did we find a or b as a parent?
40 40 return p # we're done
41 41 if p not in depth:
42 42 visit.append(p)
43 43 if visit[-1] == vertex:
44 44 # -(maximum distance of parents + 1)
45 45 depth[vertex] = min([depth[p] for p in pl]) - 1
46 46 visit.pop()
47 47
48 48 # traverse ancestors in order of decreasing distance from root
49 49 def ancestors(vertex):
50 50 h = [(depth[vertex], vertex)]
51 51 seen = set()
52 52 while h:
53 53 d, n = heapq.heappop(h)
54 54 if n not in seen:
55 55 seen.add(n)
56 56 yield (d, n)
57 57 for p in parentcache[n]:
58 58 heapq.heappush(h, (depth[p], p))
59 59
60 60 def generations(vertex):
61 61 sg, s = None, set()
62 62 for g, v in ancestors(vertex):
63 63 if g != sg:
64 64 if sg:
65 65 yield sg, s
66 66 sg, s = g, set((v,))
67 67 else:
68 68 s.add(v)
69 69 yield sg, s
70 70
71 71 x = generations(a)
72 72 y = generations(b)
73 73 gx = x.next()
74 74 gy = y.next()
75 75
76 76 # increment each ancestor list until it is closer to root than
77 77 # the other, or they match
78 78 try:
79 while 1:
79 while True:
80 80 if gx[0] == gy[0]:
81 81 for v in gx[1]:
82 82 if v in gy[1]:
83 83 return v
84 84 gy = y.next()
85 85 gx = x.next()
86 86 elif gx[0] > gy[0]:
87 87 gy = y.next()
88 88 else:
89 89 gx = x.next()
90 90 except StopIteration:
91 91 return None
@@ -1,358 +1,358
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14 from node import nullid
15 15 from i18n import _
16 16 import os, tempfile, shutil
17 17 import changegroup, util, mdiff, discovery
18 18 import localrepo, changelog, manifest, filelog, revlog, error
19 19
20 20 class bundlerevlog(revlog.revlog):
21 21 def __init__(self, opener, indexfile, bundle, linkmapper):
22 22 # How it works:
23 23 # to retrieve a revision, we need to know the offset of
24 24 # the revision in the bundle (an unbundle object).
25 25 #
26 26 # We store this offset in the index (start), to differentiate a
27 27 # rev in the bundle and from a rev in the revlog, we check
28 28 # len(index[r]). If the tuple is bigger than 7, it is a bundle
29 29 # (it is bigger since we store the node to which the delta is)
30 30 #
31 31 revlog.revlog.__init__(self, opener, indexfile)
32 32 self.bundle = bundle
33 33 self.basemap = {}
34 34 n = len(self)
35 35 chain = None
36 while 1:
36 while True:
37 37 chunkdata = bundle.deltachunk(chain)
38 38 if not chunkdata:
39 39 break
40 40 node = chunkdata['node']
41 41 p1 = chunkdata['p1']
42 42 p2 = chunkdata['p2']
43 43 cs = chunkdata['cs']
44 44 deltabase = chunkdata['deltabase']
45 45 delta = chunkdata['delta']
46 46
47 47 size = len(delta)
48 48 start = bundle.tell() - size
49 49
50 50 link = linkmapper(cs)
51 51 if node in self.nodemap:
52 52 # this can happen if two branches make the same change
53 53 chain = node
54 54 continue
55 55
56 56 for p in (p1, p2):
57 57 if not p in self.nodemap:
58 58 raise error.LookupError(p, self.indexfile,
59 59 _("unknown parent"))
60 60 # start, size, full unc. size, base (unused), link, p1, p2, node
61 61 e = (revlog.offset_type(start, 0), size, -1, -1, link,
62 62 self.rev(p1), self.rev(p2), node)
63 63 self.basemap[n] = deltabase
64 64 self.index.insert(-1, e)
65 65 self.nodemap[node] = n
66 66 chain = node
67 67 n += 1
68 68
69 69 def inbundle(self, rev):
70 70 """is rev from the bundle"""
71 71 if rev < 0:
72 72 return False
73 73 return rev in self.basemap
74 74 def bundlebase(self, rev):
75 75 return self.basemap[rev]
76 76 def _chunk(self, rev):
77 77 # Warning: in case of bundle, the diff is against bundlebase,
78 78 # not against rev - 1
79 79 # XXX: could use some caching
80 80 if not self.inbundle(rev):
81 81 return revlog.revlog._chunk(self, rev)
82 82 self.bundle.seek(self.start(rev))
83 83 return self.bundle.read(self.length(rev))
84 84
85 85 def revdiff(self, rev1, rev2):
86 86 """return or calculate a delta between two revisions"""
87 87 if self.inbundle(rev1) and self.inbundle(rev2):
88 88 # hot path for bundle
89 89 revb = self.rev(self.bundlebase(rev2))
90 90 if revb == rev1:
91 91 return self._chunk(rev2)
92 92 elif not self.inbundle(rev1) and not self.inbundle(rev2):
93 93 return revlog.revlog.revdiff(self, rev1, rev2)
94 94
95 95 return mdiff.textdiff(self.revision(self.node(rev1)),
96 96 self.revision(self.node(rev2)))
97 97
98 98 def revision(self, node):
99 99 """return an uncompressed revision of a given"""
100 100 if node == nullid:
101 101 return ""
102 102
103 103 text = None
104 104 chain = []
105 105 iter_node = node
106 106 rev = self.rev(iter_node)
107 107 # reconstruct the revision if it is from a changegroup
108 108 while self.inbundle(rev):
109 109 if self._cache and self._cache[0] == iter_node:
110 110 text = self._cache[2]
111 111 break
112 112 chain.append(rev)
113 113 iter_node = self.bundlebase(rev)
114 114 rev = self.rev(iter_node)
115 115 if text is None:
116 116 text = revlog.revlog.revision(self, iter_node)
117 117
118 118 while chain:
119 119 delta = self._chunk(chain.pop())
120 120 text = mdiff.patches(text, [delta])
121 121
122 122 p1, p2 = self.parents(node)
123 123 if node != revlog.hash(text, p1, p2):
124 124 raise error.RevlogError(_("integrity check failed on %s:%d")
125 125 % (self.datafile, self.rev(node)))
126 126
127 127 self._cache = (node, self.rev(node), text)
128 128 return text
129 129
130 130 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
131 131 raise NotImplementedError
132 132 def addgroup(self, revs, linkmapper, transaction):
133 133 raise NotImplementedError
134 134 def strip(self, rev, minlink):
135 135 raise NotImplementedError
136 136 def checksize(self):
137 137 raise NotImplementedError
138 138
139 139 class bundlechangelog(bundlerevlog, changelog.changelog):
140 140 def __init__(self, opener, bundle):
141 141 changelog.changelog.__init__(self, opener)
142 142 linkmapper = lambda x: x
143 143 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
144 144 linkmapper)
145 145
146 146 class bundlemanifest(bundlerevlog, manifest.manifest):
147 147 def __init__(self, opener, bundle, linkmapper):
148 148 manifest.manifest.__init__(self, opener)
149 149 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
150 150 linkmapper)
151 151
152 152 class bundlefilelog(bundlerevlog, filelog.filelog):
153 153 def __init__(self, opener, path, bundle, linkmapper, repo):
154 154 filelog.filelog.__init__(self, opener, path)
155 155 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
156 156 linkmapper)
157 157 self._repo = repo
158 158
159 159 def _file(self, f):
160 160 self._repo.file(f)
161 161
162 162 class bundlerepository(localrepo.localrepository):
163 163 def __init__(self, ui, path, bundlename):
164 164 self._tempparent = None
165 165 try:
166 166 localrepo.localrepository.__init__(self, ui, path)
167 167 except error.RepoError:
168 168 self._tempparent = tempfile.mkdtemp()
169 169 localrepo.instance(ui, self._tempparent, 1)
170 170 localrepo.localrepository.__init__(self, ui, self._tempparent)
171 171
172 172 if path:
173 173 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
174 174 else:
175 175 self._url = 'bundle:' + bundlename
176 176
177 177 self.tempfile = None
178 178 f = util.posixfile(bundlename, "rb")
179 179 self.bundle = changegroup.readbundle(f, bundlename)
180 180 if self.bundle.compressed():
181 181 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
182 182 suffix=".hg10un", dir=self.path)
183 183 self.tempfile = temp
184 184 fptemp = os.fdopen(fdtemp, 'wb')
185 185
186 186 try:
187 187 fptemp.write("HG10UN")
188 while 1:
188 while True:
189 189 chunk = self.bundle.read(2**18)
190 190 if not chunk:
191 191 break
192 192 fptemp.write(chunk)
193 193 finally:
194 194 fptemp.close()
195 195
196 196 f = util.posixfile(self.tempfile, "rb")
197 197 self.bundle = changegroup.readbundle(f, bundlename)
198 198
199 199 # dict with the mapping 'filename' -> position in the bundle
200 200 self.bundlefilespos = {}
201 201
202 202 @util.propertycache
203 203 def changelog(self):
204 204 # consume the header if it exists
205 205 self.bundle.changelogheader()
206 206 c = bundlechangelog(self.sopener, self.bundle)
207 207 self.manstart = self.bundle.tell()
208 208 return c
209 209
210 210 @util.propertycache
211 211 def manifest(self):
212 212 self.bundle.seek(self.manstart)
213 213 # consume the header if it exists
214 214 self.bundle.manifestheader()
215 215 m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev)
216 216 self.filestart = self.bundle.tell()
217 217 return m
218 218
219 219 @util.propertycache
220 220 def manstart(self):
221 221 self.changelog
222 222 return self.manstart
223 223
224 224 @util.propertycache
225 225 def filestart(self):
226 226 self.manifest
227 227 return self.filestart
228 228
229 229 def url(self):
230 230 return self._url
231 231
232 232 def file(self, f):
233 233 if not self.bundlefilespos:
234 234 self.bundle.seek(self.filestart)
235 while 1:
235 while True:
236 236 chunkdata = self.bundle.filelogheader()
237 237 if not chunkdata:
238 238 break
239 239 fname = chunkdata['filename']
240 240 self.bundlefilespos[fname] = self.bundle.tell()
241 while 1:
241 while True:
242 242 c = self.bundle.deltachunk(None)
243 243 if not c:
244 244 break
245 245
246 246 if f[0] == '/':
247 247 f = f[1:]
248 248 if f in self.bundlefilespos:
249 249 self.bundle.seek(self.bundlefilespos[f])
250 250 return bundlefilelog(self.sopener, f, self.bundle,
251 251 self.changelog.rev, self)
252 252 else:
253 253 return filelog.filelog(self.sopener, f)
254 254
255 255 def close(self):
256 256 """Close assigned bundle file immediately."""
257 257 self.bundle.close()
258 258 if self.tempfile is not None:
259 259 os.unlink(self.tempfile)
260 260 if self._tempparent:
261 261 shutil.rmtree(self._tempparent, True)
262 262
263 263 def cancopy(self):
264 264 return False
265 265
266 266 def getcwd(self):
267 267 return os.getcwd() # always outside the repo
268 268
269 269 def instance(ui, path, create):
270 270 if create:
271 271 raise util.Abort(_('cannot create new bundle repository'))
272 272 parentpath = ui.config("bundle", "mainreporoot", "")
273 273 if parentpath:
274 274 # Try to make the full path relative so we get a nice, short URL.
275 275 # In particular, we don't want temp dir names in test outputs.
276 276 cwd = os.getcwd()
277 277 if parentpath == cwd:
278 278 parentpath = ''
279 279 else:
280 280 cwd = os.path.join(cwd,'')
281 281 if parentpath.startswith(cwd):
282 282 parentpath = parentpath[len(cwd):]
283 283 u = util.url(path)
284 284 path = u.localpath()
285 285 if u.scheme == 'bundle':
286 286 s = path.split("+", 1)
287 287 if len(s) == 1:
288 288 repopath, bundlename = parentpath, s[0]
289 289 else:
290 290 repopath, bundlename = s
291 291 else:
292 292 repopath, bundlename = parentpath, path
293 293 return bundlerepository(ui, repopath, bundlename)
294 294
295 295 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
296 296 force=False):
297 297 '''obtains a bundle of changes incoming from other
298 298
299 299 "onlyheads" restricts the returned changes to those reachable from the
300 300 specified heads.
301 301 "bundlename", if given, stores the bundle to this file path permanently;
302 302 otherwise it's stored to a temp file and gets deleted again when you call
303 303 the returned "cleanupfn".
304 304 "force" indicates whether to proceed on unrelated repos.
305 305
306 306 Returns a tuple (local, csets, cleanupfn):
307 307
308 308 "local" is a local repo from which to obtain the actual incoming changesets; it
309 309 is a bundlerepo for the obtained bundle when the original "other" is remote.
310 310 "csets" lists the incoming changeset node ids.
311 311 "cleanupfn" must be called without arguments when you're done processing the
312 312 changes; it closes both the original "other" and the one returned here.
313 313 '''
314 314 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads, force=force)
315 315 common, incoming, rheads = tmp
316 316 if not incoming:
317 317 try:
318 318 os.unlink(bundlename)
319 319 except OSError:
320 320 pass
321 321 return other, [], other.close
322 322
323 323 bundle = None
324 324 bundlerepo = None
325 325 localrepo = other
326 326 if bundlename or not other.local():
327 327 # create a bundle (uncompressed if other repo is not local)
328 328
329 329 if other.capable('getbundle'):
330 330 cg = other.getbundle('incoming', common=common, heads=rheads)
331 331 elif onlyheads is None and not other.capable('changegroupsubset'):
332 332 # compat with older servers when pulling all remote heads
333 333 cg = other.changegroup(incoming, "incoming")
334 334 rheads = None
335 335 else:
336 336 cg = other.changegroupsubset(incoming, rheads, 'incoming')
337 337 bundletype = other.local() and "HG10BZ" or "HG10UN"
338 338 fname = bundle = changegroup.writebundle(cg, bundlename, bundletype)
339 339 # keep written bundle?
340 340 if bundlename:
341 341 bundle = None
342 342 if not other.local():
343 343 # use the created uncompressed bundlerepo
344 344 localrepo = bundlerepo = bundlerepository(ui, repo.root, fname)
345 345 # this repo contains local and other now, so filter out local again
346 346 common = repo.heads()
347 347
348 348 csets = localrepo.changelog.findmissing(common, rheads)
349 349
350 350 def cleanup():
351 351 if bundlerepo:
352 352 bundlerepo.close()
353 353 if bundle:
354 354 os.unlink(bundle)
355 355 other.close()
356 356
357 357 return (localrepo, csets, cleanup)
358 358
@@ -1,256 +1,256
1 1 # changegroup.py - Mercurial changegroup manipulation functions
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import nullrev
10 10 import mdiff, util
11 11 import struct, os, bz2, zlib, tempfile
12 12
13 13 _BUNDLE10_DELTA_HEADER = "20s20s20s20s"
14 14
15 15 def readexactly(stream, n):
16 16 '''read n bytes from stream.read and abort if less was available'''
17 17 s = stream.read(n)
18 18 if len(s) < n:
19 19 raise util.Abort(_("stream ended unexpectedly"
20 20 " (got %d bytes, expected %d)")
21 21 % (len(s), n))
22 22 return s
23 23
24 24 def getchunk(stream):
25 25 """return the next chunk from stream as a string"""
26 26 d = readexactly(stream, 4)
27 27 l = struct.unpack(">l", d)[0]
28 28 if l <= 4:
29 29 if l:
30 30 raise util.Abort(_("invalid chunk length %d") % l)
31 31 return ""
32 32 return readexactly(stream, l - 4)
33 33
34 34 def chunkheader(length):
35 35 """return a changegroup chunk header (string)"""
36 36 return struct.pack(">l", length + 4)
37 37
38 38 def closechunk():
39 39 """return a changegroup chunk header (string) for a zero-length chunk"""
40 40 return struct.pack(">l", 0)
41 41
42 42 class nocompress(object):
43 43 def compress(self, x):
44 44 return x
45 45 def flush(self):
46 46 return ""
47 47
48 48 bundletypes = {
49 49 "": ("", nocompress), # only when using unbundle on ssh and old http servers
50 50 # since the unification ssh accepts a header but there
51 51 # is no capability signaling it.
52 52 "HG10UN": ("HG10UN", nocompress),
53 53 "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
54 54 "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
55 55 }
56 56
57 57 # hgweb uses this list to communicate its preferred type
58 58 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
59 59
60 60 def writebundle(cg, filename, bundletype):
61 61 """Write a bundle file and return its filename.
62 62
63 63 Existing files will not be overwritten.
64 64 If no filename is specified, a temporary file is created.
65 65 bz2 compression can be turned off.
66 66 The bundle file will be deleted in case of errors.
67 67 """
68 68
69 69 fh = None
70 70 cleanup = None
71 71 try:
72 72 if filename:
73 73 fh = open(filename, "wb")
74 74 else:
75 75 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
76 76 fh = os.fdopen(fd, "wb")
77 77 cleanup = filename
78 78
79 79 header, compressor = bundletypes[bundletype]
80 80 fh.write(header)
81 81 z = compressor()
82 82
83 83 # parse the changegroup data, otherwise we will block
84 84 # in case of sshrepo because we don't know the end of the stream
85 85
86 86 # an empty chunkgroup is the end of the changegroup
87 87 # a changegroup has at least 2 chunkgroups (changelog and manifest).
88 88 # after that, an empty chunkgroup is the end of the changegroup
89 89 empty = False
90 90 count = 0
91 91 while not empty or count <= 2:
92 92 empty = True
93 93 count += 1
94 while 1:
94 while True:
95 95 chunk = getchunk(cg)
96 96 if not chunk:
97 97 break
98 98 empty = False
99 99 fh.write(z.compress(chunkheader(len(chunk))))
100 100 pos = 0
101 101 while pos < len(chunk):
102 102 next = pos + 2**20
103 103 fh.write(z.compress(chunk[pos:next]))
104 104 pos = next
105 105 fh.write(z.compress(closechunk()))
106 106 fh.write(z.flush())
107 107 cleanup = None
108 108 return filename
109 109 finally:
110 110 if fh is not None:
111 111 fh.close()
112 112 if cleanup is not None:
113 113 os.unlink(cleanup)
114 114
115 115 def decompressor(fh, alg):
116 116 if alg == 'UN':
117 117 return fh
118 118 elif alg == 'GZ':
119 119 def generator(f):
120 120 zd = zlib.decompressobj()
121 121 for chunk in f:
122 122 yield zd.decompress(chunk)
123 123 elif alg == 'BZ':
124 124 def generator(f):
125 125 zd = bz2.BZ2Decompressor()
126 126 zd.decompress("BZ")
127 127 for chunk in util.filechunkiter(f, 4096):
128 128 yield zd.decompress(chunk)
129 129 else:
130 130 raise util.Abort("unknown bundle compression '%s'" % alg)
131 131 return util.chunkbuffer(generator(fh))
132 132
133 133 class unbundle10(object):
134 134 deltaheader = _BUNDLE10_DELTA_HEADER
135 135 deltaheadersize = struct.calcsize(deltaheader)
136 136 def __init__(self, fh, alg):
137 137 self._stream = decompressor(fh, alg)
138 138 self._type = alg
139 139 self.callback = None
140 140 def compressed(self):
141 141 return self._type != 'UN'
142 142 def read(self, l):
143 143 return self._stream.read(l)
144 144 def seek(self, pos):
145 145 return self._stream.seek(pos)
146 146 def tell(self):
147 147 return self._stream.tell()
148 148 def close(self):
149 149 return self._stream.close()
150 150
151 151 def chunklength(self):
152 152 d = readexactly(self._stream, 4)
153 153 l = struct.unpack(">l", d)[0]
154 154 if l <= 4:
155 155 if l:
156 156 raise util.Abort(_("invalid chunk length %d") % l)
157 157 return 0
158 158 if self.callback:
159 159 self.callback()
160 160 return l - 4
161 161
162 162 def changelogheader(self):
163 163 """v10 does not have a changelog header chunk"""
164 164 return {}
165 165
166 166 def manifestheader(self):
167 167 """v10 does not have a manifest header chunk"""
168 168 return {}
169 169
170 170 def filelogheader(self):
171 171 """return the header of the filelogs chunk, v10 only has the filename"""
172 172 l = self.chunklength()
173 173 if not l:
174 174 return {}
175 175 fname = readexactly(self._stream, l)
176 176 return dict(filename=fname)
177 177
178 178 def _deltaheader(self, headertuple, prevnode):
179 179 node, p1, p2, cs = headertuple
180 180 if prevnode is None:
181 181 deltabase = p1
182 182 else:
183 183 deltabase = prevnode
184 184 return node, p1, p2, deltabase, cs
185 185
186 186 def deltachunk(self, prevnode):
187 187 l = self.chunklength()
188 188 if not l:
189 189 return {}
190 190 headerdata = readexactly(self._stream, self.deltaheadersize)
191 191 header = struct.unpack(self.deltaheader, headerdata)
192 192 delta = readexactly(self._stream, l - self.deltaheadersize)
193 193 node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
194 194 return dict(node=node, p1=p1, p2=p2, cs=cs,
195 195 deltabase=deltabase, delta=delta)
196 196
197 197 class headerlessfixup(object):
198 198 def __init__(self, fh, h):
199 199 self._h = h
200 200 self._fh = fh
201 201 def read(self, n):
202 202 if self._h:
203 203 d, self._h = self._h[:n], self._h[n:]
204 204 if len(d) < n:
205 205 d += readexactly(self._fh, n - len(d))
206 206 return d
207 207 return readexactly(self._fh, n)
208 208
209 209 def readbundle(fh, fname):
210 210 header = readexactly(fh, 6)
211 211
212 212 if not fname:
213 213 fname = "stream"
214 214 if not header.startswith('HG') and header.startswith('\0'):
215 215 fh = headerlessfixup(fh, header)
216 216 header = "HG10UN"
217 217
218 218 magic, version, alg = header[0:2], header[2:4], header[4:6]
219 219
220 220 if magic != 'HG':
221 221 raise util.Abort(_('%s: not a Mercurial bundle') % fname)
222 222 if version != '10':
223 223 raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
224 224 return unbundle10(fh, alg)
225 225
226 226 class bundle10(object):
227 227 deltaheader = _BUNDLE10_DELTA_HEADER
228 228 def __init__(self, lookup):
229 229 self._lookup = lookup
230 230 def close(self):
231 231 return closechunk()
232 232 def fileheader(self, fname):
233 233 return chunkheader(len(fname)) + fname
234 234 def revchunk(self, revlog, rev, prev):
235 235 node = revlog.node(rev)
236 236 p1, p2 = revlog.parentrevs(rev)
237 237 base = prev
238 238
239 239 prefix = ''
240 240 if base == nullrev:
241 241 delta = revlog.revision(node)
242 242 prefix = mdiff.trivialdiffheader(len(delta))
243 243 else:
244 244 delta = revlog.revdiff(base, rev)
245 245 linknode = self._lookup(revlog, node)
246 246 p1n, p2n = revlog.parents(node)
247 247 basenode = revlog.node(base)
248 248 meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
249 249 meta += prefix
250 250 l = len(meta) + len(delta)
251 251 yield chunkheader(l)
252 252 yield meta
253 253 yield delta
254 254 def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
255 255 # do nothing with basenode, it is implicitly the previous one in HG10
256 256 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
@@ -1,5082 +1,5082
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, bin, nullid, nullrev, short
9 9 from lock import release
10 10 from i18n import _, gettext
11 11 import os, re, sys, difflib, time, tempfile, errno
12 12 import hg, scmutil, util, revlog, extensions, copies, error, bookmarks
13 13 import patch, help, url, encoding, templatekw, discovery
14 14 import archival, changegroup, cmdutil, sshserver, hbisect, hgweb, hgweb.server
15 15 import merge as mergemod
16 16 import minirst, revset
17 17 import dagparser, context, simplemerge
18 18 import random, setdiscovery, treediscovery, dagutil
19 19
20 20 table = {}
21 21
22 22 command = cmdutil.command(table)
23 23
24 24 # common command options
25 25
26 26 globalopts = [
27 27 ('R', 'repository', '',
28 28 _('repository root directory or name of overlay bundle file'),
29 29 _('REPO')),
30 30 ('', 'cwd', '',
31 31 _('change working directory'), _('DIR')),
32 32 ('y', 'noninteractive', None,
33 33 _('do not prompt, assume \'yes\' for any required answers')),
34 34 ('q', 'quiet', None, _('suppress output')),
35 35 ('v', 'verbose', None, _('enable additional output')),
36 36 ('', 'config', [],
37 37 _('set/override config option (use \'section.name=value\')'),
38 38 _('CONFIG')),
39 39 ('', 'debug', None, _('enable debugging output')),
40 40 ('', 'debugger', None, _('start debugger')),
41 41 ('', 'encoding', encoding.encoding, _('set the charset encoding'),
42 42 _('ENCODE')),
43 43 ('', 'encodingmode', encoding.encodingmode,
44 44 _('set the charset encoding mode'), _('MODE')),
45 45 ('', 'traceback', None, _('always print a traceback on exception')),
46 46 ('', 'time', None, _('time how long the command takes')),
47 47 ('', 'profile', None, _('print command execution profile')),
48 48 ('', 'version', None, _('output version information and exit')),
49 49 ('h', 'help', None, _('display help and exit')),
50 50 ]
51 51
52 52 dryrunopts = [('n', 'dry-run', None,
53 53 _('do not perform actions, just print output'))]
54 54
55 55 remoteopts = [
56 56 ('e', 'ssh', '',
57 57 _('specify ssh command to use'), _('CMD')),
58 58 ('', 'remotecmd', '',
59 59 _('specify hg command to run on the remote side'), _('CMD')),
60 60 ('', 'insecure', None,
61 61 _('do not verify server certificate (ignoring web.cacerts config)')),
62 62 ]
63 63
64 64 walkopts = [
65 65 ('I', 'include', [],
66 66 _('include names matching the given patterns'), _('PATTERN')),
67 67 ('X', 'exclude', [],
68 68 _('exclude names matching the given patterns'), _('PATTERN')),
69 69 ]
70 70
71 71 commitopts = [
72 72 ('m', 'message', '',
73 73 _('use text as commit message'), _('TEXT')),
74 74 ('l', 'logfile', '',
75 75 _('read commit message from file'), _('FILE')),
76 76 ]
77 77
78 78 commitopts2 = [
79 79 ('d', 'date', '',
80 80 _('record the specified date as commit date'), _('DATE')),
81 81 ('u', 'user', '',
82 82 _('record the specified user as committer'), _('USER')),
83 83 ]
84 84
85 85 templateopts = [
86 86 ('', 'style', '',
87 87 _('display using template map file'), _('STYLE')),
88 88 ('', 'template', '',
89 89 _('display with template'), _('TEMPLATE')),
90 90 ]
91 91
92 92 logopts = [
93 93 ('p', 'patch', None, _('show patch')),
94 94 ('g', 'git', None, _('use git extended diff format')),
95 95 ('l', 'limit', '',
96 96 _('limit number of changes displayed'), _('NUM')),
97 97 ('M', 'no-merges', None, _('do not show merges')),
98 98 ('', 'stat', None, _('output diffstat-style summary of changes')),
99 99 ] + templateopts
100 100
101 101 diffopts = [
102 102 ('a', 'text', None, _('treat all files as text')),
103 103 ('g', 'git', None, _('use git extended diff format')),
104 104 ('', 'nodates', None, _('omit dates from diff headers'))
105 105 ]
106 106
107 107 diffopts2 = [
108 108 ('p', 'show-function', None, _('show which function each change is in')),
109 109 ('', 'reverse', None, _('produce a diff that undoes the changes')),
110 110 ('w', 'ignore-all-space', None,
111 111 _('ignore white space when comparing lines')),
112 112 ('b', 'ignore-space-change', None,
113 113 _('ignore changes in the amount of white space')),
114 114 ('B', 'ignore-blank-lines', None,
115 115 _('ignore changes whose lines are all blank')),
116 116 ('U', 'unified', '',
117 117 _('number of lines of context to show'), _('NUM')),
118 118 ('', 'stat', None, _('output diffstat-style summary of changes')),
119 119 ]
120 120
121 121 similarityopts = [
122 122 ('s', 'similarity', '',
123 123 _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
124 124 ]
125 125
126 126 subrepoopts = [
127 127 ('S', 'subrepos', None,
128 128 _('recurse into subrepositories'))
129 129 ]
130 130
131 131 # Commands start here, listed alphabetically
132 132
133 133 @command('^add',
134 134 walkopts + subrepoopts + dryrunopts,
135 135 _('[OPTION]... [FILE]...'))
136 136 def add(ui, repo, *pats, **opts):
137 137 """add the specified files on the next commit
138 138
139 139 Schedule files to be version controlled and added to the
140 140 repository.
141 141
142 142 The files will be added to the repository at the next commit. To
143 143 undo an add before that, see :hg:`forget`.
144 144
145 145 If no names are given, add all files to the repository.
146 146
147 147 .. container:: verbose
148 148
149 149 An example showing how new (unknown) files are added
150 150 automatically by :hg:`add`::
151 151
152 152 $ ls
153 153 foo.c
154 154 $ hg status
155 155 ? foo.c
156 156 $ hg add
157 157 adding foo.c
158 158 $ hg status
159 159 A foo.c
160 160
161 161 Returns 0 if all files are successfully added.
162 162 """
163 163
164 164 m = scmutil.match(repo, pats, opts)
165 165 rejected = cmdutil.add(ui, repo, m, opts.get('dry_run'),
166 166 opts.get('subrepos'), prefix="")
167 167 return rejected and 1 or 0
168 168
169 169 @command('addremove',
170 170 similarityopts + walkopts + dryrunopts,
171 171 _('[OPTION]... [FILE]...'))
172 172 def addremove(ui, repo, *pats, **opts):
173 173 """add all new files, delete all missing files
174 174
175 175 Add all new files and remove all missing files from the
176 176 repository.
177 177
178 178 New files are ignored if they match any of the patterns in
179 179 ``.hgignore``. As with add, these changes take effect at the next
180 180 commit.
181 181
182 182 Use the -s/--similarity option to detect renamed files. With a
183 183 parameter greater than 0, this compares every removed file with
184 184 every added file and records those similar enough as renames. This
185 185 option takes a percentage between 0 (disabled) and 100 (files must
186 186 be identical) as its parameter. Detecting renamed files this way
187 187 can be expensive. After using this option, :hg:`status -C` can be
188 188 used to check which files were identified as moved or renamed.
189 189
190 190 Returns 0 if all files are successfully added.
191 191 """
192 192 try:
193 193 sim = float(opts.get('similarity') or 100)
194 194 except ValueError:
195 195 raise util.Abort(_('similarity must be a number'))
196 196 if sim < 0 or sim > 100:
197 197 raise util.Abort(_('similarity must be between 0 and 100'))
198 198 return scmutil.addremove(repo, pats, opts, similarity=sim / 100.0)
199 199
200 200 @command('^annotate|blame',
201 201 [('r', 'rev', '', _('annotate the specified revision'), _('REV')),
202 202 ('', 'follow', None,
203 203 _('follow copies/renames and list the filename (DEPRECATED)')),
204 204 ('', 'no-follow', None, _("don't follow copies and renames")),
205 205 ('a', 'text', None, _('treat all files as text')),
206 206 ('u', 'user', None, _('list the author (long with -v)')),
207 207 ('f', 'file', None, _('list the filename')),
208 208 ('d', 'date', None, _('list the date (short with -q)')),
209 209 ('n', 'number', None, _('list the revision number (default)')),
210 210 ('c', 'changeset', None, _('list the changeset')),
211 211 ('l', 'line-number', None, _('show line number at the first appearance'))
212 212 ] + walkopts,
213 213 _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'))
214 214 def annotate(ui, repo, *pats, **opts):
215 215 """show changeset information by line for each file
216 216
217 217 List changes in files, showing the revision id responsible for
218 218 each line
219 219
220 220 This command is useful for discovering when a change was made and
221 221 by whom.
222 222
223 223 Without the -a/--text option, annotate will avoid processing files
224 224 it detects as binary. With -a, annotate will annotate the file
225 225 anyway, although the results will probably be neither useful
226 226 nor desirable.
227 227
228 228 Returns 0 on success.
229 229 """
230 230 if opts.get('follow'):
231 231 # --follow is deprecated and now just an alias for -f/--file
232 232 # to mimic the behavior of Mercurial before version 1.5
233 233 opts['file'] = True
234 234
235 235 datefunc = ui.quiet and util.shortdate or util.datestr
236 236 getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
237 237
238 238 if not pats:
239 239 raise util.Abort(_('at least one filename or pattern is required'))
240 240
241 241 opmap = [('user', ' ', lambda x: ui.shortuser(x[0].user())),
242 242 ('number', ' ', lambda x: str(x[0].rev())),
243 243 ('changeset', ' ', lambda x: short(x[0].node())),
244 244 ('date', ' ', getdate),
245 245 ('file', ' ', lambda x: x[0].path()),
246 246 ('line_number', ':', lambda x: str(x[1])),
247 247 ]
248 248
249 249 if (not opts.get('user') and not opts.get('changeset')
250 250 and not opts.get('date') and not opts.get('file')):
251 251 opts['number'] = True
252 252
253 253 linenumber = opts.get('line_number') is not None
254 254 if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
255 255 raise util.Abort(_('at least one of -n/-c is required for -l'))
256 256
257 257 funcmap = [(func, sep) for op, sep, func in opmap if opts.get(op)]
258 258 funcmap[0] = (funcmap[0][0], '') # no separator in front of first column
259 259
260 260 def bad(x, y):
261 261 raise util.Abort("%s: %s" % (x, y))
262 262
263 263 ctx = scmutil.revsingle(repo, opts.get('rev'))
264 264 m = scmutil.match(repo, pats, opts)
265 265 m.bad = bad
266 266 follow = not opts.get('no_follow')
267 267 for abs in ctx.walk(m):
268 268 fctx = ctx[abs]
269 269 if not opts.get('text') and util.binary(fctx.data()):
270 270 ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
271 271 continue
272 272
273 273 lines = fctx.annotate(follow=follow, linenumber=linenumber)
274 274 pieces = []
275 275
276 276 for f, sep in funcmap:
277 277 l = [f(n) for n, dummy in lines]
278 278 if l:
279 279 sized = [(x, encoding.colwidth(x)) for x in l]
280 280 ml = max([w for x, w in sized])
281 281 pieces.append(["%s%s%s" % (sep, ' ' * (ml - w), x)
282 282 for x, w in sized])
283 283
284 284 if pieces:
285 285 for p, l in zip(zip(*pieces), lines):
286 286 ui.write("%s: %s" % ("".join(p), l[1]))
287 287
@command('archive',
    [('', 'no-decode', None, _('do not pass files through decoders')),
    ('p', 'prefix', '', _('directory prefix for files in archive'),
     _('PREFIX')),
    ('r', 'rev', '', _('revision to distribute'), _('REV')),
    ('t', 'type', '', _('type of distribution to create'), _('TYPE')),
    ] + subrepoopts + walkopts,
    _('[OPTION]... DEST'))
def archive(ui, repo, dest, **opts):
    '''create an unversioned archive of a repository revision

    By default, the revision used is the parent of the working
    directory; use -r/--rev to specify a different revision.

    The archive type is automatically detected based on file
    extension (or override using -t/--type).

    Valid types are:

    :``files``: a directory full of files (default)
    :``tar``: tar archive, uncompressed
    :``tbz2``: tar archive, compressed using bzip2
    :``tgz``: tar archive, compressed using gzip
    :``uzip``: zip archive, uncompressed
    :``zip``: zip archive, compressed using deflate

    The exact name of the destination archive or directory is given
    using a format string; see :hg:`help export` for details.

    Each member added to an archive file has a directory prefix
    prepended. Use -p/--prefix to specify a format string for the
    prefix. The default is the basename of the archive, with suffixes
    removed.

    Returns 0 on success.
    '''

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    if not ctx:
        raise util.Abort(_('no working directory: please specify a revision'))
    node = ctx.node()
    # expand %-style format sequences in the destination name
    dest = cmdutil.makefilename(repo, dest, node)
    # refuse to clobber the repository with its own archive
    if os.path.realpath(dest) == repo.root:
        raise util.Abort(_('repository root cannot be destination'))

    kind = opts.get('type') or archival.guesskind(dest) or 'files'
    prefix = opts.get('prefix')

    if dest == '-':
        # streaming to stdout only makes sense for single-file formats
        if kind == 'files':
            raise util.Abort(_('cannot archive plain files to stdout'))
        dest = sys.stdout
        if not prefix:
            prefix = os.path.basename(repo.root) + '-%h'

    prefix = cmdutil.makefilename(repo, prefix, node)
    matchfn = scmutil.match(repo, [], opts)
    archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
                     matchfn, prefix, subrepos=opts.get('subrepos'))
347 347
@command('backout',
    [('', 'merge', None, _('merge with old dirstate parent after backout')),
    ('', 'parent', '', _('parent to choose when backing out merge'), _('REV')),
    ('t', 'tool', '', _('specify merge tool')),
    ('r', 'rev', '', _('revision to backout'), _('REV')),
    ] + walkopts + commitopts + commitopts2,
    _('[OPTION]... [-r] REV'))
def backout(ui, repo, node=None, rev=None, **opts):
    '''reverse effect of earlier changeset

    Prepare a new changeset with the effect of REV undone in the
    current working directory.

    If REV is the parent of the working directory, then this new changeset
    is committed automatically. Otherwise, hg needs to merge the
    changes and the merged result is left uncommitted.

    By default, the pending changeset will have one parent,
    maintaining a linear history. With --merge, the pending changeset
    will instead have two parents: the old parent of the working
    directory and a new child of REV that simply undoes REV.

    Before version 1.7, the behavior without --merge was equivalent to
    specifying --merge followed by :hg:`update --clean .` to cancel
    the merge and leave the child of REV as a head to be merged
    separately.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success.
    '''
    # REV may arrive either positionally or via -r, but not both
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if not rev:
        rev = node

    if not rev:
        raise util.Abort(_("please specify a revision to backout"))

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    cmdutil.bailifchanged(repo)
    node = scmutil.revsingle(repo, rev).node()

    op1, op2 = repo.dirstate.parents()
    # the target must be an ancestor of the working directory parent,
    # otherwise the backout would cross branches
    a = repo.changelog.ancestor(op1, node)
    if a != node:
        raise util.Abort(_('cannot backout change on a different branch'))

    p1, p2 = repo.changelog.parents(node)
    if p1 == nullid:
        raise util.Abort(_('cannot backout a change with no parents'))
    if p2 != nullid:
        # backing out a merge: the user must tell us which parent to keep
        if not opts.get('parent'):
            raise util.Abort(_('cannot backout a merge changeset without '
                               '--parent'))
        p = repo.lookup(opts['parent'])
        if p not in (p1, p2):
            raise util.Abort(_('%s is not a parent of %s') %
                             (short(p), short(node)))
        parent = p
    else:
        if opts.get('parent'):
            raise util.Abort(_('cannot use --parent on non-merge changeset'))
        parent = p1

    # the backout should appear on the same branch
    branch = repo.dirstate.branch()
    # update to REV, then revert everything back to its parent: the
    # resulting working directory contains exactly the inverse of REV
    hg.clean(repo, node, show_stats=False)
    repo.dirstate.setbranch(branch)
    revert_opts = opts.copy()
    revert_opts['date'] = None
    revert_opts['all'] = True
    revert_opts['rev'] = hex(parent)
    revert_opts['no_backup'] = None
    revert(ui, repo, **revert_opts)
    if not opts.get('merge') and op1 != node:
        # linear-history mode: just merge the reverted files back into
        # the old working directory parent, leaving the result uncommitted
        try:
            ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
            return hg.update(repo, op1)
        finally:
            ui.setconfig('ui', 'forcemerge', '')

    commit_opts = opts.copy()
    commit_opts['addremove'] = False
    if not commit_opts['message'] and not commit_opts['logfile']:
        # we don't translate commit messages
        commit_opts['message'] = "Backed out changeset %s" % short(node)
    commit_opts['force_editor'] = True
    commit(ui, repo, **commit_opts)
    def nice(node):
        return '%d:%s' % (repo.changelog.rev(node), short(node))
    ui.status(_('changeset %s backs out changeset %s\n') %
              (nice(repo.changelog.tip()), nice(node)))
    if opts.get('merge') and op1 != node:
        # --merge mode: go back to the old parent and merge in the
        # freshly committed backout changeset
        hg.clean(repo, op1, show_stats=False)
        ui.status(_('merging with changeset %s\n')
                  % nice(repo.changelog.tip()))
        try:
            ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
            return hg.merge(repo, hex(repo.changelog.tip()))
        finally:
            ui.setconfig('ui', 'forcemerge', '')
    return 0
455 455
@command('bisect',
    [('r', 'reset', False, _('reset bisect state')),
    ('g', 'good', False, _('mark changeset good')),
    ('b', 'bad', False, _('mark changeset bad')),
    ('s', 'skip', False, _('skip testing changeset')),
    ('e', 'extend', False, _('extend the bisect range')),
    ('c', 'command', '', _('use command to check changeset state'), _('CMD')),
    ('U', 'noupdate', False, _('do not update to target'))],
    _("[-gbsr] [-U] [-c CMD] [REV]"))
def bisect(ui, repo, rev=None, extra=None, command=None,
               reset=None, good=None, bad=None, skip=None, extend=None,
               noupdate=None):
    """subdivision search of changesets

    This command helps to find changesets which introduce problems. To
    use, mark the earliest changeset you know exhibits the problem as
    bad, then mark the latest changeset which is free from the problem
    as good. Bisect will update your working directory to a revision
    for testing (unless the -U/--noupdate option is specified). Once
    you have performed tests, mark the working directory as good or
    bad, and bisect will either update to another candidate changeset
    or announce that it has found the bad revision.

    As a shortcut, you can also use the revision argument to mark a
    revision as good or bad without checking it out first.

    If you supply a command, it will be used for automatic bisection.
    Its exit status will be used to mark revisions as good or bad:
    status 0 means good, 125 means to skip the revision, 127
    (command not found) will abort the bisection, and any other
    non-zero exit status means the revision is bad.

    Returns 0 on success.
    """
    def extendbisectrange(nodes, good):
        # bisect is incomplete when it ends on a merge node and
        # one of the parent was not checked.
        parents = repo[nodes[0]].parents()
        if len(parents) > 1:
            side = good and state['bad'] or state['good']
            num = len(set(i.node() for i in parents) & set(side))
            if num == 1:
                return parents[0].ancestor(parents[1])
        return None

    def print_result(nodes, good):
        # report the outcome of a (possibly still ambiguous) bisection
        displayer = cmdutil.show_changeset(ui, repo, {})
        if len(nodes) == 1:
            # narrowed it down to a single revision
            if good:
                ui.write(_("The first good revision is:\n"))
            else:
                ui.write(_("The first bad revision is:\n"))
            displayer.show(repo[nodes[0]])
            extendnode = extendbisectrange(nodes, good)
            if extendnode is not None:
                ui.write(_('Not all ancestors of this changeset have been'
                           ' checked.\nUse bisect --extend to continue the '
                           'bisection from\nthe common ancestor, %s.\n')
                         % extendnode)
        else:
            # multiple possible revisions
            if good:
                ui.write(_("Due to skipped revisions, the first "
                           "good revision could be any of:\n"))
            else:
                ui.write(_("Due to skipped revisions, the first "
                           "bad revision could be any of:\n"))
            for n in nodes:
                displayer.show(repo[n])
        displayer.close()

    def check_state(state, interactive=True):
        # a bisection needs at least one good and one bad revision;
        # in interactive use a partial state is fine while the user is
        # still marking revisions
        if not state['good'] or not state['bad']:
            if (good or bad or skip or reset) and interactive:
                return
            if not state['good']:
                raise util.Abort(_('cannot bisect (no known good revisions)'))
            else:
                raise util.Abort(_('cannot bisect (no known bad revisions)'))
        return True

    # backward compatibility
    if rev in "good bad reset init".split():
        ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
        cmd, rev, extra = rev, extra, None
        if cmd == "good":
            good = True
        elif cmd == "bad":
            bad = True
        else:
            reset = True
    elif extra or good + bad + skip + reset + extend + bool(command) > 1:
        raise util.Abort(_('incompatible arguments'))

    if reset:
        p = repo.join("bisect.state")
        if os.path.exists(p):
            os.unlink(p)
        return

    state = hbisect.load_state(repo)

    if command:
        # automatic mode: run the command, map its exit status onto a
        # good/bad/skip transition, and iterate until done
        changesets = 1
        try:
            while changesets:
                # update state
                status = util.system(command)
                if status == 125:
                    transition = "skip"
                elif status == 0:
                    transition = "good"
                # status < 0 means process was killed
                elif status == 127:
                    raise util.Abort(_("failed to execute %s") % command)
                elif status < 0:
                    raise util.Abort(_("%s killed") % command)
                else:
                    transition = "bad"
                ctx = scmutil.revsingle(repo, rev)
                rev = None # clear for future iterations
                state[transition].append(ctx.node())
                # NOTE(review): '%d' is applied to the changectx itself
                # here rather than ctx.rev() — confirm changectx supports
                # integer formatting before touching this line
                ui.status(_('Changeset %d:%s: %s\n') % (ctx, ctx, transition))
                check_state(state, interactive=False)
                # bisect
                nodes, changesets, good = hbisect.bisect(repo.changelog, state)
                # update to next check
                cmdutil.bailifchanged(repo)
                hg.clean(repo, nodes[0], show_stats=False)
        finally:
            hbisect.save_state(repo, state)
        print_result(nodes, good)
        return

    # update state

    if rev:
        nodes = [repo.lookup(i) for i in scmutil.revrange(repo, [rev])]
    else:
        nodes = [repo.lookup('.')]

    if good or bad or skip:
        if good:
            state['good'] += nodes
        elif bad:
            state['bad'] += nodes
        elif skip:
            state['skip'] += nodes
        hbisect.save_state(repo, state)

    if not check_state(state):
        return

    # actually bisect
    nodes, changesets, good = hbisect.bisect(repo.changelog, state)
    if extend:
        if not changesets:
            extendnode = extendbisectrange(nodes, good)
            if extendnode is not None:
                # interpolate outside _() so the untranslated format
                # string is what gets looked up in the message catalog
                ui.write(_("Extending search to changeset %d:%s\n")
                         % (extendnode.rev(), extendnode))
                if noupdate:
                    return
                cmdutil.bailifchanged(repo)
                return hg.clean(repo, extendnode.node())
        raise util.Abort(_("nothing to extend"))

    if changesets == 0:
        print_result(nodes, good)
    else:
        assert len(nodes) == 1 # only a single node can be tested next
        node = nodes[0]
        # compute the approximate number of remaining tests
        tests, size = 0, 2
        while size <= changesets:
            tests, size = tests + 1, size * 2
        rev = repo.changelog.rev(node)
        ui.write(_("Testing changeset %d:%s "
                   "(%d changesets remaining, ~%d tests)\n")
                 % (rev, short(node), changesets, tests))
        if not noupdate:
            cmdutil.bailifchanged(repo)
            return hg.clean(repo, node)
640 640
@command('bookmarks',
    [('f', 'force', False, _('force')),
    ('r', 'rev', '', _('revision'), _('REV')),
    ('d', 'delete', False, _('delete a given bookmark')),
    ('m', 'rename', '', _('rename a given bookmark'), _('NAME')),
    ('i', 'inactive', False, _('do not mark a new bookmark active'))],
    _('hg bookmarks [-f] [-d] [-i] [-m NAME] [-r REV] [NAME]'))
def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False,
             rename=None, inactive=False):
    '''track a line of development with movable markers

    Bookmarks are pointers to certain commits that move when
    committing. Bookmarks are local. They can be renamed, copied and
    deleted. It is possible to use bookmark names in :hg:`merge` and
    :hg:`update` to merge and update respectively to a given bookmark.

    You can use :hg:`bookmark NAME` to set a bookmark on the working
    directory's parent revision with the given name. If you specify
    a revision using -r REV (where REV may be an existing bookmark),
    the bookmark is assigned to that revision.

    Bookmarks can be pushed and pulled between repositories (see :hg:`help
    push` and :hg:`help pull`). This requires both the local and remote
    repositories to support bookmarks. For versions prior to 1.8, this means
    the bookmarks extension must be enabled.
    '''
    hexfn = ui.debugflag and hex or short
    marks = repo._bookmarks
    cur   = repo.changectx('.').node()

    if rename:
        # -m OLD NEW: move the bookmark, keeping "current" status
        if rename not in marks:
            raise util.Abort(_("bookmark '%s' does not exist") % rename)
        if mark in marks and not force:
            raise util.Abort(_("bookmark '%s' already exists "
                               "(use -f to force)") % mark)
        if mark is None:
            raise util.Abort(_("new bookmark name required"))
        marks[mark] = marks[rename]
        if repo._bookmarkcurrent == rename and not inactive:
            bookmarks.setcurrent(repo, mark)
        del marks[rename]
        bookmarks.write(repo)
        return

    if delete:
        if mark is None:
            raise util.Abort(_("bookmark name required"))
        if mark not in marks:
            raise util.Abort(_("bookmark '%s' does not exist") % mark)
        if mark == repo._bookmarkcurrent:
            # deleting the active bookmark deactivates it first
            bookmarks.setcurrent(repo, None)
        del marks[mark]
        bookmarks.write(repo)
        return

    if mark is not None:
        # create or move a bookmark
        if "\n" in mark:
            raise util.Abort(_("bookmark name cannot contain newlines"))
        mark = mark.strip()
        if not mark:
            raise util.Abort(_("bookmark names cannot consist entirely of "
                               "whitespace"))
        if inactive and mark == repo._bookmarkcurrent:
            bookmarks.setcurrent(repo, None)
            return
        if mark in marks and not force:
            raise util.Abort(_("bookmark '%s' already exists "
                               "(use -f to force)") % mark)
        # bookmarks and branches share a namespace in revision lookup,
        # so shadowing a branch name is refused unless forced
        if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
            and not force):
            raise util.Abort(
                _("a bookmark cannot have the name of an existing branch"))
        if rev:
            marks[mark] = repo.lookup(rev)
        else:
            marks[mark] = repo.changectx('.').node()
        if not inactive and repo.changectx('.').node() == marks[mark]:
            bookmarks.setcurrent(repo, mark)
        bookmarks.write(repo)
        return

    if mark is None:
        # no arguments: list the bookmarks
        if rev:
            raise util.Abort(_("bookmark name required"))
        if len(marks) == 0:
            ui.status(_("no bookmarks set\n"))
        else:
            for bmark, n in sorted(marks.iteritems()):
                current = repo._bookmarkcurrent
                if bmark == current and n == cur:
                    prefix, label = '*', 'bookmarks.current'
                else:
                    prefix, label = ' ', ''

                if ui.quiet:
                    ui.write("%s\n" % bmark, label=label)
                else:
                    ui.write(" %s %-25s %d:%s\n" % (
                        prefix, bmark, repo.changelog.rev(n), hexfn(n)),
                        label=label)
        return
743 743
@command('branch',
    [('f', 'force', None,
     _('set branch name even if it shadows an existing branch')),
    ('C', 'clean', None, _('reset branch name to parent branch name'))],
    _('[-fC] [NAME]'))
def branch(ui, repo, label=None, **opts):
    """set or show the current branch name

    With no argument, show the current branch name. With one argument,
    set the working directory branch name (the branch will not exist
    in the repository until the next commit). Standard practice
    recommends that primary development take place on the 'default'
    branch.

    Unless -f/--force is specified, branch will not let you set a
    branch name that already exists, even if it's inactive.

    Use -C/--clean to reset the working directory branch to that of
    the parent of the working directory, negating a previous branch
    change.

    Use the command :hg:`update` to switch to an existing branch. Use
    :hg:`commit --close-branch` to mark this branch as closed.

    Returns 0 on success.
    """

    if opts.get('clean'):
        # -C: revert to the first parent's branch name
        label = repo[None].p1().branch()
        repo.dirstate.setbranch(label)
        ui.status(_('reset working directory to branch %s\n') % label)
    elif label:
        # refuse to shadow an existing branch unless forced or the
        # working directory is already on that branch
        if not opts.get('force') and label in repo.branchtags():
            if label not in [p.branch() for p in repo.parents()]:
                raise util.Abort(_('a branch of the same name already exists'),
                                 # i18n: "it" refers to an existing branch
                                 hint=_("use 'hg update' to switch to it"))
        repo.dirstate.setbranch(label)
        ui.status(_('marked working directory as branch %s\n') % label)
    else:
        # no argument: just print the current branch
        ui.write("%s\n" % repo.dirstate.branch())
785 785
@command('branches',
    [('a', 'active', False, _('show only branches that have unmerged heads')),
    ('c', 'closed', False, _('show normal and closed branches'))],
    _('[-ac]'))
def branches(ui, repo, active=False, closed=False):
    """list repository named branches

    List the repository's named branches, indicating which ones are
    inactive. If -c/--closed is specified, also list branches which have
    been marked closed (see :hg:`commit --close-branch`).

    If -a/--active is specified, only show active branches. A branch
    is considered active if it contains repository heads.

    Use the command :hg:`update` to switch to an existing branch.

    Returns 0.
    """

    hexfunc = ui.debugflag and hex or short
    activebranches = [repo[n].branch() for n in repo.heads()]
    def testactive(tag, node):
        # active == the branch tip is a topological head AND an open
        # branch head
        realhead = tag in activebranches
        open = node in repo.branchheads(tag, closed=False)
        return realhead and open
    # sort so active branches come first, then by descending revision
    branches = sorted([(testactive(tag, node), repo.changelog.rev(node), tag)
                       for tag, node in repo.branchtags().items()],
                      reverse=True)

    for isactive, node, tag in branches:
        if (not active) or isactive:
            if ui.quiet:
                ui.write("%s\n" % tag)
            else:
                hn = repo.lookup(node)
                if isactive:
                    label = 'branches.active'
                    notice = ''
                elif hn not in repo.branchheads(tag, closed=False):
                    # tip of the branch is closed; hide unless -c
                    if not closed:
                        continue
                    label = 'branches.closed'
                    notice = _(' (closed)')
                else:
                    label = 'branches.inactive'
                    notice = _(' (inactive)')
                if tag == repo.dirstate.branch():
                    label = 'branches.current'
                # pad so the rev:hash column lines up across rows
                rev = str(node).rjust(31 - encoding.colwidth(tag))
                rev = ui.label('%s:%s' % (rev, hexfunc(hn)), 'log.changeset')
                tag = ui.label(tag, label)
                ui.write("%s %s%s\n" % (tag, rev, notice))
838 838
@command('bundle',
    [('f', 'force', None, _('run even when the destination is unrelated')),
    ('r', 'rev', [], _('a changeset intended to be added to the destination'),
     _('REV')),
    ('b', 'branch', [], _('a specific branch you would like to bundle'),
     _('BRANCH')),
    ('', 'base', [],
     _('a base changeset assumed to be available at the destination'),
     _('REV')),
    ('a', 'all', None, _('bundle all changesets in the repository')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')),
    ] + remoteopts,
    _('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]'))
def bundle(ui, repo, fname, dest=None, **opts):
    """create a changegroup file

    Generate a compressed changegroup file collecting changesets not
    known to be in another repository.

    If you omit the destination repository, then hg assumes the
    destination will have all the nodes you specify with --base
    parameters. To create a bundle containing all changesets, use
    -a/--all (or --base null).

    You can change compression method with the -t/--type option.
    The available compression methods are: none, bzip2, and
    gzip (by default, bundles are compressed using bzip2).

    The bundle file can then be transferred using conventional means
    and applied to another repository with the unbundle or pull
    command. This is useful when direct push and pull are not
    available or when exporting an entire repository is undesirable.

    Applying bundles preserves all changeset contents including
    permissions, copy/rename information, and revision history.

    Returns 0 on success, 1 if no changes found.
    """
    revs = None
    if 'rev' in opts:
        revs = scmutil.revrange(repo, opts['rev'])

    if opts.get('all'):
        base = ['null']
    else:
        base = scmutil.revrange(repo, opts.get('base'))
    if base:
        # explicit --base: trust the user, no remote discovery needed
        if dest:
            raise util.Abort(_("--base is incompatible with specifying "
                               "a destination"))
        common = [repo.lookup(rev) for rev in base]
        heads = revs and map(repo.lookup, revs) or revs
    else:
        # no --base: discover the common subset against the destination
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), dest)
        revs, checkout = hg.addbranchrevs(repo, other, branches, revs)
        heads = revs and map(repo.lookup, revs) or revs
        common, outheads = discovery.findcommonoutgoing(repo, other,
                                                        onlyheads=heads,
                                                        force=opts.get('force'))

    cg = repo.getbundle('bundle', common=common, heads=heads)
    if not cg:
        ui.status(_("no changes found\n"))
        return 1

    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    bundletype = btypes.get(bundletype)
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))

    changegroup.writebundle(cg, fname, bundletype)
913 913
@command('cat',
    [('o', 'output', '',
     _('print output to file with formatted name'), _('FORMAT')),
    ('r', 'rev', '', _('print the given revision'), _('REV')),
    ('', 'decode', None, _('apply any matching decode filter')),
    ] + walkopts,
    _('[OPTION]... FILE...'))
def cat(ui, repo, file1, *pats, **opts):
    """output the current or given revision of files

    Print the specified files as they were at the given revision. If
    no revision is given, the parent of the working directory is used,
    or tip if no revision is checked out.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are the same as
    for the export command, with the following additions:

    :``%s``: basename of file being printed
    :``%d``: dirname of file being printed, or '.' if in repository root
    :``%p``: root-relative path name of file being printed

    Returns 0 on success.
    """
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    # exit status 1 unless at least one file was actually printed
    err = 1
    m = scmutil.match(repo, (file1,) + pats, opts)
    for abs in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=abs)
        data = ctx[abs].data()
        if opts.get('decode'):
            data = repo.wwritedata(abs, data)
        fp.write(data)
        fp.close()
        err = 0
    return err
951 951
@command('^clone',
    [('U', 'noupdate', None,
     _('the clone will include an empty working copy (only a repository)')),
    ('u', 'updaterev', '', _('revision, tag or branch to check out'), _('REV')),
    ('r', 'rev', [], _('include the specified changeset'), _('REV')),
    ('b', 'branch', [], _('clone only the specified branch'), _('BRANCH')),
    ('', 'pull', None, _('use pull protocol to copy metadata')),
    ('', 'uncompressed', None, _('use uncompressed transfer (fast over LAN)')),
    ] + remoteopts,
    _('[OPTION]... SOURCE [DEST]'))
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    ``.hg/hgrc`` file, as the default to be used for future pulls.

    See :hg:`help urls` for valid source format details.

    It is possible to specify an ``ssh://`` URL as the destination, but no
    ``.hg/hgrc`` and working directory will be created on the remote side.
    Please see :hg:`help urls` for important details about ``ssh://`` URLs.

    A set of changesets (tags, or branch names) to pull may be specified
    by listing each changeset (tag, or branch name) with -r/--rev.
    If -r/--rev is used, the cloned repository will contain only a subset
    of the changesets of the source repository. Only the set of changesets
    defined by all -r/--rev options (including all their ancestors)
    will be pulled into the destination repository.
    No subsequent changesets (including subsequent tags) will be present
    in the destination.

    Using -r/--rev (or 'clone src#rev dest') implies --pull, even for
    local source repositories.

    For efficiency, hardlinks are used for cloning whenever the source
    and destination are on the same filesystem (note this applies only
    to the repository data, not to the working directory). Some
    filesystems, such as AFS, implement hardlinking incorrectly, but
    do not report errors. In these cases, use the --pull option to
    avoid hardlinking.

    In some cases, you can clone repositories and the working directory
    using full hardlinks with ::

      $ cp -al REPO REPOCLONE

    This is the fastest way to clone, but it is not always safe. The
    operation is not atomic (making sure REPO is not modified during
    the operation is up to you) and you have to make sure your editor
    breaks hardlinks (Emacs and most Linux Kernel tools do so). Also,
    this is not compatible with certain extensions that place their
    metadata under the .hg directory, such as mq.

    Mercurial will update the working directory to the first applicable
    revision from this list:

    a) null if -U or the source repository has no changesets
    b) if -u . and the source repository is local, the first parent of
       the source repository's working directory
    c) the changeset specified with -u (if a branch name, this means the
       latest head of that branch)
    d) the changeset specified with -r
    e) the tipmost head specified with -b
    f) the tipmost head specified with the url#branch source syntax
    g) the tipmost head of the default branch
    h) tip

    Returns 0 on success.
    """
    if opts.get('noupdate') and opts.get('updaterev'):
        raise util.Abort(_("cannot specify both --noupdate and --updaterev"))

    r = hg.clone(hg.remoteui(ui, opts), source, dest,
                 pull=opts.get('pull'),
                 stream=opts.get('uncompressed'),
                 rev=opts.get('rev'),
                 update=opts.get('updaterev') or not opts.get('noupdate'),
                 branch=opts.get('branch'))

    # hg.clone returns None on failure; map that to exit status 1
    return r is None
1037 1037
@command('^commit|ci',
    [('A', 'addremove', None,
      _('mark new/missing files as added/removed before committing')),
     ('', 'close-branch', None,
      _('mark a branch as closed, hiding it from the branch list')),
    ] + walkopts + commitopts + commitopts2,
    _('[OPTION]... [FILE]...'))
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes

    Commit changes to the given files into the repository. Unlike a
    centralized SCM, this operation is a local operation. See
    :hg:`push` for a way to actively distribute your changes.

    If a list of files is omitted, all changes reported by :hg:`status`
    will be committed.

    If you are committing the result of a merge, do not provide any
    filenames or -I/-X filters.

    If no commit message is specified, Mercurial starts your
    configured editor where you can enter a message. In case your
    commit fails, you will find a backup of your message in
    ``.hg/last-message.txt``.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success, 1 if nothing changed.
    """
    extra = {}
    if opts.get('close_branch'):
        if repo['.'].node() not in repo.branchheads():
            # The topo heads set is included in the branch heads set of the
            # current branch, so it's sufficient to test branchheads
            raise util.Abort(_('can only close branch heads'))
        # recorded in changeset extra; picked up by the reopen check below
        extra['close'] = 1
    e = cmdutil.commiteditor
    if opts.get('force_editor'):
        e = cmdutil.commitforceeditor

    # commitfunc is the actual commit callback handed to cmdutil.commit;
    # it closes over the chosen editor and the extra dict built above.
    def commitfunc(ui, repo, message, match, opts):
        return repo.commit(message, opts.get('user'), opts.get('date'), match,
                           editor=e, extra=extra)

    # snapshot the branch heads *before* committing so we can tell
    # afterwards whether this commit created a new head
    branch = repo[None].branch()
    bheads = repo.branchheads(branch)

    node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
    if not node:
        # nothing was committed; stat[3] is the list of deleted files,
        # which explains the most common cause of an empty commit
        stat = repo.status(match=scmutil.match(repo, pats, opts))
        if stat[3]:
            ui.status(_("nothing changed (%d missing files, see 'hg status')\n")
                      % len(stat[3]))
        else:
            ui.status(_("nothing changed\n"))
        return 1

    ctx = repo[node]
    parents = ctx.parents()

    # new head iff no parent was a head of this branch before the commit
    if bheads and not [x for x in parents
                       if x.node() in bheads and x.branch() == branch]:
        ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # NN   y  additional topo root
        #
        # BN   y  additional branch root
        # CN   y  additional topo head
        # HN   n  usual case
        #
        # BB   y  weird additional branch root
        # CB   y  branch merge
        # HB   n  merge with named branch
        #
        # CC   y  additional head from merge
        # CH   n  merge with a head
        #
        # HH   n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            # committing on top of a closed head reopens the branch head
            if r.extra().get('close') and r.branch() == branch:
                ui.status(_('reopening closed branch head %d\n') % r)

    if ui.debugflag:
        ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif ui.verbose:
        ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
1139 1139
@command('copy|cp',
    [('A', 'after', None, _('record a copy that has already occurred')),
     ('f', 'force', None, _('forcibly copy over an existing managed file')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... [SOURCE]... DEST'))
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    the source must be a single file.

    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.

    This command takes effect with the next commit. To undo a copy
    before that, see :hg:`revert`.

    Returns 0 on success, 1 if errors are encountered.
    """
    # all the real work lives in cmdutil.copy; we only take the wlock here
    wl = repo.wlock(False)
    try:
        return cmdutil.copy(ui, repo, pats, opts)
    finally:
        wl.release()
1166 1166
@command('debugancestor', [], _('[INDEX] REV1 REV2'))
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    # Three arguments name an index file to open directly; two arguments
    # use the current repository's changelog.
    nargs = len(args)
    if nargs == 3:
        index, rev1, rev2 = args
        rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), index)
        lookup = rlog.lookup
    elif nargs == 2:
        if not repo:
            raise util.Abort(_("there is no Mercurial repository here "
                               "(.hg not found)"))
        rev1, rev2 = args
        rlog = repo.changelog
        lookup = repo.lookup
    else:
        raise util.Abort(_('either two or three arguments required'))
    anc = rlog.ancestor(lookup(rev1), lookup(rev2))
    ui.write("%d:%s\n" % (rlog.rev(anc), hex(anc)))
1185 1185
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
     ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
     ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

    - "+n" is a linear run of n nodes based on the current default parent
    - "." is a single node based on the current default parent
    - "$" resets the default parent to null (implied at the start);
      otherwise the default parent is always the last node created
    - "<p" sets the default parent to the backref p
    - "*p" is a fork at parent p, which is a backref
    - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
    - "/p2" is a merge of the preceding node and p2
    - ":tag" defines a local tag for the preceding node
    - "@branch" sets the named branch for subsequent nodes
    - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

    - a number n, which references the node curr-n, where curr is the current
      node, or
    - the name of a local tag you placed earlier using ":tag", or
    - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = sys.stdin.read()

    # the command only makes sense in a freshly-created repository
    cl = repo.changelog
    if len(cl) > 0:
        raise util.Abort(_('repository is not empty'))

    # determine number of revs in DAG: first pass over the parsed DAG is
    # only for progress reporting, the second pass builds the commits
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []

    tr = repo.transaction("builddag")
    try:

        at = -1                   # id of the most recently created node
        atbranch = 'default'      # branch applied to subsequent nodes
        nodeids = []              # backref index -> node id
        ui.progress(_('building'), 0, unit=_('revisions'), total=total)
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                ui.note('node %s\n' % str(data))
                id, ps = data

                files = []
                fctxs = {}

                p2 = None
                if mergeable_file:
                    # "mf" carries content designed to merge cleanly line
                    # by line; on merges, pre-merge with Merge3Text
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in pa, p1, p2]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    # each rev stamps its id onto its own line block
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(fn, mergedtext)

                if overwritten_file:
                    # "of" is fully rewritten by every revision
                    fn = "of"
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)

                if new_file:
                    # one brand-new "nf<id>" file per revision; on merges,
                    # carry over p2's nf* files so they are not dropped
                    fn = "nf%i" % id
                    files.append(fn)
                    fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                fctxs[fn] = p2[fn]

                def fctxfn(repo, cx, path):
                    return fctxs.get(path)

                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                id, name = data
                ui.note('tag %s\n' % name)
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                ui.note('branch %s\n' % data)
                atbranch = data
            # NOTE(review): `id` is only bound by the 'n' and 'l' branches;
            # if the first parsed element were an 'a' (branch) element, this
            # would pass the builtin `id` function — presumably dagparser
            # always yields a node first; confirm.
            ui.progress(_('building'), id, unit=_('revisions'), total=total)
        tr.close()
    finally:
        ui.progress(_('building'), None)
        tr.release()

    if tags:
        # local tags are written out only after the transaction commits
        repo.opener.write("localtags", "".join(tags))
1332 1332
@command('debugbundle', [('a', 'all', None, _('show all details'))], _('FILE'))
def debugbundle(ui, bundlepath, all=None, **opts):
    """lists the contents of a bundle"""
    f = url.open(ui, bundlepath)
    try:
        gen = changegroup.readbundle(f, bundlepath)
        if all:
            # verbose mode: dump every delta chunk of every revlog group
            ui.write("format: id, p1, p2, cset, delta base, len(delta)\n")

            # drain one delta group from the stream, printing each chunk;
            # gen.deltachunk returns a false value at the end of the group
            def showchunks(named):
                ui.write("\n%s\n" % named)
                chain = None
                while True:
                    chunkdata = gen.deltachunk(chain)
                    if not chunkdata:
                        break
                    node = chunkdata['node']
                    p1 = chunkdata['p1']
                    p2 = chunkdata['p2']
                    cs = chunkdata['cs']
                    deltabase = chunkdata['deltabase']
                    delta = chunkdata['delta']
                    ui.write("%s %s %s %s %s %s\n" %
                             (hex(node), hex(p1), hex(p2),
                              hex(cs), hex(deltabase), len(delta)))
                    chain = node

            # bundle stream order: changelog, manifest, then one group
            # per filelog, each introduced by its header
            chunkdata = gen.changelogheader()
            showchunks("changelog")
            chunkdata = gen.manifestheader()
            showchunks("manifest")
            while True:
                chunkdata = gen.filelogheader()
                if not chunkdata:
                    break
                fname = chunkdata['filename']
                showchunks(fname)
        else:
            # default mode: list only the changelog node ids
            chunkdata = gen.changelogheader()
            chain = None
            while True:
                chunkdata = gen.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                ui.write("%s\n" % hex(node))
                chain = node
    finally:
        f.close()
1382 1382
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    p1, p2 = repo.dirstate.parents()
    m1 = repo[p1].manifest()
    m2 = repo[p2].manifest()
    errors = 0
    # pass 1: every dirstate entry must be consistent with the manifests
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    # pass 2: every file in the first-parent manifest must be tracked
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        error = _(".hg/dirstate inconsistent with current parent's manifest")
        raise util.Abort(error)
1410 1410
@command('debugcommands', [], _('[COMMAND]'))
def debugcommands(ui, cmd='', *args):
    """list all available commands and options"""
    for name, vals in sorted(table.iteritems()):
        # strip aliases and the "shown by default" marker from the name
        primary = name.split('|')[0].strip('^')
        optnames = ', '.join([i[1] for i in vals[1]])
        ui.write('%s: %s\n' % (primary, optnames))
1418 1418
@command('debugcomplete',
    [('o', 'options', None, _('show the command options'))],
    _('[-o] CMD'))
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""

    if opts.get('options'):
        # complete option names (for the given command plus the globals)
        options = []
        otables = [globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, table, False)
            otables.append(entry[1])
        for t in otables:
            for o in t:
                if "(DEPRECATED)" in o[3]:
                    continue
                shortname, longname = o[0], o[1]
                if shortname:
                    options.append('-%s' % shortname)
                options.append('--%s' % longname)
        ui.write("%s\n" % "\n".join(options))
        return

    # otherwise complete command names matching the given prefix
    cmdlist = cmdutil.findpossible(cmd, table)
    if ui.verbose:
        cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
    ui.write("%s\n" % "\n".join(sorted(cmdlist)))
1445 1445
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
     ('b', 'branches', None, _('annotate with branch names')),
     ('', 'dots', None, _('use dots for runs')),
     ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'))
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labelled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get('spaces')
    dots = opts.get('dots')
    if file_:
        # revlog-index mode: emit the index DAG, labelling requested revs
        rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
        revs = set((int(r) for r in revs))
        # events() yields ('n', (rev, parents)) for nodes and
        # ('l', (rev, label)) for labels, in the format dagtextlines expects
        def events():
            for r in rlog:
                yield 'n', (r, list(set(p for p in rlog.parentrevs(r) if p != -1)))
                if r in revs:
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # changelog mode: optionally annotate with branch switches and tags
        cl = repo.changelog
        tags = opts.get('tags')
        branches = opts.get('branches')
        if tags:
            # map each tagged rev to its list of tag names
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    # emit an ('a', branchname) event whenever the branch
                    # changes from one rev to the next
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(set(p for p in cl.parentrevs(r) if p != -1)))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise util.Abort(_('need repo for changelog dag'))

    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
    ui.write("\n")
1504 1504
@command('debugdata',
    [('c', 'changelog', False, _('open changelog')),
     ('m', 'manifest', False, _('open manifest'))],
    _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    if opts.get('changelog') or opts.get('manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    rlog = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
    try:
        ui.write(rlog.revision(rlog.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s') % rev)
1520 1520
@command('debugdate',
    [('e', 'extended', None, _('try extended date formats'))],
    _('[-e] DATE [RANGE]'))
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # -e widens the set of accepted input formats
    if opts["extended"]:
        parsed = util.parsedate(date, util.extendeddateformats)
    else:
        parsed = util.parsedate(date)
    ui.write("internal: %s %s\n" % parsed)
    ui.write("standard: %s\n" % util.datestr(parsed))
    if range:
        m = util.matchdate(range)
        ui.write("match: %s\n" % m(parsed[0]))
1535 1535
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
     ('', 'nonheads', None,
      _('use old-style discovery with non-heads included')),
    ] + remoteopts,
    _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation"""
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl), opts.get('branch'))
    remote = hg.repository(hg.remoteui(repo, opts), remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(12323)

    # run one discovery round and report the computed common-head set
    def doit(localheads, remoteheads):
        if opts.get('old'):
            if localheads:
                raise util.Abort('cannot use localheads with old style discovery')
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                # prune the common set down to the heads of its
                # connected ancestor set, to mirror new-style output
                ui.write("unpruned common: %s\n" % " ".join([short(n)
                                                            for n in common]))
                dag = dagutil.revlogdag(repo.changelog)
                all = dag.ancestorset(dag.internalizeall(common))
                common = dag.externalizeall(dag.headsetofconnecteds(all))
        else:
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
        common = set(common)
        rheads = set(hds)
        lheads = set(repo.heads())
        ui.write("common heads: %s\n" % " ".join([short(n) for n in common]))
        if lheads <= common:
            ui.write("local is subset\n")
        elif rheads <= common:
            ui.write("remote is subset\n")

    # NOTE(review): 'serverlog', 'remote_head' and 'local_head' are not
    # declared in this command's option table above, so opts.get() returns
    # None here unless they are injected elsewhere — confirm against the
    # option definitions in remoteopts / the full file.
    serverlogs = opts.get('serverlog')
    if serverlogs:
        # replay discovery rounds recorded in server log files; each line
        # is semicolon-separated, with the operation name in field 1
        for filename in serverlogs:
            logfile = open(filename, 'r')
            try:
                line = logfile.readline()
                while line:
                    parts = line.strip().split(';')
                    op = parts[1]
                    if op == 'cg':
                        pass
                    elif op == 'cgss':
                        doit(parts[2].split(' '), parts[3].split(' '))
                    elif op == 'unb':
                        doit(parts[3].split(' '), parts[2].split(' '))
                    line = logfile.readline()
            finally:
                logfile.close()

    else:
        remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
                                                 opts.get('remote_head'))
        localrevs = opts.get('local_head')
        doit(localrevs, remoterevs)
1599 1599
@command('debugfsinfo', [], _('[PATH]'))
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem"""
    # probe file needed by the util.checkcase call below
    util.writefile('.debugfsinfo', '')
    try:
        ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
        ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
        ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
                                           and 'yes' or 'no'))
    finally:
        # fix: previously the probe file leaked if any check raised;
        # always remove it, even on error
        os.unlink('.debugfsinfo')
1609 1609
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
     ('C', 'common', [], _('id of common node'), _('ID')),
     ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'))
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    repo = hg.repository(ui, repopath)
    if not repo.capable('getbundle'):
        raise util.Abort("getbundle() not supported by target repository")
    # build keyword arguments only for the options that were supplied
    kwargs = {}
    if common:
        kwargs['common'] = [bin(s) for s in common]
    if head:
        kwargs['heads'] = [bin(s) for s in head]
    bundle = repo.getbundle('debug', **kwargs)

    # map the user-visible compression name to the internal bundle type
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    requested = opts.get('type', 'bzip2').lower()
    bundletype = btypes.get(requested)
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))
    changegroup.writebundle(bundle, bundlepath, bundletype)
1637 1637
@command('debugignore', [], '')
def debugignore(ui, repo, *values, **opts):
    """display the combined ignore pattern"""
    ignore = repo.dirstate._ignore
    # fix: avoid hasattr() — on Python 2 it swallows *all* exceptions
    # raised by attribute access, which can hide real errors in the
    # matcher; a sentinel getattr() has the same meaning without that risk
    missing = object()
    includepat = getattr(ignore, 'includepat', missing)
    if includepat is not missing:
        ui.write("%s\n" % includepat)
    else:
        raise util.Abort(_("no ignore patterns found"))
1646 1646
@command('debugindex',
    [('c', 'changelog', False, _('open changelog')),
     ('m', 'manifest', False, _('open manifest')),
     ('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
    """dump the contents of an index file"""
    r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise util.Abort(_("unknown format %d") % format)

    # with generaldelta the "base" column actually shows the delta parent
    generaldelta = r.version & revlog.REVLOGGENERALDELTA
    if generaldelta:
        basehdr = ' delta'
    else:
        basehdr = ' base'

    if format == 0:
        ui.write(" rev offset length " + basehdr + " linkrev"
                 " nodeid p1 p2\n")
    elif format == 1:
        ui.write(" rev flag offset length"
                 " size " + basehdr + " link p1 p2 nodeid\n")

    for i in r:
        node = r.node(i)
        if generaldelta:
            base = r.deltaparent(i)
        else:
            base = r.chainbase(i)
        if format == 0:
            try:
                pp = r.parents(node)
            # fix: was a naked "except:", which also caught SystemExit and
            # KeyboardInterrupt; only lookup failures should fall back here
            except Exception:
                pp = [nullid, nullid]
            ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), base, r.linkrev(i),
                    short(node), short(pp[0]), short(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    base, r.linkrev(i), pr[0], pr[1], short(node)))
1691 1691
@command('debugindexdot', [], _('FILE'))
def debugindexdot(ui, repo, file_):
    """dump an index DAG as a graphviz dot file"""
    # prefer the repository's filelog; fall back to opening the index
    # file directly (works outside a repo or for an empty filelog)
    rlog = None
    if repo:
        filelog = repo.file(file_)
        if len(filelog):
            rlog = filelog
    if not rlog:
        rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
    ui.write("digraph G {\n")
    for i in rlog:
        node = rlog.node(i)
        pp = rlog.parents(node)
        ui.write("\t%d -> %d\n" % (rlog.rev(pp[0]), i))
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (rlog.rev(pp[1]), i))
    ui.write("}\n")
1710 1710
@command('debuginstall', [], '')
def debuginstall(ui):
    '''test Mercurial installation

    Returns 0 on success.
    '''

    def writetemp(contents):
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, "wb")
        f.write(contents)
        f.close()
        return name

    # each check below increments this; the count doubles as exit status
    problems = 0

    # encoding
    ui.status(_("Checking encoding (%s)...\n") % encoding.encoding)
    try:
        encoding.fromlocal("test")
    except util.Abort, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (check that your locale is properly set)\n"))
        problems += 1

    # compiled modules
    ui.status(_("Checking installed modules (%s)...\n")
              % os.path.dirname(__file__))
    try:
        import bdiff, mpatch, base85, osutil
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" One or more extensions could not be found"))
        ui.write(_(" (check that you compiled the extensions)\n"))
        problems += 1

    # templates
    ui.status(_("Checking templates...\n"))
    try:
        import templater
        templater.templater(templater.templatepath("map-cmdline.default"))
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (templates seem to have been installed incorrectly)\n"))
        problems += 1

    # editor: try the configured editor, then the first word of it (in
    # case it was configured with arguments)
    ui.status(_("Checking commit editor...\n"))
    editor = ui.geteditor()
    cmdpath = util.findexe(editor) or util.findexe(editor.split()[0])
    if not cmdpath:
        if editor == 'vi':
            ui.write(_(" No commit editor set and can't find vi in PATH\n"))
            ui.write(_(" (specify a commit editor in your configuration"
                       " file)\n"))
        else:
            ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
            ui.write(_(" (specify a commit editor in your configuration"
                       " file)\n"))
            problems += 1

    # check username
    ui.status(_("Checking username...\n"))
    try:
        ui.username()
    except util.Abort, e:
        ui.write(" %s\n" % e)
        ui.write(_(" (specify a username in your configuration file)\n"))
        problems += 1

    if not problems:
        ui.status(_("No problems detected\n"))
    else:
        ui.write(_("%s problems detected,"
                   " please check your install!\n") % problems)

    return problems
1788 1788
@command('debugknown', [], _('REPO ID...'))
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s and 1s
    indicating unknown/known.
    """
    repo = hg.repository(ui, repopath)
    if not repo.capable('known'):
        raise util.Abort("known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    # one digit per queried id, in the order given on the command line
    digits = []
    for known in flags:
        digits.append(known and "1" or "0")
    ui.write("%s\n" % "".join(digits))
1801 1801
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'))
def debugpushkey(ui, repopath, namespace, *keyinfo):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.repository(ui, repopath)
    if not keyinfo:
        # listing mode: dump every key/value pair in the namespace
        for k, v in target.listkeys(namespace).iteritems():
            ui.write("%s\t%s\n" % (k.encode('string-escape'),
                                   v.encode('string-escape')))
        return
    # update mode: conditionally set key old -> new, report the result
    key, old, new = keyinfo
    r = target.pushkey(namespace, key, old, new)
    ui.status(str(r) + '\n')
    return not r
1822 1822
@command('debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV'))],
    _('[-r REV] [REV]'))
def debugrebuildstate(ui, repo, rev="tip"):
    """rebuild the dirstate as it would look like for the given revision"""
    ctx = scmutil.revsingle(repo, rev)
    # rebuilding the dirstate mutates the working copy state: take the wlock
    lock = repo.wlock()
    try:
        repo.dirstate.rebuild(ctx.node(), ctx.manifest())
    finally:
        lock.release()
1834 1834
@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] FILE'))
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    matcher = scmutil.match(repo, (file1,) + pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        # renamed() yields (source path, source filenode) or a false value
        source = fctx.filelog().renamed(fctx.filenode())
        rel = matcher.rel(path)
        if source:
            ui.write(_("%s renamed from %s:%s\n") % (rel, source[0],
                                                     hex(source[1])))
        else:
            ui.write(_("%s not renamed\n") % rel)
1851 1851
@command('debugrevlog',
    [('c', 'changelog', False, _('open changelog')),
     ('m', 'manifest', False, _('open manifest')),
     ('d', 'dump', False, _('dump index data'))],
     _('-c|-m|FILE'))
def debugrevlog(ui, repo, file_ = None, **opts):
    """show data and statistics about a revlog"""
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    if opts.get("dump"):
        # --dump: print one raw index line per revision and return early,
        # skipping the statistics report below.
        numrevs = len(r)
        ui.write("# rev p1rev p2rev start end deltastart base p1 p2"
                 " rawsize totalsize compression heads\n")
        ts = 0
        # running set of head revisions while scanning in topological order
        heads = set()
        for rev in xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # no delta parent: treat the revision as its own base
                dbase = rev
            cbase = r.chainbase(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs  # cumulative raw (uncompressed) size so far
            # this rev's parents stop being heads; this rev becomes one
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            # "compression" column = cumulative raw size / stored end offset
            # (Python 2 integer division; assumes r.end(rev) > 0 here)
            ui.write("%d %d %d %d %d %d %d %d %d %d %d %d %d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, ts / r.end(rev), len(heads)))
        return 0

    # decode format number and feature flags from the revlog version word
    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.REVLOGNGINLINEDATA:
        flags.append('inline')
    if v & revlog.REVLOGGENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    # counters for the statistics report
    nummerges = 0
    numfull = 0     # full (non-delta) revisions
    numprev = 0     # deltas against the immediately preceding revision
    nump1 = 0       # deltas against p1 (and prev != p1)
    nump2 = 0       # deltas against p2 (and prev != p2)
    numother = 0    # deltas against some other revision
    nump1prev = 0   # deltas against prev where prev == p1
    nump2prev = 0   # deltas against prev where prev == p2
    chainlengths = []

    # each triple is [min, max, total]; totals start as longs (0L)
    datasize = [None, 0, 0L]
    fullsize = [None, 0, 0L]
    deltasize = [None, 0, 0L]

    def addsize(size, l):
        # fold one sample into a [min, max, total] triple, in place
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            # rawsize stats only gathered for revlogng (format > 0)
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # a full snapshot starts a new delta chain
            chainlengths.append(0)
            numfull += 1
            addsize(size, fullsize)
        else:
            chainlengths.append(chainlengths[delta] + 1)
            addsize(size, deltasize)
            # classify what the delta was computed against
            if delta == rev - 1:
                numprev += 1
                if delta == p1:
                    nump1prev += 1
                elif delta == p2:
                    nump2prev += 1
            elif delta == p1:
                nump1 += 1
            elif delta == p2:
                nump2 += 1
            elif delta != nullrev:
                numother += 1

    numdeltas = numrevs - numfull
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # NOTE(review): the [2] slots are converted from totals to averages in
    # place below, using Python 2 integer division; there is no guard for
    # numfull == 0, numrevs == numfull, or totalsize == 0 — tiny or
    # degenerate revlogs can raise ZeroDivisionError here. TODO confirm
    # whether openrevlog guarantees a non-empty revlog.
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    fullsize[2] /= numfull
    deltatotal = deltasize[2]
    deltasize[2] /= numrevs - numfull
    totalsize = fulltotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    compratio = totalrawsize / totalsize

    # templates for a right-aligned integer column, optionally followed by
    # a percentage; the %%%dd trick bakes the column width in
    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # integer column wide enough for `max`
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        # integer column plus percentage, with optional extra padding
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # (value, percent-of-total) pair matching pcfmtstr's format
        return (value, 100 * float(value) / total)

    ui.write('format : %d\n' % format)
    ui.write('flags : %s\n' % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write('revisions : ' + fmt2 % numrevs)
    ui.write(' merges : ' + fmt % pcfmt(nummerges, numrevs))
    ui.write(' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write('revisions : ' + fmt2 % numrevs)
    ui.write(' full : ' + fmt % pcfmt(numfull, numrevs))
    ui.write(' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
    ui.write('revision size : ' + fmt2 % totalsize)
    ui.write(' full : ' + fmt % pcfmt(fulltotal, totalsize))
    ui.write(' deltas : ' + fmt % pcfmt(deltatotal, totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, compratio))
    ui.write('avg chain length : ' + fmt % avgchainlen)
    ui.write('compression ratio : ' + fmt % compratio)

    if format > 0:
        ui.write('\n')
        ui.write('uncompressed data size (min/max/avg) : %d / %d / %d\n'
                 % tuple(datasize))
        ui.write('full revision size (min/max/avg) : %d / %d / %d\n'
                 % tuple(fullsize))
        ui.write('delta size (min/max/avg) : %d / %d / %d\n'
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write('deltas against prev : ' + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write(' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev))
            ui.write(' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev))
            ui.write(' other : ' + fmt2 % pcfmt(numoprev, numprev))
        if gdelta:
            ui.write('deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas))
            ui.write('deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas))
        ui.write('deltas against other : ' + fmt % pcfmt(numother, numdeltas))
2012 2012
@command('debugrevspec', [], ('REVSPEC'))
def debugrevspec(ui, repo, expr):
    '''parse and apply a revision specification'''
    # In verbose mode show the parse tree first, and the alias-expanded
    # tree as well when expansion actually changed something.
    if ui.verbose:
        parsed = revset.parse(expr)[0]
        ui.note(parsed, "\n")
        expanded = revset.findaliases(ui, parsed)
        if expanded != parsed:
            ui.note(expanded, "\n")
    # Evaluate the expression over every revision in the repository and
    # print each matching revision number on its own line.
    matcher = revset.match(ui, expr)
    for rev in matcher(repo, range(len(repo))):
        ui.write("%s\n" % rev)
2025 2025
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.

    Returns 0 on success.
    """

    # Resolve both revisions before taking the lock; a missing second
    # parent defaults to the null revision.
    node1 = scmutil.revsingle(repo, rev1).node()
    node2 = scmutil.revsingle(repo, rev2, 'null').node()

    # Rewrite the dirstate parents under the working-directory lock.
    wlock = repo.wlock()
    try:
        repo.dirstate.setparents(node1, node2)
    finally:
        wlock.release()
2044 2044
@command('debugstate',
    [('', 'nodates', None, _('do not display the saved mtime')),
    ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, nodates=None, datesort=None):
    """show the contents of the current dirstate"""
    timestr = ""
    showdate = not nodates
    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # Each dirstate entry is a tuple indexed as: [0] state character,
    # [1] mode bits, [3] mtime; [2] is printed as a decimal field
    # (presumably the recorded size — verify against dirstate internals).
    for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
        if showdate:
            if ent[3] == -1:
                # mtime of -1 means "unset"; render the literal word,
                # padded/truncated to the width of a locale timestamp so
                # the columns still line up
                # Pad or slice to locale representation
                locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ",
                                               time.localtime(0)))
                timestr = 'unset'
                timestr = (timestr[:locale_len] +
                           ' ' * (locale_len - len(timestr)))
            else:
                timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
                                        time.localtime(ent[3]))
        # 020000 is the symlink bit of the stored mode; otherwise show
        # the permission bits in octal
        if ent[1] & 020000:
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0777)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    # finally list any recorded copy/rename sources
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
2076 2076
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    # Dump the subrepo state of the given revision: for every recorded
    # subrepository path, print its source URL and pinned revision.
    ctx = scmutil.revsingle(repo, rev, None)
    for path, state in sorted(ctx.substate.items()):
        ui.write('path %s\n' % path)
        ui.write(' source %s\n' % state[0])
        ui.write(' revision %s\n' % state[1])
2087 2087
@command('debugwalk', walkopts, _('[OPTION]... [FILE]...'))
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    matcher = scmutil.match(repo, pats, opts)
    walked = list(repo.walk(matcher))
    if not walked:
        return
    # Size the two path columns to the longest absolute and relative names.
    abswidth = max(len(path) for path in walked)
    relwidth = max(len(matcher.rel(path)) for path in walked)
    fmt = 'f %%-%ds %%-%ds %%s' % (abswidth, relwidth)
    for path in walked:
        exact = matcher.exact(path) and 'exact' or ''
        line = fmt % (path, matcher.rel(path), exact)
        ui.write("%s\n" % line.rstrip())
2101 2101
@command('debugwireargs',
    [('', 'three', '', 'three'),
    ('', 'four', '', 'four'),
    ('', 'five', '', 'five'),
    ] + remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'))
def debugwireargs(ui, repopath, *vals, **opts):
    # Exercise wire-protocol argument passing against a (possibly remote)
    # repository; the test-only --three/--four/--five options are relayed.
    repo = hg.repository(hg.remoteui(ui, opts), repopath)
    # Drop the generic remote options so only the test options remain.
    for opt in remoteopts:
        del opts[opt[1]]
    args = dict((key, val) for key, val in opts.iteritems() if val)
    # run twice to check that we don't mess up the stream for the next command
    first = repo.debugwireargs(*vals, **args)
    second = repo.debugwireargs(*vals, **args)
    ui.write("%s\n" % first)
    if first != second:
        ui.warn("%s\n" % second)
2122 2122
@command('^diff',
    [('r', 'rev', [], _('revision'), _('REV')),
    ('c', 'change', '', _('change made by revision'), _('REV'))
    ] + diffopts + diffopts2 + walkopts + subrepoopts,
    _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'))
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    .. note::
       diff may generate unexpected results for merges, as it will
       default to comparing against the working directory's first
       parent changeset if no revisions are specified.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.

    Alternatively you can specify -c/--change with a revision to see
    the changes in that changeset relative to its first parent.

    Without the -a/--text option, diff will avoid generating diffs of
    files it detects as binary. With -a, diff will generate a diff
    anyway, probably with undesirable results.

    Use the -g/--git option to generate diffs in the git extended diff
    format. For more information, read :hg:`help diffs`.

    Returns 0 on success.
    """

    revs = opts.get('rev')
    change = opts.get('change')

    # --rev and --change are mutually exclusive ways of picking endpoints.
    if revs and change:
        raise util.Abort(
            _('cannot specify --rev and --change at the same time'))
    if change:
        # -c REV: compare REV against its first parent
        node2 = scmutil.revsingle(repo, change, None).node()
        node1 = repo[node2].p1().node()
    else:
        node1, node2 = scmutil.revpair(repo, revs)

    if opts.get('reverse'):
        node1, node2 = node2, node1

    diffopts = patch.diffopts(ui, opts)
    matcher = scmutil.match(repo, pats, opts)
    cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, matcher,
                           stat=opts.get('stat'),
                           listsubrepos=opts.get('subrepos'))
2180 2180
@command('^export',
    [('o', 'output', '',
      _('print output to file with formatted name'), _('FORMAT')),
    ('', 'switch-parent', None, _('diff against the second parent')),
    ('r', 'rev', [], _('revisions to export'), _('REV')),
    ] + diffopts,
    _('[OPTION]... [-o OUTFILESPEC] REV...'))
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.

    The information shown in the changeset header is: author, date,
    branch name (if non-default), changeset hash, parent(s) and commit
    comment.

    .. note::
       export may generate unexpected diff output for merge
       changesets, as it will compare the merge changeset against its
       first parent only.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:

    :``%%``: literal "%" character
    :``%H``: changeset hash (40 hexadecimal digits)
    :``%N``: number of patches being generated
    :``%R``: changeset revision number
    :``%b``: basename of the exporting repository
    :``%h``: short-form changeset hash (12 hexadecimal digits)
    :``%n``: zero-padded sequence number, starting at 1
    :``%r``: zero-padded changeset revision number

    Without the -a/--text option, export will avoid generating diffs
    of files it detects as binary. With -a, export will generate a
    diff anyway, probably with undesirable results.

    Use the -g/--git option to generate diffs in the git extended diff
    format. See :hg:`help diffs` for more information.

    With the --switch-parent option, the diff will be against the
    second parent. It can be useful to review a merge.

    Returns 0 on success.
    """
    # Positional arguments and -r/--rev are combined into one list.
    changesets += tuple(opts.get('rev', []))
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    revs = scmutil.revrange(repo, changesets)
    if len(revs) > 1:
        msg = _('exporting patches:\n')
    else:
        msg = _('exporting patch:\n')
    ui.note(msg)
    cmdutil.export(repo, revs, template=opts.get('output'),
                   switch_parent=opts.get('switch_parent'),
                   opts=patch.diffopts(ui, opts))
2237 2237
@command('^forget', walkopts, _('[OPTION]... FILE...'))
def forget(ui, repo, *pats, **opts):
    """forget the specified files on the next commit

    Mark the specified files so they will no longer be tracked
    after the next commit.

    This only removes files from the current branch, not from the
    entire project history, and it does not delete them from the
    working directory.

    To undo a forget before the next commit, see :hg:`add`.

    Returns 0 on success.
    """

    if not pats:
        raise util.Abort(_('no files specified'))

    matcher = scmutil.match(repo, pats, opts)
    st = repo.status(match=matcher, clean=True)
    # Everything tracked that the patterns matched: status tuple slots
    # 0, 1, 3 and 6 (modified, added, deleted, clean).
    targets = sorted(st[0] + st[1] + st[3] + st[6])
    errs = 0

    # Warn about explicitly named files that are not tracked at all
    # (directories are exempt since patterns may expand under them).
    for f in matcher.files():
        if f not in repo.dirstate and not os.path.isdir(matcher.rel(f)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % matcher.rel(f))
            errs = 1

    for f in targets:
        if ui.verbose or not matcher.exact(f):
            ui.status(_('removing %s\n') % matcher.rel(f))

    repo[None].forget(targets)
    return errs
2274 2274
@command('grep',
    [('0', 'print0', None, _('end fields with NUL')),
    ('', 'all', None, _('print all revisions that match')),
    ('a', 'text', None, _('treat all files as text')),
    ('f', 'follow', None,
     _('follow changeset history,'
       ' or file history across copies and renames')),
    ('i', 'ignore-case', None, _('ignore case when matching')),
    ('l', 'files-with-matches', None,
     _('print only filenames and revisions that match')),
    ('n', 'line-number', None, _('print matching line numbers')),
    ('r', 'rev', [],
     _('only search files changed within revision range'), _('REV')),
    ('u', 'user', None, _('list the author (long with -v)')),
    ('d', 'date', None, _('list the date (short with -q)')),
    ] + walkopts,
    _('[OPTION]... PATTERN [FILE]...'))
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions

    Search revisions of files for a regular expression.

    This command behaves differently than Unix grep. It only accepts
    Python/Perl regexps. It searches repository history, not the
    working directory. It always prints the revision number in which a
    match appears.

    By default, grep only prints output for the first revision of a
    file in which it finds a match. To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.

    Returns 0 if a match is found, 1 otherwise.
    """
    reflags = 0
    if opts.get('ignore_case'):
        reflags |= re.I
    try:
        regexp = re.compile(pattern, reflags)
    except re.error, inst:
        ui.warn(_("grep: invalid match pattern: %s\n") % inst)
        return 1
    # field separator and end-of-line; both become NUL with --print0
    sep, eol = ':', '\n'
    if opts.get('print0'):
        sep = eol = '\0'

    # cache filelog lookups; the same filelog is consulted many times
    getfile = util.lrucachefunc(repo.file)

    def matchlines(body):
        # Yield (linenum, colstart, colend, line) for every regexp match
        # in body; columns are offsets within the matched line.
        begin = 0
        linenum = 0
        while True:
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            # `or begin` / `or len(body)` handle find/rfind returning -1
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            begin = body.find('\n', mend) + 1 or len(body)
            lend = begin - 1
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]

    class linestate(object):
        # One matched line in one file revision. __eq__/__hash__ compare
        # by content (not columns) so difflib can diff states across
        # revisions.
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend

        def __hash__(self):
            return hash((self.linenum, self.line))

        def __eq__(self, other):
            return self.line == other.line

    # matches: rev -> filename -> [linestate, ...]
    matches = {}
    # copies: rev -> filename -> rename source (used with --follow)
    copies = {}
    def grepbody(fn, rev, body):
        # record every matching line of fn@rev into `matches`
        matches[rev].setdefault(fn, [])
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m.append(s)

    def difflinestates(a, b):
        # Yield ('+'/'-', linestate) pairs describing how the matching
        # lines changed from parent states `a` to child states `b`.
        sm = difflib.SequenceMatcher(None, a, b)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag == 'insert':
                for i in xrange(blo, bhi):
                    yield ('+', b[i])
            elif tag == 'delete':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
            elif tag == 'replace':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
                for i in xrange(blo, bhi):
                    yield ('+', b[i])

    def display(fn, ctx, pstates, states):
        # Print the matches for fn in ctx. With --all, print the diff of
        # match states against the parent instead of the matches
        # themselves. Returns True if anything was printed.
        rev = ctx.rev()
        datefunc = ui.quiet and util.shortdate or util.datestr
        found = False
        filerevmatches = {}
        def binary():
            flog = getfile(fn)
            return util.binary(flog.read(ctx.filenode(fn)))

        if opts.get('all'):
            iter = difflinestates(pstates, states)
        else:
            iter = [('', l) for l in states]
        for change, l in iter:
            # assemble the sep-joined prefix columns for this output line
            cols = [fn, str(rev)]
            before, match, after = None, None, None
            if opts.get('line_number'):
                cols.append(str(l.linenum))
            if opts.get('all'):
                cols.append(change)
            if opts.get('user'):
                cols.append(ui.shortuser(ctx.user()))
            if opts.get('date'):
                cols.append(datefunc(ctx.date()))
            if opts.get('files_with_matches'):
                # -l: emit at most one line per (file, rev) pair
                c = (fn, rev)
                if c in filerevmatches:
                    continue
                filerevmatches[c] = 1
            else:
                before = l.line[:l.colstart]
                match = l.line[l.colstart:l.colend]
                after = l.line[l.colend:]
            ui.write(sep.join(cols))
            if before is not None:
                if not opts.get('text') and binary():
                    ui.write(sep + " Binary file matches")
                else:
                    ui.write(sep + before)
                    ui.write(match, label='grep.match')
                    ui.write(after)
            ui.write(eol)
            found = True
        return found

    skip = {}      # files (and rename sources) already fully reported
    revfiles = {}  # rev -> files whose matches should be shown at rev
    matchfn = scmutil.match(repo, pats, opts)
    found = False
    follow = opts.get('follow')

    def prep(ctx, fns):
        # walkchangerevs callback: gather match states for fns in ctx and
        # in its first parent, so display() can compare them later.
        rev = ctx.rev()
        pctx = ctx.p1()
        parent = pctx.rev()
        matches.setdefault(rev, {})
        matches.setdefault(parent, {})
        files = revfiles.setdefault(rev, [])
        for fn in fns:
            flog = getfile(fn)
            try:
                fnode = ctx.filenode(fn)
            except error.LookupError:
                continue

            copied = flog.renamed(fnode)
            copy = follow and copied and copied[0]
            if copy:
                copies.setdefault(rev, {})[fn] = copy
            if fn in skip:
                # file already reported; also skip its rename source
                if copy:
                    skip[copy] = True
                continue
            files.append(fn)

            if fn not in matches[rev]:
                grepbody(fn, rev, flog.read(fnode))

            # grep the parent revision of the (possibly renamed) file too
            pfn = copy or fn
            if pfn not in matches[parent]:
                try:
                    fnode = pctx.filenode(pfn)
                    grepbody(pfn, parent, flog.read(fnode))
                except error.LookupError:
                    pass

    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        rev = ctx.rev()
        parent = ctx.p1().rev()
        for fn in sorted(revfiles.get(rev, [])):
            states = matches[rev][fn]
            copy = copies.get(rev, {}).get(fn)
            if fn in skip:
                if copy:
                    skip[copy] = True
                continue
            pstates = matches.get(parent, {}).get(copy or fn, [])
            if pstates or states:
                r = display(fn, ctx, pstates, states)
                found = found or r
                if r and not opts.get('all'):
                    # without --all, stop after the first revision in
                    # which this file (or its rename source) matched
                    skip[fn] = True
                    if copy:
                        skip[copy] = True
        # release per-revision state we no longer need
        del matches[rev]
        del revfiles[rev]

    return not found
2483 2483
@command('heads',
    [('r', 'rev', '',
      _('show only heads which are descendants of STARTREV'), _('STARTREV')),
    ('t', 'topo', False, _('show topological heads only')),
    ('a', 'active', False, _('show active branchheads only (DEPRECATED)')),
    ('c', 'closed', False, _('show normal and closed branch heads')),
    ] + templateopts,
    _('[-ac] [-r STARTREV] [REV]...'))
def heads(ui, repo, *branchrevs, **opts):
    """show current repository heads or show branch heads

    With no arguments, show all repository branch heads.

    Repository "heads" are changesets with no child changesets. They are
    where development generally takes place and are the usual targets
    for update and merge operations. Branch heads are changesets that have
    no child changeset on the same branch.

    If one or more REVs are given, only branch heads on the branches
    associated with the specified changesets are shown.

    If -c/--closed is specified, also show branch heads marked closed
    (see :hg:`commit --close-branch`).

    If STARTREV is specified, only those heads that are descendants of
    STARTREV will be displayed.

    If -t/--topo is specified, named branch mechanics will be ignored and only
    changesets without children will be shown.

    Returns 0 if matching heads are found, 1 if not.
    """

    start = None
    if 'rev' in opts:
        start = scmutil.revsingle(repo, opts['rev'], None).node()

    if opts.get('topo'):
        # topological heads: ignore named branches entirely
        heads = [repo[h] for h in repo.heads(start)]
    else:
        # collect branch heads for every named branch
        heads = []
        for branch in repo.branchmap():
            heads += repo.branchheads(branch, start, opts.get('closed'))
        heads = [repo[h] for h in heads]

    if branchrevs:
        # restrict to the branches of the given revisions
        branches = set(repo[br].branch() for br in branchrevs)
        heads = [h for h in heads if h.branch() in branches]

    if opts.get('active') and branchrevs:
        dagheads = repo.heads(start)
        heads = [h for h in heads if h.node() in dagheads]

    if branchrevs:
        # warn about requested branches that ended up with no heads
        haveheads = set(h.branch() for h in heads)
        if branches - haveheads:
            headless = ', '.join(b for b in branches - haveheads)
            msg = _('no open branch heads found on branches %s')
            if opts.get('rev'):
                # translate first, then interpolate: keeping the '%s'
                # placeholder in the msgid is what allows the string to
                # be found in the translation catalog
                msg += _(' (started at %s)') % opts['rev']
            ui.warn((msg + '\n') % headless)

    if not heads:
        return 1

    # newest first
    heads = sorted(heads, key=lambda x: -x.rev())
    displayer = cmdutil.show_changeset(ui, repo, opts)
    for ctx in heads:
        displayer.show(ctx)
    displayer.close()
2554 2554
2555 2555 @command('help',
2556 2556 [('e', 'extension', None, _('show only help for extensions')),
2557 2557 ('c', 'command', None, _('show only help for commands'))],
2558 2558 _('[-ec] [TOPIC]'))
2559 2559 def help_(ui, name=None, with_version=False, unknowncmd=False, full=True, **opts):
2560 2560 """show help for a given topic or a help overview
2561 2561
2562 2562 With no arguments, print a list of commands with short help messages.
2563 2563
2564 2564 Given a topic, extension, or command name, print help for that
2565 2565 topic.
2566 2566
2567 2567 Returns 0 if successful.
2568 2568 """
2569 2569 option_lists = []
2570 2570 textwidth = min(ui.termwidth(), 80) - 2
2571 2571
2572 2572 def addglobalopts(aliases):
2573 2573 if ui.verbose:
2574 2574 option_lists.append((_("global options:"), globalopts))
2575 2575 if name == 'shortlist':
2576 2576 option_lists.append((_('use "hg help" for the full list '
2577 2577 'of commands'), ()))
2578 2578 else:
2579 2579 if name == 'shortlist':
2580 2580 msg = _('use "hg help" for the full list of commands '
2581 2581 'or "hg -v" for details')
2582 2582 elif name and not full:
2583 2583 msg = _('use "hg help %s" to show the full help text' % name)
2584 2584 elif aliases:
2585 2585 msg = _('use "hg -v help%s" to show builtin aliases and '
2586 2586 'global options') % (name and " " + name or "")
2587 2587 else:
2588 2588 msg = _('use "hg -v help %s" to show global options') % name
2589 2589 option_lists.append((msg, ()))
2590 2590
2591 2591 def helpcmd(name):
2592 2592 if with_version:
2593 2593 version_(ui)
2594 2594 ui.write('\n')
2595 2595
2596 2596 try:
2597 2597 aliases, entry = cmdutil.findcmd(name, table, strict=unknowncmd)
2598 2598 except error.AmbiguousCommand, inst:
2599 2599 # py3k fix: except vars can't be used outside the scope of the
2600 2600 # except block, nor can be used inside a lambda. python issue4617
2601 2601 prefix = inst.args[0]
2602 2602 select = lambda c: c.lstrip('^').startswith(prefix)
2603 2603 helplist(_('list of commands:\n\n'), select)
2604 2604 return
2605 2605
2606 2606 # check if it's an invalid alias and display its error if it is
2607 2607 if getattr(entry[0], 'badalias', False):
2608 2608 if not unknowncmd:
2609 2609 entry[0](ui)
2610 2610 return
2611 2611
2612 2612 # synopsis
2613 2613 if len(entry) > 2:
2614 2614 if entry[2].startswith('hg'):
2615 2615 ui.write("%s\n" % entry[2])
2616 2616 else:
2617 2617 ui.write('hg %s %s\n' % (aliases[0], entry[2]))
2618 2618 else:
2619 2619 ui.write('hg %s\n' % aliases[0])
2620 2620
2621 2621 # aliases
2622 2622 if full and not ui.quiet and len(aliases) > 1:
2623 2623 ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))
2624 2624
2625 2625 # description
2626 2626 doc = gettext(entry[0].__doc__)
2627 2627 if not doc:
2628 2628 doc = _("(no help text available)")
2629 2629 if hasattr(entry[0], 'definition'): # aliased command
2630 2630 if entry[0].definition.startswith('!'): # shell alias
2631 2631 doc = _('shell alias for::\n\n %s') % entry[0].definition[1:]
2632 2632 else:
2633 2633 doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc)
2634 2634 if ui.quiet or not full:
2635 2635 doc = doc.splitlines()[0]
2636 2636 keep = ui.verbose and ['verbose'] or []
2637 2637 formatted, pruned = minirst.format(doc, textwidth, keep=keep)
2638 2638 ui.write("\n%s\n" % formatted)
2639 2639 if pruned:
2640 2640 ui.write(_('\nuse "hg -v help %s" to show verbose help\n') % name)
2641 2641
2642 2642 if not ui.quiet:
2643 2643 # options
2644 2644 if entry[1]:
2645 2645 option_lists.append((_("options:\n"), entry[1]))
2646 2646
2647 2647 addglobalopts(False)
2648 2648
2649 2649 # check if this command shadows a non-trivial (multi-line)
2650 2650 # extension help text
2651 2651 try:
2652 2652 mod = extensions.find(name)
2653 2653 doc = gettext(mod.__doc__) or ''
2654 2654 if '\n' in doc.strip():
2655 2655 msg = _('use "hg help -e %s" to show help for '
2656 2656 'the %s extension') % (name, name)
2657 2657 ui.write('\n%s\n' % msg)
2658 2658 except KeyError:
2659 2659 pass
2660 2660
2661 2661 def helplist(header, select=None):
2662 2662 h = {}
2663 2663 cmds = {}
2664 2664 for c, e in table.iteritems():
2665 2665 f = c.split("|", 1)[0]
2666 2666 if select and not select(f):
2667 2667 continue
2668 2668 if (not select and name != 'shortlist' and
2669 2669 e[0].__module__ != __name__):
2670 2670 continue
2671 2671 if name == "shortlist" and not f.startswith("^"):
2672 2672 continue
2673 2673 f = f.lstrip("^")
2674 2674 if not ui.debugflag and f.startswith("debug"):
2675 2675 continue
2676 2676 doc = e[0].__doc__
2677 2677 if doc and 'DEPRECATED' in doc and not ui.verbose:
2678 2678 continue
2679 2679 doc = gettext(doc)
2680 2680 if not doc:
2681 2681 doc = _("(no help text available)")
2682 2682 h[f] = doc.splitlines()[0].rstrip()
2683 2683 cmds[f] = c.lstrip("^")
2684 2684
2685 2685 if not h:
2686 2686 ui.status(_('no commands defined\n'))
2687 2687 return
2688 2688
2689 2689 ui.status(header)
2690 2690 fns = sorted(h)
2691 2691 m = max(map(len, fns))
2692 2692 for f in fns:
2693 2693 if ui.verbose:
2694 2694 commands = cmds[f].replace("|",", ")
2695 2695 ui.write(" %s:\n %s\n"%(commands, h[f]))
2696 2696 else:
2697 2697 ui.write('%s\n' % (util.wrap(h[f], textwidth,
2698 2698 initindent=' %-*s ' % (m, f),
2699 2699 hangindent=' ' * (m + 4))))
2700 2700
2701 2701 if not ui.quiet:
2702 2702 addglobalopts(True)
2703 2703
2704 2704 def helptopic(name):
2705 2705 for names, header, doc in help.helptable:
2706 2706 if name in names:
2707 2707 break
2708 2708 else:
2709 2709 raise error.UnknownCommand(name)
2710 2710
2711 2711 # description
2712 2712 if not doc:
2713 2713 doc = _("(no help text available)")
2714 2714 if hasattr(doc, '__call__'):
2715 2715 doc = doc()
2716 2716
2717 2717 ui.write("%s\n\n" % header)
2718 2718 ui.write("%s\n" % minirst.format(doc, textwidth, indent=4))
2719 2719 try:
2720 2720 cmdutil.findcmd(name, table)
2721 2721 ui.write(_('\nuse "hg help -c %s" to see help for '
2722 2722 'the %s command\n') % (name, name))
2723 2723 except error.UnknownCommand:
2724 2724 pass
2725 2725
    def helpext(name):
        """Print the help text (and command list) for extension `name`.

        Enabled extensions are looked up first; for a disabled extension
        only its stored help text is shown.  Raises UnknownCommand when
        no documentation can be found at all.
        """
        try:
            mod = extensions.find(name)
            doc = gettext(mod.__doc__) or _('no help text available')
        except KeyError:
            # not enabled: fall back to the docs of a disabled extension
            mod = None
            doc = extensions.disabledext(name)
            if not doc:
                raise error.UnknownCommand(name)

        # the first line of the docstring is the one-line summary
        if '\n' not in doc:
            head, tail = doc, ""
        else:
            head, tail = doc.split('\n', 1)
        ui.write(_('%s extension - %s\n\n') % (name.split('.')[-1], head))
        if tail:
            ui.write(minirst.format(tail, textwidth))
            ui.status('\n\n')

        if mod:
            # enabled: also list the commands the extension provides
            try:
                ct = mod.cmdtable
            except AttributeError:
                ct = {}
            modcmds = set([c.split('|', 1)[0] for c in ct])
            helplist(_('list of commands:\n\n'), modcmds.__contains__)
        else:
            ui.write(_('use "hg help extensions" for information on enabling '
                       'extensions\n'))
2755 2755
2756 2756 def helpextcmd(name):
2757 2757 cmd, ext, mod = extensions.disabledcmd(ui, name, ui.config('ui', 'strict'))
2758 2758 doc = gettext(mod.__doc__).splitlines()[0]
2759 2759
2760 2760 msg = help.listexts(_("'%s' is provided by the following "
2761 2761 "extension:") % cmd, {ext: doc}, indent=4)
2762 2762 ui.write(minirst.format(msg, textwidth))
2763 2763 ui.write('\n\n')
2764 2764 ui.write(_('use "hg help extensions" for information on enabling '
2765 2765 'extensions\n'))
2766 2766
2767 2767 if name and name != 'shortlist':
2768 2768 i = None
2769 2769 if unknowncmd:
2770 2770 queries = (helpextcmd,)
2771 2771 elif opts.get('extension'):
2772 2772 queries = (helpext,)
2773 2773 elif opts.get('command'):
2774 2774 queries = (helpcmd,)
2775 2775 else:
2776 2776 queries = (helptopic, helpcmd, helpext, helpextcmd)
2777 2777 for f in queries:
2778 2778 try:
2779 2779 f(name)
2780 2780 i = None
2781 2781 break
2782 2782 except error.UnknownCommand, inst:
2783 2783 i = inst
2784 2784 if i:
2785 2785 raise i
2786 2786
2787 2787 else:
2788 2788 # program name
2789 2789 if ui.verbose or with_version:
2790 2790 version_(ui)
2791 2791 else:
2792 2792 ui.status(_("Mercurial Distributed SCM\n"))
2793 2793 ui.status('\n')
2794 2794
2795 2795 # list of commands
2796 2796 if name == "shortlist":
2797 2797 header = _('basic commands:\n\n')
2798 2798 else:
2799 2799 header = _('list of commands:\n\n')
2800 2800
2801 2801 helplist(header)
2802 2802 if name != 'shortlist':
2803 2803 text = help.listexts(_('enabled extensions:'), extensions.enabled())
2804 2804 if text:
2805 2805 ui.write("\n%s\n" % minirst.format(text, textwidth))
2806 2806
2807 2807 # list all option lists
2808 2808 opt_output = []
2809 2809 multioccur = False
2810 2810 for title, options in option_lists:
2811 2811 opt_output.append(("\n%s" % title, None))
2812 2812 for option in options:
2813 2813 if len(option) == 5:
2814 2814 shortopt, longopt, default, desc, optlabel = option
2815 2815 else:
2816 2816 shortopt, longopt, default, desc = option
2817 2817 optlabel = _("VALUE") # default label
2818 2818
2819 2819 if _("DEPRECATED") in desc and not ui.verbose:
2820 2820 continue
2821 2821 if isinstance(default, list):
2822 2822 numqualifier = " %s [+]" % optlabel
2823 2823 multioccur = True
2824 2824 elif (default is not None) and not isinstance(default, bool):
2825 2825 numqualifier = " %s" % optlabel
2826 2826 else:
2827 2827 numqualifier = ""
2828 2828 opt_output.append(("%2s%s" %
2829 2829 (shortopt and "-%s" % shortopt,
2830 2830 longopt and " --%s%s" %
2831 2831 (longopt, numqualifier)),
2832 2832 "%s%s" % (desc,
2833 2833 default
2834 2834 and _(" (default: %s)") % default
2835 2835 or "")))
2836 2836 if multioccur:
2837 2837 msg = _("\n[+] marked option can be specified multiple times")
2838 2838 if ui.verbose and name != 'shortlist':
2839 2839 opt_output.append((msg, None))
2840 2840 else:
2841 2841 opt_output.insert(-1, (msg, None))
2842 2842
2843 2843 if not name:
2844 2844 ui.write(_("\nadditional help topics:\n\n"))
2845 2845 topics = []
2846 2846 for names, header, doc in help.helptable:
2847 2847 topics.append((sorted(names, key=len, reverse=True)[0], header))
2848 2848 topics_len = max([len(s[0]) for s in topics])
2849 2849 for t, desc in topics:
2850 2850 ui.write(" %-*s %s\n" % (topics_len, t, desc))
2851 2851
2852 2852 if opt_output:
2853 2853 colwidth = encoding.colwidth
2854 2854 # normalize: (opt or message, desc or None, width of opt)
2855 2855 entries = [desc and (opt, desc, colwidth(opt)) or (opt, None, 0)
2856 2856 for opt, desc in opt_output]
2857 2857 hanging = max([e[2] for e in entries])
2858 2858 for opt, desc, width in entries:
2859 2859 if desc:
2860 2860 initindent = ' %s%s ' % (opt, ' ' * (hanging - width))
2861 2861 hangindent = ' ' * (hanging + 3)
2862 2862 ui.write('%s\n' % (util.wrap(desc, textwidth,
2863 2863 initindent=initindent,
2864 2864 hangindent=hangindent)))
2865 2865 else:
2866 2866 ui.write("%s\n" % opt)
2867 2867
@command('identify|id',
    [('r', 'rev', '',
     _('identify the specified revision'), _('REV')),
    ('n', 'num', None, _('show local revision number')),
    ('i', 'id', None, _('show global revision id')),
    ('b', 'branch', None, _('show branch')),
    ('t', 'tags', None, _('show tags')),
    ('B', 'bookmarks', None, _('show bookmarks'))],
    _('[-nibtB] [-r REV] [SOURCE]'))
def identify(ui, repo, source=None, rev=None,
             num=None, id=None, branch=None, tags=None, bookmarks=None):
    """identify the working copy or specified revision

    Print a summary identifying the repository state at REV using one or
    two parent hash identifiers, followed by a "+" if the working
    directory has uncommitted changes, the branch name (if not default),
    a list of tags, and a list of bookmarks.

    When REV is not given, print a summary of the current state of the
    repository.

    Specifying a path to a repository root or Mercurial bundle will
    cause lookup to operate on that repository/bundle.

    Returns 0 if successful.
    """

    if not repo and not source:
        raise util.Abort(_("there is no Mercurial repository here "
                           "(.hg not found)"))

    # full hashes with --debug, short ones otherwise
    hexfunc = ui.debugflag and hex or short
    # no selection flag given: show id plus branch/tag/bookmark decorations
    default = not (num or id or branch or tags or bookmarks)
    output = []
    revs = []

    if source:
        # operate on another repository (or bundle) instead of this one
        source, branches = hg.parseurl(ui.expandpath(source))
        repo = hg.repository(ui, source)
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)

    if not repo.local():
        # a remote peer only answers lookup() queries, so the
        # number/branch/tags flags cannot be honored
        if num or branch or tags:
            raise util.Abort(
                _("can't query remote revision number, branch, or tags"))
        if not rev and revs:
            rev = revs[0]
        if not rev:
            rev = "tip"

        remoterev = repo.lookup(rev)
        if default or id:
            output = [hexfunc(remoterev)]

        def getbms():
            # bookmarks pointing at the remote revision, if the remote
            # exposes the bookmarks pushkey namespace
            bms = []

            if 'bookmarks' in repo.listkeys('namespaces'):
                hexremoterev = hex(remoterev)
                bms = [bm for bm, bmr in repo.listkeys('bookmarks').iteritems()
                       if bmr == hexremoterev]

            return bms

        if bookmarks:
            output.extend(getbms())
        elif default and not ui.quiet:
            # multiple bookmarks for a single parent separated by '/'
            bm = '/'.join(getbms())
            if bm:
                output.append(bm)
    else:
        if not rev:
            # identify the working directory via its parent(s)
            ctx = repo[None]
            parents = ctx.parents()
            changed = ""
            if default or id or num:
                # '+' marks uncommitted local modifications
                changed = util.any(repo.status()) and "+" or ""
            if default or id:
                output = ["%s%s" %
                  ('+'.join([hexfunc(p.node()) for p in parents]), changed)]
            if num:
                output.append("%s%s" %
                  ('+'.join([str(p.rev()) for p in parents]), changed))
        else:
            ctx = scmutil.revsingle(repo, rev)
            if default or id:
                output = [hexfunc(ctx.node())]
            if num:
                output.append(str(ctx.rev()))

        if default and not ui.quiet:
            b = ctx.branch()
            # the default branch is implicit and therefore not shown
            if b != 'default':
                output.append("(%s)" % b)

            # multiple tags for a single parent separated by '/'
            t = '/'.join(ctx.tags())
            if t:
                output.append(t)

            # multiple bookmarks for a single parent separated by '/'
            bm = '/'.join(ctx.bookmarks())
            if bm:
                output.append(bm)
        else:
            if branch:
                output.append(ctx.branch())

            if tags:
                output.extend(ctx.tags())

            if bookmarks:
                output.extend(ctx.bookmarks())

    ui.write("%s\n" % ' '.join(output))
2984 2984
@command('import|patch',
    [('p', 'strip', 1,
     _('directory strip option for patch. This has the same '
       'meaning as the corresponding patch option'), _('NUM')),
    ('b', 'base', '', _('base path'), _('PATH')),
    ('f', 'force', None, _('skip check for outstanding uncommitted changes')),
    ('', 'no-commit', None,
     _("don't commit, just update the working directory")),
    ('', 'exact', None,
     _('apply patch to the nodes from which it was generated')),
    ('', 'import-branch', None,
     _('use any branch information in patch (implied by --exact)'))] +
    commitopts + commitopts2 + similarityopts,
    _('[OPTION]... PATCH...'))
def import_(ui, repo, patch1, *patches, **opts):
    """import an ordered set of patches

    Import a list of patches and commit them individually (unless
    --no-commit is specified).

    If there are outstanding changes in the working directory, import
    will abort unless given the -f/--force flag.

    You can import a patch straight from a mail message. Even patches
    as attachments work (to use the body part, it must have type
    text/plain or text/x-patch). From and Subject headers of email
    message are used as default committer and commit message. All
    text/plain body parts before first diff are added to commit
    message.

    If the imported patch was generated by :hg:`export`, user and
    description from patch override values from message headers and
    body. Values given on command line with -m/--message and -u/--user
    override these.

    If --exact is specified, import will set the working directory to
    the parent of each patch before applying it, and will abort if the
    resulting changeset has a different ID than the one recorded in
    the patch. This may happen due to character set problems or other
    deficiencies in the text patch format.

    With -s/--similarity, hg will attempt to discover renames and
    copies in the patch in the same way as 'addremove'.

    To read a patch from standard input, use "-" as the patch name. If
    a URL is specified, the patch will be downloaded from it.
    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success.
    """
    patches = (patch1,) + patches

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    try:
        sim = float(opts.get('similarity') or 0)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))

    # --exact needs a clean working directory to check out the recorded
    # parents; without --force we refuse to clobber local changes
    if opts.get('exact') or not opts.get('force'):
        cmdutil.bailifchanged(repo)

    d = opts["base"]
    strip = opts["strip"]
    wlock = lock = None
    msgs = []

    def tryone(ui, hunk):
        # Apply a single extracted patch; returns a short commit id (or
        # the string 'to working directory' with --no-commit), or None
        # when the hunk contained no usable patch.
        tmpname, message, user, date, branch, nodeid, p1, p2 = \
            patch.extract(ui, hunk)

        if not tmpname:
            return None
        commitid = _('to working directory')

        try:
            # message precedence: command line, then patch header,
            # otherwise None so the editor is launched at commit time
            cmdline_message = cmdutil.logmessage(opts)
            if cmdline_message:
                # pickup the cmdline msg
                message = cmdline_message
            elif message:
                # pickup the patch msg
                message = message.strip()
            else:
                # launch the editor
                message = None
            ui.debug('message:\n%s\n' % message)

            wp = repo.parents()
            if opts.get('exact'):
                if not nodeid or not p1:
                    raise util.Abort(_('not a Mercurial patch'))
                p1 = repo.lookup(p1)
                p2 = repo.lookup(p2 or hex(nullid))

                # update to the recorded first parent before applying
                if p1 != wp[0].node():
                    hg.clean(repo, p1)
                repo.dirstate.setparents(p1, p2)
            elif p2:
                # merge patch: adopt the recorded parents when both are
                # present locally, otherwise apply as an ordinary patch
                try:
                    p1 = repo.lookup(p1)
                    p2 = repo.lookup(p2)
                    if p1 == wp[0].node():
                        repo.dirstate.setparents(p1, p2)
                except error.RepoError:
                    pass
            if opts.get('exact') or opts.get('import_branch'):
                repo.dirstate.setbranch(branch or 'default')

            files = {}
            patch.patch(ui, repo, tmpname, strip=strip, files=files,
                        eolmode=None, similarity=sim / 100.0)
            files = list(files)
            if opts.get('no_commit'):
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact'):
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                n = repo.commit(message, opts.get('user') or user,
                                opts.get('date') or date, match=m,
                                editor=cmdutil.commiteditor)
                if opts.get('exact'):
                    # the committed node must match the id recorded in the
                    # patch, otherwise information was lost in transit
                    if hex(n) != nodeid:
                        repo.rollback()
                        raise util.Abort(_('patch is damaged'
                                           ' or loses information'))
                    # Force a dirstate write so that the next transaction
                    # backups an up-do-date file.
                    repo.dirstate.write()
                if n:
                    commitid = short(n)

            return commitid
        finally:
            os.unlink(tmpname)

    try:
        wlock = repo.wlock()
        lock = repo.lock()
        lastcommit = None
        for p in patches:
            pf = os.path.join(d, p)

            if pf == '-':
                ui.status(_("applying patch from stdin\n"))
                pf = sys.stdin
            else:
                ui.status(_("applying %s\n") % p)
                pf = url.open(ui, pf)

            # one file may contain several patches (e.g. an mbox);
            # apply each in turn
            haspatch = False
            for hunk in patch.split(pf):
                commitid = tryone(ui, hunk)
                if commitid:
                    haspatch = True
                    if lastcommit:
                        ui.status(_('applied %s\n') % lastcommit)
                    lastcommit = commitid

            if not haspatch:
                raise util.Abort(_('no diffs found'))

        if msgs:
            # with --no-commit, keep the collected messages around for a
            # later manual commit
            repo.opener.write('last-message.txt', '\n* * *\n'.join(msgs))
    finally:
        release(lock, wlock)
3158 3158
@command('incoming|in',
    [('f', 'force', None,
     _('run even if remote repository is unrelated')),
    ('n', 'newest-first', None, _('show newest record first')),
    ('', 'bundle', '',
     _('file to store the bundles into'), _('FILE')),
    ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
    ('B', 'bookmarks', False, _("compare bookmarks")),
    ('b', 'branch', [],
     _('a specific branch you would like to pull'), _('BRANCH')),
    ] + logopts + remoteopts + subrepoopts,
    _('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'))
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would have been pulled
    if a pull at the time you issued this command.

    For remote repository, using --bundle avoids downloading the
    changesets twice if the incoming is followed by a pull.

    See pull for valid source format details.

    Returns 0 if there are incoming changes, 1 otherwise.
    """
    # --bundle writes changesets to a file, which makes no sense when
    # recursing into subrepositories
    if opts.get('bundle') and opts.get('subrepos'):
        raise util.Abort(_('cannot combine --bundle and --subrepos'))

    if opts.get('bookmarks'):
        # bookmark comparison replaces the changeset listing entirely
        source, branches = hg.parseurl(ui.expandpath(source),
                                       opts.get('branch'))
        remote = hg.repository(hg.remoteui(repo, opts), source)
        if 'bookmarks' not in remote.listkeys('namespaces'):
            ui.warn(_("remote doesn't support bookmarks\n"))
            return 0
        ui.status(_('comparing with %s\n') % util.hidepassword(source))
        return bookmarks.diff(ui, repo, remote)

    # record where subrepositories should pull from while we run
    repo._subtoppath = ui.expandpath(source)
    try:
        return hg.incoming(ui, repo, source, opts)
    finally:
        del repo._subtoppath
3203 3203
3204 3204
@command('^init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]'))
def init(ui, dest=".", **opts):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it will be created.

    If no directory is given, the current directory is used.

    It is possible to specify an ``ssh://`` URL as the destination.
    See :hg:`help urls` for more information.

    Returns 0 on success.
    """
    # expand path aliases / remote schemes first, then create the repo
    path = ui.expandpath(dest)
    remoteui = hg.remoteui(ui, opts)
    hg.repository(remoteui, path, create=True)
3220 3220
@command('locate',
    [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
    ('f', 'fullpath', None, _('print complete paths from the filesystem root')),
    ] + walkopts,
    _('[OPTION]... [PATTERN]...'))
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns

    Print files under Mercurial control in the working directory whose
    names match the given patterns.

    By default, this command searches all directories in the working
    directory. To search just the current directory and its
    subdirectories, use "--include .".

    If no patterns are given to match, this command prints the names
    of all files under Mercurial control in the working directory.

    If you want to feed the output of this command into the "xargs"
    command, use the -0 option to both this command and "xargs". This
    will avoid the problem of "xargs" treating single filenames that
    contain whitespace as multiple filenames.

    Returns 0 if a match is found, 1 otherwise.
    """
    # entry separator: NUL for xargs -0 compatibility, newline otherwise
    if opts.get('print0'):
        end = '\0'
    else:
        end = '\n'
    node = scmutil.revsingle(repo, opts.get('rev'), None).node()

    m = scmutil.match(repo, pats, opts, default='relglob')
    # suppress complaints about files that do not match any pattern
    m.bad = lambda x, y: False
    found = False
    for abs in repo[node].walk(m):
        # in the working directory, skip files not under version control
        if not node and abs not in repo.dirstate:
            continue
        if opts.get('fullpath'):
            ui.write(repo.wjoin(abs), end)
        else:
            ui.write(((pats and m.rel(abs)) or abs), end)
        found = True

    if found:
        return 0
    return 1
3263 3263
@command('^log|history',
    [('f', 'follow', None,
     _('follow changeset history, or file history across copies and renames')),
    ('', 'follow-first', None,
     _('only follow the first parent of merge changesets')),
    ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
    ('C', 'copies', None, _('show copied files')),
    ('k', 'keyword', [],
     _('do case-insensitive search for a given text'), _('TEXT')),
    ('r', 'rev', [], _('show the specified revision or range'), _('REV')),
    ('', 'removed', None, _('include revisions where files were removed')),
    ('m', 'only-merges', None, _('show only merges')),
    ('u', 'user', [], _('revisions committed by user'), _('USER')),
    ('', 'only-branch', [],
     _('show only changesets within the given named branch (DEPRECATED)'),
     _('BRANCH')),
    ('b', 'branch', [],
     _('show changesets within the given named branch'), _('BRANCH')),
    ('P', 'prune', [],
     _('do not display revision or any of its ancestors'), _('REV')),
    ] + logopts + walkopts,
    _('[OPTION]... [FILE]'))
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire
    project.

    File history is shown without following rename or copy history of
    files. Use -f/--follow with a filename to follow history across
    renames and copies. --follow without a filename will only show
    ancestors or descendants of the starting revision. --follow-first
    only follows the first parent of merge revisions.

    If no revision range is specified, the default is ``tip:0`` unless
    --follow is set, in which case the working directory parent is
    used as the starting revision. You can specify a revision set for
    log, see :hg:`help revsets` for more information.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    By default this command prints revision number and changeset id,
    tags, non-trivial parents, user, date and time, and a summary for
    each commit. When the -v/--verbose switch is used, the list of
    changed files and full commit message are shown.

    .. note::
       log -p/--patch may generate unexpected diff output for merge
       changesets, as it will only compare the merge changeset against
       its first parent. Also, only files different from BOTH parents
       will appear in files:.

    Returns 0 on success.
    """

    matchfn = scmutil.match(repo, pats, opts)
    limit = cmdutil.loglimit(opts)
    count = 0

    endrev = None
    if opts.get('copies') and opts.get('rev'):
        # rename detection need not look past the newest requested rev
        endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1

    df = False
    if opts["date"]:
        df = util.matchdate(opts["date"])

    # --only-branch is the deprecated spelling of --branch; merge them
    branches = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in branches]

    displayer = cmdutil.show_changeset(ui, repo, opts, True)
    def prep(ctx, fns):
        # Filter and display a candidate changeset; called lazily by
        # walkchangerevs. Returning without calling displayer.show()
        # drops the revision from the output.
        rev = ctx.rev()
        parents = [p for p in repo.changelog.parentrevs(rev)
                   if p != nullrev]
        if opts.get('no_merges') and len(parents) == 2:
            return
        if opts.get('only_merges') and len(parents) != 2:
            return
        if opts.get('branch') and ctx.branch() not in opts['branch']:
            return
        if df and not df(ctx.date()[0]):
            return
        if opts['user'] and not [k for k in opts['user']
                                 if k.lower() in ctx.user().lower()]:
            return
        if opts.get('keyword'):
            # a keyword matches the user, the description or any filename
            for k in [kw.lower() for kw in opts['keyword']]:
                if (k in ctx.user().lower() or
                    k in ctx.description().lower() or
                    k in " ".join(ctx.files()).lower()):
                    break
            else:
                return

        copies = None
        if opts.get('copies') and rev:
            # collect (file, source) rename pairs for the -C display
            copies = []
            getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
            for fn in ctx.files():
                rename = getrenamed(fn, rev)
                if rename:
                    copies.append((fn, rename[0]))

        revmatchfn = None
        if opts.get('patch') or opts.get('stat'):
            if opts.get('follow') or opts.get('follow_first'):
                # note: this might be wrong when following through merges
                revmatchfn = scmutil.match(repo, fns, default='path')
            else:
                revmatchfn = matchfn

        displayer.show(ctx, copies=copies, matchfn=revmatchfn)

    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        if count == limit:
            break
        # flush() reports whether the revision was actually displayed
        if displayer.flush(ctx.rev()):
            count += 1
    displayer.close()
3384 3384
@command('manifest',
    [('r', 'rev', '', _('revision to display'), _('REV')),
    ('', 'all', False, _("list files from all revisions"))],
    _('[-r REV]'))
def manifest(ui, repo, node=None, rev=None, **opts):
    """output the current or given revision of the project manifest

    Print a list of version controlled files for the given revision.
    If no revision is given, the first parent of the working directory
    is used, or the null revision if no revision is checked out.

    With -v, print file permissions, symlink and executable bits.
    With --debug, print file revision hashes.

    If option --all is specified, the list of all files from all revisions
    is printed. This includes deleted and renamed files.

    Returns 0 on success.
    """
    if opts.get('all'):
        if rev or node:
            raise util.Abort(_("can't specify a revision with --all"))

        # enumerate every filelog in the store; each tracked file has a
        # "data/<name>.i" index file
        res = []
        prefix = "data/"
        suffix = ".i"
        plen = len(prefix)
        slen = len(suffix)
        lock = repo.lock()
        try:
            for fn, b, size in repo.store.datafiles():
                if size != 0 and fn[-slen:] == suffix and fn[:plen] == prefix:
                    res.append(fn[plen:-slen])
        finally:
            lock.release()
        for f in sorted(res):
            ui.write("%s\n" % f)
        return

    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    # the positional argument and -r/--rev are interchangeable
    if not node:
        node = rev

    # verbose-mode prefixes: symlink, executable, regular file
    decor = {'l':'644 @ ', 'x':'755 * ', '':'644 '}
    ctx = scmutil.revsingle(repo, node)
    for f in ctx:
        if ui.debugflag:
            ui.write("%40s " % hex(ctx.manifest()[f]))
        if ui.verbose:
            ui.write(decor[ctx.flags(f)])
        ui.write("%s\n" % f)
3438 3438
@command('^merge',
    [('f', 'force', None, _('force a merge with outstanding changes')),
    ('t', 'tool', '', _('specify merge tool')),
    ('r', 'rev', '', _('revision to merge'), _('REV')),
    ('P', 'preview', None,
     _('review revisions to merge (no merge is performed)'))],
    _('[-P] [-f] [[-r] REV]'))
def merge(ui, repo, node=None, **opts):
    """merge working directory with another revision

    The current working directory is updated with all changes made in
    the requested revision since the last common predecessor revision.

    Files that changed between either parent are marked as changed for
    the next commit and a commit must be performed before any further
    updates to the repository are allowed. The next commit will have
    two parents.

    ``--tool`` can be used to specify the merge tool used for file
    merges. It overrides the HGMERGE environment variable and your
    configuration files. See :hg:`help merge-tools` for options.

    If no revision is specified, the working directory's parent is a
    head revision, and the current branch contains exactly one other
    head, the other head is merged with by default. Otherwise, an
    explicit revision with which to merge with must be provided.

    :hg:`resolve` must be used to resolve unresolved files.

    To undo an uncommitted merge, use :hg:`update --clean .` which
    will check out a clean copy of the original merge parent, losing
    all changes.

    Returns 0 on success, 1 if there are unresolved files.
    """

    # the revision may be given positionally or via -r, but not both
    if opts.get('rev') and node:
        raise util.Abort(_("please specify just one revision"))
    if not node:
        node = opts.get('rev')

    if not node:
        # no revision given: merge with the single other head of the
        # current branch, if the situation is unambiguous
        branch = repo[None].branch()
        bheads = repo.branchheads(branch)
        if len(bheads) > 2:
            raise util.Abort(_("branch '%s' has %d heads - "
                               "please merge with an explicit rev")
                             % (branch, len(bheads)),
                             hint=_("run 'hg heads .' to see heads"))

        parent = repo.dirstate.p1()
        if len(bheads) == 1:
            if len(repo.heads()) > 1:
                raise util.Abort(_("branch '%s' has one head - "
                                   "please merge with an explicit rev")
                                 % branch,
                                 hint=_("run 'hg heads' to see all heads"))
            msg = _('there is nothing to merge')
            if parent != repo.lookup(repo[None].branch()):
                msg = _('%s - use "hg update" instead') % msg
            raise util.Abort(msg)

        if parent not in bheads:
            raise util.Abort(_('working directory not at a head revision'),
                             hint=_("use 'hg update' or merge with an "
                                    "explicit revision"))
        # exactly two branch heads: merge with the one we are not on
        node = parent == bheads[0] and bheads[-1] or bheads[0]
    else:
        node = scmutil.revsingle(repo, node).node()

    if opts.get('preview'):
        # find nodes that are ancestors of p2 but not of p1
        p1 = repo.lookup('.')
        p2 = repo.lookup(node)
        nodes = repo.changelog.findmissing(common=[p1], heads=[p2])

        displayer = cmdutil.show_changeset(ui, repo, opts)
        for node in nodes:
            displayer.show(repo[node])
        displayer.close()
        return 0

    try:
        # ui.forcemerge is an internal variable, do not document
        ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
        return hg.merge(repo, node, force=opts.get('force'))
    finally:
        ui.setconfig('ui', 'forcemerge', '')
3527 3527
@command('outgoing|out',
    [('f', 'force', None, _('run even when the destination is unrelated')),
    ('r', 'rev', [],
     _('a changeset intended to be included in the destination'), _('REV')),
    ('n', 'newest-first', None, _('show newest record first')),
    ('B', 'bookmarks', False, _('compare bookmarks')),
    ('b', 'branch', [], _('a specific branch you would like to push'),
     _('BRANCH')),
    ] + logopts + remoteopts + subrepoopts,
    _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]'))
def outgoing(ui, repo, dest=None, **opts):
    """show changesets not found in the destination

    Show changesets not found in the specified destination repository
    or the default push location. These are the changesets that would
    be pushed if a push was requested.

    See pull for details of valid destination formats.

    Returns 0 if there are outgoing changes, 1 otherwise.
    """

    if opts.get('bookmarks'):
        # bookmark comparison replaces the changeset listing entirely
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        remote = hg.repository(hg.remoteui(repo, opts), dest)
        if 'bookmarks' not in remote.listkeys('namespaces'):
            ui.warn(_("remote doesn't support bookmarks\n"))
            return 0
        ui.status(_('comparing with %s\n') % util.hidepassword(dest))
        return bookmarks.diff(ui, remote, repo)

    # record where subrepositories should push to while we run
    repo._subtoppath = ui.expandpath(dest or 'default-push', dest or 'default')
    try:
        return hg.outgoing(ui, repo, dest, opts)
    finally:
        del repo._subtoppath
3565 3565
@command('parents',
    [('r', 'rev', '', _('show parents of the specified revision'), _('REV')),
    ] + templateopts,
    _('[-r REV] [FILE]'))
def parents(ui, repo, file_=None, **opts):
    """show the parents of the working directory or revision

    Print the working directory's parent revisions. If a revision is
    given via -r/--rev, the parent of that revision will be printed.
    If a file argument is given, the revision in which the file was
    last changed (before the working directory revision or the
    argument to --rev if given) is printed.

    Returns 0 on success.
    """

    # context whose parents we report; None default means the working
    # directory when no --rev is given
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    if file_:
        # with a file argument, report the changeset(s) that last
        # touched that one file instead of the revision's parents
        m = scmutil.match(repo, (file_,), opts)
        if m.anypats() or len(m.files()) != 1:
            raise util.Abort(_('can only specify an explicit filename'))
        file_ = m.files()[0]
        filenodes = []
        for cp in ctx.parents():
            if not cp:
                continue
            try:
                filenodes.append(cp.filenode(file_))
            except error.LookupError:
                # file is absent from this parent; ignore it
                pass
        if not filenodes:
            raise util.Abort(_("'%s' not found in manifest!") % file_)
        fl = repo.file(file_)
        # map each filelog node back to the changeset that introduced it
        p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]
    else:
        p = [cp.node() for cp in ctx.parents()]

    displayer = cmdutil.show_changeset(ui, repo, opts)
    for n in p:
        # suppress the null parent (root changesets / empty wdir parent)
        if n != nullid:
            displayer.show(repo[n])
    displayer.close()
3609 3609
@command('paths', [], _('[NAME]'))
def paths(ui, repo, search=None):
    """show aliases for remote repositories

    Show definition of symbolic path name NAME. If no name is given,
    show definition of all available names.

    Option -q/--quiet suppresses all output when searching for NAME
    and shows only the path names when listing all definitions.

    Path names are defined in the [paths] section of your
    configuration file and in ``/etc/mercurial/hgrc``. If run inside a
    repository, ``.hg/hgrc`` is used, too.

    The path names ``default`` and ``default-push`` have a special
    meaning. When performing a push or pull operation, they are used
    as fallbacks if no location is specified on the command-line.
    When ``default-push`` is set, it will be used for push and
    ``default`` will be used for pull; otherwise ``default`` is used
    as the fallback for both. When cloning a repository, the clone
    source is written as ``default`` in ``.hg/hgrc``. Note that
    ``default`` and ``default-push`` apply to all inbound (e.g.
    :hg:`incoming`) and outbound (e.g. :hg:`outgoing`, :hg:`email` and
    :hg:`bundle`) operations.

    See :hg:`help urls` for more information.

    Returns 0 on success.
    """
    pathitems = ui.configitems("paths")

    if search:
        # look up one specific alias; first match wins
        for name, path in pathitems:
            if name != search:
                continue
            ui.status("%s\n" % util.hidepassword(path))
            return
        if not ui.quiet:
            ui.warn(_("not found!\n"))
        return 1

    # no NAME given: list every configured alias
    for name, path in pathitems:
        if ui.quiet:
            ui.write("%s\n" % name)
        else:
            ui.write("%s = %s\n" % (name, util.hidepassword(path)))
3653 3653
3654 3654 def postincoming(ui, repo, modheads, optupdate, checkout):
3655 3655 if modheads == 0:
3656 3656 return
3657 3657 if optupdate:
3658 3658 try:
3659 3659 return hg.update(repo, checkout)
3660 3660 except util.Abort, inst:
3661 3661 ui.warn(_("not updating: %s\n" % str(inst)))
3662 3662 return 0
3663 3663 if modheads > 1:
3664 3664 currentbranchheads = len(repo.branchheads())
3665 3665 if currentbranchheads == modheads:
3666 3666 ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
3667 3667 elif currentbranchheads > 1:
3668 3668 ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to merge)\n"))
3669 3669 else:
3670 3670 ui.status(_("(run 'hg heads' to see heads)\n"))
3671 3671 else:
3672 3672 ui.status(_("(run 'hg update' to get a working copy)\n"))
3673 3673
@command('^pull',
    [('u', 'update', None,
     _('update to new branch head if changesets were pulled')),
    ('f', 'force', None, _('run even when remote repository is unrelated')),
    ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
    ('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')),
    ('b', 'branch', [], _('a specific branch you would like to pull'),
     _('BRANCH')),
    ] + remoteopts,
    _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'))
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source

    Pull changes from a remote repository to a local one.

    This finds all changes from the repository at the specified path
    or URL and adds them to a local repository (the current one unless
    -R is specified). By default, this does not update the copy of the
    project in the working directory.

    Use :hg:`incoming` if you want to see what would have been added
    by a pull at the time you issued this command. If you then decide
    to add those changes to the repository, you should use :hg:`pull
    -r X` where ``X`` is the last changeset listed by :hg:`incoming`.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    Returns 0 on success, 1 if an update had unresolved files.
    """
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    other = hg.repository(hg.remoteui(repo, opts), source)
    ui.status(_('pulling from %s\n') % util.hidepassword(source))
    revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))

    if opts.get('bookmark'):
        # resolve each -B bookmark to the remote revision it points at so
        # the corresponding changesets are pulled too
        if not revs:
            revs = []
        # NOTE: rb is deliberately kept around — it is reused after the
        # pull below to import the bookmark values
        rb = other.listkeys('bookmarks')
        for b in opts['bookmark']:
            if b not in rb:
                raise util.Abort(_('remote bookmark %s not found!') % b)
            revs.append(rb[b])

    if revs:
        try:
            # turn symbolic revisions into binary nodes on the remote side
            revs = [other.lookup(rev) for rev in revs]
        except error.CapabilityError:
            err = _("other repository doesn't support revision lookup, "
                    "so a rev cannot be specified.")
            raise util.Abort(err)

    modheads = repo.pull(other, heads=revs, force=opts.get('force'))
    bookmarks.updatefromremote(ui, repo, other)
    if checkout:
        # translate the requested checkout into a local revision number
        checkout = str(repo.changelog.rev(other.lookup(checkout)))
    # let subrepos know which top-level path they were pulled from
    repo._subtoppath = source
    try:
        ret = postincoming(ui, repo, modheads, opts.get('update'), checkout)

    finally:
        del repo._subtoppath

    # update specified bookmarks
    if opts.get('bookmark'):
        for b in opts['bookmark']:
            # explicit pull overrides local bookmark if any
            ui.status(_("importing bookmark %s\n") % b)
            repo._bookmarks[b] = repo[rb[b]].node()
        bookmarks.write(repo)

    return ret
3746 3746
@command('^push',
    [('f', 'force', None, _('force push')),
    ('r', 'rev', [],
     _('a changeset intended to be included in the destination'),
     _('REV')),
    ('B', 'bookmark', [], _("bookmark to push"), _('BOOKMARK')),
    ('b', 'branch', [],
     _('a specific branch you would like to push'), _('BRANCH')),
    ('', 'new-branch', False, _('allow pushing a new branch')),
    ] + remoteopts,
    _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'))
def push(ui, repo, dest=None, **opts):
    """push changes to the specified destination

    Push changesets from the local repository to the specified
    destination.

    This operation is symmetrical to pull: it is identical to a pull
    in the destination repository from the current one.

    By default, push will not allow creation of new heads at the
    destination, since multiple heads would make it unclear which head
    to use. In this situation, it is recommended to pull and merge
    before pushing.

    Use --new-branch if you want to allow push to create a new named
    branch that is not present at the destination. This allows you to
    only create a new branch without forcing other changes.

    Use -f/--force to override the default behavior and push all
    changesets on all branches.

    If -r/--rev is used, the specified revision and all its ancestors
    will be pushed to the remote repository.

    Please see :hg:`help urls` for important details about ``ssh://``
    URLs. If DESTINATION is omitted, a default path will be used.

    Returns 0 if push was successful, 1 if nothing to push.
    """

    if opts.get('bookmark'):
        for b in opts['bookmark']:
            # translate -B options to -r so changesets get pushed
            if b in repo._bookmarks:
                opts.setdefault('rev', []).append(b)
            else:
                # if we try to push a deleted bookmark, translate it to null
                # this lets simultaneous -r, -b options continue working
                opts.setdefault('rev', []).append("null")

    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    ui.status(_('pushing to %s\n') % util.hidepassword(dest))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    other = hg.repository(hg.remoteui(repo, opts), dest)
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    # let subrepos know which top-level path they are being pushed to
    repo._subtoppath = dest
    try:
        # push subrepos depth-first for coherent ordering
        c = repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if not c.sub(s).push(opts.get('force')):
                return False
    finally:
        del repo._subtoppath
    result = repo.push(other, opts.get('force'), revs=revs,
                       newbranch=opts.get('new_branch'))

    # convert repo.push's return value into the command's exit
    # convention: truthy here means "nothing to push" (exit code 1)
    result = (result == 0)

    if opts.get('bookmark'):
        rb = other.listkeys('bookmarks')
        for b in opts['bookmark']:
            # explicit push overrides remote bookmark if any
            if b in repo._bookmarks:
                ui.status(_("exporting bookmark %s\n") % b)
                new = repo[b].hex()
            elif b in rb:
                # bookmark named with -B exists remotely but not locally:
                # request its deletion on the remote
                ui.status(_("deleting remote bookmark %s\n") % b)
                new = '' # delete
            else:
                ui.warn(_('bookmark %s does not exist on the local '
                          'or remote repository!\n') % b)
                return 2
            old = rb.get(b, '')
            r = other.pushkey('bookmarks', b, old, new)
            if not r:
                ui.warn(_('updating bookmark %s failed!\n') % b)
                if not result:
                    result = 2

    return result
3843 3843
@command('recover', [])
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an
    interrupted operation. It should only be necessary when Mercurial
    suggests it.

    Returns 0 if successful, 1 if nothing to recover or verify fails.
    """
    # repo.recover() reports whether a transaction was rolled back;
    # when it was, finish with a full verification pass
    if not repo.recover():
        return 1
    return hg.verify(repo)
3859 3859
3860 3860 @command('^remove|rm',
3861 3861 [('A', 'after', None, _('record delete for missing files')),
3862 3862 ('f', 'force', None,
3863 3863 _('remove (and delete) file even if added or modified')),
3864 3864 ] + walkopts,
3865 3865 _('[OPTION]... FILE...'))
3866 3866 def remove(ui, repo, *pats, **opts):
3867 3867 """remove the specified files on the next commit
3868 3868
3869 3869 Schedule the indicated files for removal from the repository.
3870 3870
3871 3871 This only removes files from the current branch, not from the
3872 3872 entire project history. -A/--after can be used to remove only
3873 3873 files that have already been deleted, -f/--force can be used to
3874 3874 force deletion, and -Af can be used to remove files from the next
3875 3875 revision without deleting them from the working directory.
3876 3876
3877 3877 The following table details the behavior of remove for different
3878 3878 file states (columns) and option combinations (rows). The file
3879 3879 states are Added [A], Clean [C], Modified [M] and Missing [!] (as
3880 3880 reported by :hg:`status`). The actions are Warn, Remove (from
3881 3881 branch) and Delete (from disk)::
3882 3882
3883 3883 A C M !
3884 3884 none W RD W R
3885 3885 -f R RD RD R
3886 3886 -A W W W R
3887 3887 -Af R R R R
3888 3888
3889 3889 Note that remove never deletes files in Added [A] state from the
3890 3890 working directory, not even if option --force is specified.
3891 3891
3892 3892 This command schedules the files to be removed at the next commit.
3893 3893 To undo a remove before that, see :hg:`revert`.
3894 3894
3895 3895 Returns 0 on success, 1 if any warnings encountered.
3896 3896 """
3897 3897
3898 3898 ret = 0
3899 3899 after, force = opts.get('after'), opts.get('force')
3900 3900 if not pats and not after:
3901 3901 raise util.Abort(_('no files specified'))
3902 3902
3903 3903 m = scmutil.match(repo, pats, opts)
3904 3904 s = repo.status(match=m, clean=True)
3905 3905 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
3906 3906
3907 3907 for f in m.files():
3908 3908 if f not in repo.dirstate and not os.path.isdir(m.rel(f)):
3909 3909 ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
3910 3910 ret = 1
3911 3911
3912 3912 if force:
3913 3913 list = modified + deleted + clean + added
3914 3914 elif after:
3915 3915 list = deleted
3916 3916 for f in modified + added + clean:
3917 3917 ui.warn(_('not removing %s: file still exists (use -f'
3918 3918 ' to force removal)\n') % m.rel(f))
3919 3919 ret = 1
3920 3920 else:
3921 3921 list = deleted + clean
3922 3922 for f in modified:
3923 3923 ui.warn(_('not removing %s: file is modified (use -f'
3924 3924 ' to force removal)\n') % m.rel(f))
3925 3925 ret = 1
3926 3926 for f in added:
3927 3927 ui.warn(_('not removing %s: file has been marked for add (use -f'
3928 3928 ' to force removal)\n') % m.rel(f))
3929 3929 ret = 1
3930 3930
3931 3931 for f in sorted(list):
3932 3932 if ui.verbose or not m.exact(f):
3933 3933 ui.status(_('removing %s\n') % m.rel(f))
3934 3934
3935 3935 wlock = repo.wlock()
3936 3936 try:
3937 3937 if not after:
3938 3938 for f in list:
3939 3939 if f in added:
3940 3940 continue # we never unlink added files on remove
3941 3941 try:
3942 3942 util.unlinkpath(repo.wjoin(f))
3943 3943 except OSError, inst:
3944 3944 if inst.errno != errno.ENOENT:
3945 3945 raise
3946 3946 repo[None].forget(list)
3947 3947 finally:
3948 3948 wlock.release()
3949 3949
3950 3950 return ret
3951 3951
@command('rename|move|mv',
    [('A', 'after', None, _('record a rename that has already occurred')),
    ('f', 'force', None, _('forcibly copy over an existing managed file')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... SOURCE... DEST'))
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If dest
    is a directory, copies are put in that directory. If dest is a
    file, there can only be one source.

    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.

    This command takes effect at the next commit. To undo a rename
    before that, see :hg:`revert`.

    Returns 0 on success, 1 if errors are encountered.
    """
    # a rename is copy-plus-remove; cmdutil.copy does all the work
    lock = repo.wlock(False)
    try:
        return cmdutil.copy(ui, repo, pats, opts, rename=True)
    finally:
        lock.release()
3978 3978
@command('resolve',
    [('a', 'all', None, _('select all unresolved files')),
    ('l', 'list', None, _('list state of files needing merge')),
    ('m', 'mark', None, _('mark files as resolved')),
    ('u', 'unmark', None, _('mark files as unresolved')),
    ('t', 'tool', '', _('specify merge tool')),
    ('n', 'no-status', None, _('hide status prefix'))]
    + walkopts,
    _('[OPTION]... [FILE]...'))
def resolve(ui, repo, *pats, **opts):
    """redo merges or set/view the merge status of files

    Merges with unresolved conflicts are often the result of
    non-interactive merging using the ``internal:merge`` configuration
    setting, or a command-line merge tool like ``diff3``. The resolve
    command is used to manage the files involved in a merge, after
    :hg:`merge` has been run, and before :hg:`commit` is run (i.e. the
    working directory must have two parents).

    The resolve command can be used in the following ways:

    - :hg:`resolve [--tool TOOL] FILE...`: attempt to re-merge the specified
      files, discarding any previous merge attempts. Re-merging is not
      performed for files already marked as resolved. Use ``--all/-a``
      to selects all unresolved files. ``--tool`` can be used to specify
      the merge tool used for the given files. It overrides the HGMERGE
      environment variable and your configuration files.

    - :hg:`resolve -m [FILE]`: mark a file as having been resolved
      (e.g. after having manually fixed-up the files). The default is
      to mark all unresolved files.

    - :hg:`resolve -u [FILE]...`: mark a file as unresolved. The
      default is to mark all resolved files.

    - :hg:`resolve -l`: list files which had or still have conflicts.
      In the printed list, ``U`` = unresolved and ``R`` = resolved.

    Note that Mercurial will not let you commit files with unresolved
    merge conflicts. You must use :hg:`resolve -m ...` before you can
    commit after a conflicting merge.

    Returns 0 on success, 1 if any files fail a resolve attempt.
    """

    # unpack the mutually constraining mode flags in one shot
    all, mark, unmark, show, nostatus = \
        [opts.get(o) for o in 'all mark unmark list no_status'.split()]

    # --list, --mark and --unmark are mutually exclusive modes
    if (show and (mark or unmark)) or (mark and unmark):
        raise util.Abort(_("too many options specified"))
    if pats and all:
        raise util.Abort(_("can't specify --all and patterns"))
    if not (all or pats or show or mark or unmark):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to remerge all files'))

    ms = mergemod.mergestate(repo)
    m = scmutil.match(repo, pats, opts)
    ret = 0

    # walk every file recorded in the merge state, acting only on
    # those selected by the match object
    for f in ms:
        if m(f):
            if show:
                if nostatus:
                    ui.write("%s\n" % f)
                else:
                    ui.write("%s %s\n" % (ms[f].upper(), f),
                             label='resolve.' +
                             {'u': 'unresolved', 'r': 'resolved'}[ms[f]])
            elif mark:
                ms.mark(f, "r")
            elif unmark:
                ms.mark(f, "u")
            else:
                # default mode: actually re-run the merge for this file
                wctx = repo[None]
                mctx = wctx.parents()[-1]

                # backup pre-resolve (merge uses .orig for its own purposes)
                a = repo.wjoin(f)
                util.copyfile(a, a + ".resolve")

                try:
                    # resolve file
                    ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
                    if ms.resolve(f, wctx, mctx):
                        # a nonzero result means the merge failed
                        ret = 1
                finally:
                    ui.setconfig('ui', 'forcemerge', '')

                # replace filemerge's .orig file with our resolve file
                util.rename(a + ".resolve", a + ".orig")

    ms.commit()
    return ret
4073 4073
@command('revert',
    [('a', 'all', None, _('revert all changes when no arguments given')),
    ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
    ('r', 'rev', '', _('revert to the specified revision'), _('REV')),
    ('', 'no-backup', None, _('do not save backup copies of files')),
    ] + walkopts + dryrunopts,
    _('[OPTION]... [-r REV] [NAME]...'))
def revert(ui, repo, *pats, **opts):
    """restore individual files or directories to an earlier state

    .. note::
       This command is most likely not what you are looking for.
       Revert will partially overwrite content in the working
       directory without changing the working directory parents. Use
       :hg:`update -r rev` to check out earlier revisions, or
       :hg:`update --clean .` to undo a merge which has added another
       parent.

    With no revision specified, revert the named files or directories
    to the contents they had in the parent of the working directory.
    This restores the contents of the affected files to an unmodified
    state and unschedules adds, removes, copies, and renames. If the
    working directory has two parents, you must explicitly specify a
    revision.

    Using the -r/--rev option, revert the given files or directories
    to their contents as of a specific revision. This can be helpful
    to "roll back" some or all of an earlier change. See :hg:`help
    dates` for a list of formats valid for -d/--date.

    Revert modifies the working directory. It does not commit any
    changes, or change the parent of the working directory. If you
    revert to a revision other than the parent of the working
    directory, the reverted files will thus appear modified
    afterwards.

    If a file has been deleted, it is restored. Files scheduled for
    addition are just unscheduled and left as they are. If the
    executable mode of a file was changed, it is reset.

    If names are given, all files matching the names are reverted.
    If no arguments are given, no files are reverted.

    Modified files are saved with a .orig suffix before reverting.
    To disable these backups, use --no-backup.

    Returns 0 on success.
    """

    # --date is translated into an equivalent --rev up front
    if opts.get("date"):
        if opts.get("rev"):
            raise util.Abort(_("you can't specify a revision and a date"))
        opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])

    parent, p2 = repo.dirstate.parents()
    if not opts.get('rev') and p2 != nullid:
        raise util.Abort(_('uncommitted merge - '
                           'use "hg update", see "hg help revert"'))

    if not pats and not opts.get('all'):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to revert the whole repo'))

    # target context: the revision whose content we restore
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    node = ctx.node()
    mf = ctx.manifest()
    if node == parent:
        pmf = mf
    else:
        # parent manifest is loaded lazily further down, only if needed
        pmf = None

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other.

    # names maps each affected path to (relative name, exact-match flag)
    names = {}

    wlock = repo.wlock()
    try:
        # walk dirstate.

        m = scmutil.match(repo, pats, opts)
        m.bad = lambda x, y: False
        for abs in repo.walk(m):
            names[abs] = m.rel(abs), m.exact(abs)

        # walk target manifest.

        def badfn(path, msg):
            # suppress the warning when the path (or a file under it)
            # was already picked up by the dirstate walk
            if path in names:
                return
            path_ = path + '/'
            for f in names:
                if f.startswith(path_):
                    return
            ui.warn("%s: %s\n" % (m.rel(path), msg))

        m = scmutil.match(repo, pats, opts)
        m.bad = badfn
        for abs in repo[node].walk(m):
            if abs not in names:
                names[abs] = m.rel(abs), m.exact(abs)

        m = scmutil.matchfiles(repo, names)
        changes = repo.status(match=m)[:4]
        modified, added, removed, deleted = map(set, changes)

        # if f is a rename, also revert the source
        cwd = repo.getcwd()
        for f in added:
            src = repo.dirstate.copied(f)
            if src and src not in names and repo.dirstate[src] == 'r':
                removed.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        def removeforget(abs):
            # choose the status message for the remove action depending on
            # whether the file was merely scheduled for add
            if repo.dirstate[abs] == 'a':
                return _('forgetting %s\n')
            return _('removing %s\n')

        # each action is a pair: (list of files to act on, status message
        # or a callable producing one)
        revert = ([], _('reverting %s\n'))
        add = ([], _('adding %s\n'))
        remove = ([], removeforget)
        undelete = ([], _('undeleting %s\n'))

        disptable = (
            # dispatch table:
            #   file state
            #   action if in target manifest
            #   action if not in target manifest
            #   make backup if in target manifest
            #   make backup if not in target manifest
            (modified, revert, remove, True, True),
            (added, revert, remove, True, False),
            (removed, undelete, None, False, False),
            (deleted, revert, remove, False, False),
            )

        for abs, (rel, exact) in sorted(names.items()):
            mfentry = mf.get(abs)
            target = repo.wjoin(abs)
            def handle(xlist, dobackup):
                # queue the file on the chosen action, optionally saving a
                # .orig backup and printing the action's status message
                xlist[0].append(abs)
                if (dobackup and not opts.get('no_backup') and
                    os.path.lexists(target)):
                    bakname = "%s.orig" % rel
                    ui.note(_('saving current version of %s as %s\n') %
                            (rel, bakname))
                    if not opts.get('dry_run'):
                        util.rename(target, bakname)
                if ui.verbose or not exact:
                    msg = xlist[1]
                    if not isinstance(msg, basestring):
                        msg = msg(abs)
                    ui.status(msg % rel)
            for table, hitlist, misslist, backuphit, backupmiss in disptable:
                if abs not in table:
                    continue
                # file has changed in dirstate
                if mfentry:
                    handle(hitlist, backuphit)
                elif misslist is not None:
                    handle(misslist, backupmiss)
                break
            else:
                # for-else: no dispatch entry matched, i.e. the file is
                # clean in the dirstate (or not tracked at all)
                if abs not in repo.dirstate:
                    if mfentry:
                        handle(add, True)
                    elif exact:
                        ui.warn(_('file not managed: %s\n') % rel)
                    continue
                # file has not changed in dirstate
                if node == parent:
                    if exact:
                        ui.warn(_('no changes needed to %s\n') % rel)
                    continue
                if pmf is None:
                    # only need parent manifest in this unlikely case,
                    # so do not read by default
                    pmf = repo[parent].manifest()
                if abs in pmf:
                    if mfentry:
                        # if version of file is same in parent and target
                        # manifests, do nothing
                        if (pmf[abs] != mfentry or
                            pmf.flags(abs) != mf.flags(abs)):
                            handle(revert, False)
                    else:
                        handle(remove, False)

        if not opts.get('dry_run'):
            # apply the queued actions to the working directory/dirstate
            def checkout(f):
                fc = ctx[f]
                repo.wwrite(f, fc.data(), fc.flags())

            audit_path = scmutil.pathauditor(repo.root)
            for f in remove[0]:
                if repo.dirstate[f] == 'a':
                    # scheduled-for-add: just unschedule, keep the file
                    repo.dirstate.drop(f)
                    continue
                audit_path(f)
                try:
                    util.unlinkpath(repo.wjoin(f))
                except OSError:
                    # file already gone from the working directory
                    pass
                repo.dirstate.remove(f)

            normal = None
            if node == parent:
                # We're reverting to our parent. If possible, we'd like status
                # to report the file as clean. We have to use normallookup for
                # merges to avoid losing information about merged/dirty files.
                if p2 != nullid:
                    normal = repo.dirstate.normallookup
                else:
                    normal = repo.dirstate.normal
            for f in revert[0]:
                checkout(f)
                if normal:
                    normal(f)

            for f in add[0]:
                checkout(f)
                repo.dirstate.add(f)

            normal = repo.dirstate.normallookup
            if node == parent and p2 == nullid:
                normal = repo.dirstate.normal
            for f in undelete[0]:
                checkout(f)
                normal(f)

    finally:
        wlock.release()
4308 4308
@command('rollback', dryrunopts)
def rollback(ui, repo, **opts):
    """roll back the last transaction (dangerous)

    This command should be used with care. There is only one level of
    rollback, and there is no way to undo a rollback. It will also
    restore the dirstate at the time of the last transaction, losing
    any dirstate changes since that time. This command does not alter
    the working directory.

    Transactions are used to encapsulate the effects of all commands
    that create new changesets or propagate existing changesets into a
    repository. For example, the following commands are transactional,
    and their effects can be rolled back:

    - commit
    - import
    - pull
    - push (with this repository as the destination)
    - unbundle

    This command is not intended for use on public repositories. Once
    changes are visible for pull by other users, rolling a transaction
    back locally is ineffective (someone else may already have pulled
    the changes). Furthermore, a race is possible with readers of the
    repository; for example an in-progress pull from the repository
    may fail if a rollback is performed.

    Returns 0 on success, 1 if no rollback data is available.
    """
    # all the real work happens in the repository object; we only
    # relay the --dry-run flag
    dry_run = opts.get('dry_run')
    return repo.rollback(dry_run)
4340 4340
@command('root', [])
def root(ui, repo):
    """print the root (top) of the current working directory

    Print the root directory of the current repository.

    Returns 0 on success.
    """
    # emit the repository root followed by a newline
    ui.write("%s\n" % repo.root)
4350 4350
@command('^serve',
    [('A', 'accesslog', '', _('name of access log file to write to'),
      _('FILE')),
     ('d', 'daemon', None, _('run server in background')),
     ('', 'daemon-pipefds', '', _('used internally by daemon mode'), _('NUM')),
     ('E', 'errorlog', '', _('name of error log file to write to'), _('FILE')),
     # use string type, then we can check if something was passed
     ('p', 'port', '', _('port to listen on (default: 8000)'), _('PORT')),
     ('a', 'address', '', _('address to listen on (default: all interfaces)'),
      _('ADDR')),
     ('', 'prefix', '', _('prefix path to serve from (default: server root)'),
      _('PREFIX')),
     ('n', 'name', '',
      _('name to show in web pages (default: working directory)'), _('NAME')),
     ('', 'web-conf', '',
      _('name of the hgweb config file (see "hg help hgweb")'), _('FILE')),
     ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'),
      _('FILE')),
     ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')),
     ('', 'stdio', None, _('for remote clients')),
     ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')),
     ('', 'style', '', _('template style to use'), _('STYLE')),
     ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
     ('', 'certificate', '', _('SSL certificate file'), _('FILE'))],
    _('[OPTION]...'))
def serve(ui, repo, **opts):
    """start stand-alone webserver

    Start a local HTTP repository browser and pull server. You can use
    this for ad-hoc sharing and browsing of repositories. It is
    recommended to use a real web server to serve a repository for
    longer periods of time.

    Please note that the server does not implement access control.
    This means that, by default, anybody can read from the server and
    nobody can write to it by default. Set the ``web.allow_push``
    option to ``*`` to allow everybody to push to the server. You
    should use a real web server if you need to authenticate users.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the -A/--accesslog and -E/--errorlog options to log to
    files.

    To have the server choose a free port number to listen on, specify
    a port number of 0; in this case, the server will print the port
    number it uses.

    Returns 0 on success.
    """

    # --stdio is the wire-protocol mode used by ssh clients; it serves the
    # current repository over stdin/stdout and never returns normally.
    if opts["stdio"]:
        if repo is None:
            raise error.RepoError(_("There is no Mercurial repository here"
                                    " (.hg not found)"))
        s = sshserver.sshserver(ui, repo)
        s.serve_forever()

    # this way we can check if something was given in the command-line
    if opts.get('port'):
        opts['port'] = util.getport(opts.get('port'))

    # mirror the command-line options into the "web" config section so the
    # hgweb machinery picks them up; write to both baseui and repo.ui when
    # they differ.
    baseui = repo and repo.baseui or ui
    optlist = ("name templates style address port prefix ipv6"
               " accesslog errorlog certificate encoding")
    for o in optlist.split():
        val = opts.get(o, '')
        if val in (None, ''): # should check against default options instead
            continue
        baseui.setconfig("web", o, val)
        if repo and repo.ui != baseui:
            repo.ui.setconfig("web", o, val)

    # serve either an hgweb config file (--web-conf / --webdir-conf) or,
    # failing that, the current repository's root directory.
    o = opts.get('web_conf') or opts.get('webdir_conf')
    if not o:
        if not repo:
            raise error.RepoError(_("There is no Mercurial repository"
                                    " here (.hg not found)"))
        o = repo.root

    # NOTE(review): the app is created with baseui=ui rather than the baseui
    # computed above that received the setconfig calls — confirm intended.
    app = hgweb.hgweb(o, baseui=ui)

    class service(object):
        def init(self):
            util.setsignalhandler()
            self.httpd = hgweb.server.create_server(ui, app)

            # when an explicit --port was given, only announce the address
            # in verbose mode (ui.status below); quiet otherwise
            if opts['port'] and not ui.verbose:
                return

            if self.httpd.prefix:
                prefix = self.httpd.prefix.strip('/') + '/'
            else:
                prefix = ''

            port = ':%d' % self.httpd.port
            if port == ':80':
                port = ''  # default HTTP port needs no suffix in the URL

            bindaddr = self.httpd.addr
            if bindaddr == '0.0.0.0':
                bindaddr = '*'
            elif ':' in bindaddr: # IPv6
                bindaddr = '[%s]' % bindaddr

            fqaddr = self.httpd.fqaddr
            if ':' in fqaddr:
                fqaddr = '[%s]' % fqaddr
            if opts['port']:
                write = ui.status
            else:
                write = ui.write
            write(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
                  (fqaddr, port, prefix, bindaddr, self.httpd.port))

        def run(self):
            self.httpd.serve_forever()

    service = service()

    # cmdutil.service handles --daemon / --pid-file plumbing around init/run
    cmdutil.service(opts, initfn=service.init, runfn=service.run)
4471 4471
@command('showconfig|debugconfig',
    [('u', 'untrusted', None, _('show untrusted configuration options'))],
    _('[-u] [NAME]...'))
def showconfig(ui, repo, *values, **opts):
    """show combined config settings from all hgrc files

    With no arguments, print names and values of all config items.

    With one argument of the form section.name, print just the value
    of that config item.

    With multiple arguments, print names and values of all config
    items with matching section names.

    With --debug, the source (filename and line number) is printed
    for each config item.

    Returns 0 on success.
    """

    # in --debug mode, list every hgrc file consulted
    for f in scmutil.rcpath():
        ui.debug(_('read config from: %s\n') % f)
    untrusted = bool(opts.get('untrusted'))
    if values:
        # bare words select whole sections; dotted names select single items
        sections = [v for v in values if '.' not in v]
        items = [v for v in values if '.' in v]
        # at most one dotted item, and it cannot be mixed with section names
        if len(items) > 1 or items and sections:
            raise util.Abort(_('only one config item permitted'))
    for section, name, value in ui.walkconfig(untrusted=untrusted):
        # keep each setting on a single output line
        value = str(value).replace('\n', '\\n')
        sectname = section + '.' + name
        if values:
            for v in values:
                if v == section:
                    # section match: print the fully qualified name too
                    ui.debug('%s: ' %
                             ui.configsource(section, name, untrusted))
                    ui.write('%s=%s\n' % (sectname, value))
                elif v == sectname:
                    # exact item match: print the value alone
                    ui.debug('%s: ' %
                             ui.configsource(section, name, untrusted))
                    ui.write(value, '\n')
        else:
            # no filter: dump everything as name=value
            ui.debug('%s: ' %
                     ui.configsource(section, name, untrusted))
            ui.write('%s=%s\n' % (sectname, value))
4517 4517
@command('^status|st',
    [('A', 'all', None, _('show status of all files')),
     ('m', 'modified', None, _('show only modified files')),
     ('a', 'added', None, _('show only added files')),
     ('r', 'removed', None, _('show only removed files')),
     ('d', 'deleted', None, _('show only deleted (but tracked) files')),
     ('c', 'clean', None, _('show only files without changes')),
     ('u', 'unknown', None, _('show only unknown (not tracked) files')),
     ('i', 'ignored', None, _('show only ignored files')),
     ('n', 'no-status', None, _('hide status prefix')),
     ('C', 'copies', None, _('show source of copied files')),
     ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
     ('', 'rev', [], _('show difference from revision'), _('REV')),
     ('', 'change', '', _('list the changed files of a revision'), _('REV')),
    ] + walkopts + subrepoopts,
    _('[OPTION]... [FILE]...'))
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show status of files in the repository. If names are given, only
    files that match are shown. Files that are clean or ignored or
    the source of a copy/move operation, are not listed unless
    -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
    Unless options described with "show only ..." are given, the
    options -mardu are used.

    Option -q/--quiet hides untracked (unknown and ignored) files
    unless explicitly requested with -u/--unknown or -i/--ignored.

    .. note::
       status may appear to disagree with diff if permissions have
       changed or a merge has occurred. The standard diff format does
       not report permission changes and diff only reports changes
       relative to one merge parent.

    If one revision is given, it is used as the base revision.
    If two revisions are given, the differences between them are
    shown. The --change option can also be used as a shortcut to list
    the changed files of a revision from its first parent.

    The codes used to show the status of files are::

      M = modified
      A = added
      R = removed
      C = clean
      ! = missing (deleted by non-hg command, but still tracked)
      ? = not tracked
      I = ignored
        = origin of the previous file listed as A (added)

    Returns 0 on success.
    """

    revs = opts.get('rev')
    change = opts.get('change')

    # resolve the (base, target) node pair from --rev/--change
    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        # --change REV compares REV against its first parent
        node2 = repo.lookup(change)
        node1 = repo[node2].p1().node()
    else:
        node1, node2 = scmutil.revpair(repo, revs)

    cwd = (pats and repo.getcwd()) or ''
    # --print0 terminates each entry with NUL for use with xargs -0
    end = opts.get('print0') and '\0' or '\n'
    copy = {}
    # order here must match the order of flag characters in 'MAR!?IC' below
    states = 'modified added removed deleted unknown ignored clean'.split()
    show = [k for k in states if opts.get(k)]
    if opts.get('all'):
        show += ui.quiet and (states[:4] + ['clean']) or states
    if not show:
        # default: -mardu, or -mard under --quiet
        show = ui.quiet and states[:4] or states[:5]

    stat = repo.status(node1, node2, scmutil.match(repo, pats, opts),
                       'ignored' in show, 'clean' in show, 'unknown' in show,
                       opts.get('subrepos'))
    changestates = zip(states, 'MAR!?IC', stat)

    # compute the copy/rename map only when it will be displayed
    if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
        ctxn = repo[nullid]
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        added = stat[1]
        if node2 is None:
            added = stat[0] + stat[1] # merged?

        for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
            if k in added:
                copy[k] = v
            elif v in added:
                copy[v] = k

    for state, char, files in changestates:
        if state in show:
            # "M path\n" by default; just "path\n" with --no-status
            format = "%s %%s%s" % (char, end)
            if opts.get('no_status'):
                format = "%%s%s" % end

            for f in files:
                ui.write(format % repo.pathto(f, cwd),
                         label='status.' + state)
                if f in copy:
                    # indented line naming the copy/rename source
                    ui.write('  %s%s' % (repo.pathto(copy[f], cwd), end),
                             label='status.copied')
4625 4625
@command('^summary|sum',
    [('', 'remote', None, _('check for push and pull'))], '[--remote]')
def summary(ui, repo, **opts):
    """summarize working directory state

    This generates a brief summary of the working directory state,
    including parents, branch, commit status, and available updates.

    With the --remote option, this will check the default paths for
    incoming and outgoing changes. This can be time-consuming.

    Returns 0 on success.
    """

    ctx = repo[None]
    parents = ctx.parents()
    pnode = parents[0].node()

    # --- parent lines ---
    for p in parents:
        # label with log.changeset (instead of log.parent) since this
        # shows a working directory parent *changeset*:
        ui.write(_('parent: %d:%s ') % (p.rev(), str(p)),
                 label='log.changeset')
        ui.write(' '.join(p.tags()), label='log.tag')
        if p.bookmarks():
            ui.write(' ' + ' '.join(p.bookmarks()), label='log.bookmark')
        if p.rev() == -1:
            # nullrev parent: nothing is checked out
            if not len(repo):
                ui.write(_(' (empty repository)'))
            else:
                ui.write(_(' (no revision checked out)'))
        ui.write('\n')
        if p.description():
            # first line of the commit message only
            ui.status(' ' + p.description().splitlines()[0].strip() + '\n',
                      label='log.summary')

    # --- branch line (quiet-suppressed for the default branch) ---
    branch = ctx.branch()
    bheads = repo.branchheads(branch)
    m = _('branch: %s\n') % branch
    if branch != 'default':
        ui.write(m, label='log.branch')
    else:
        ui.status(m, label='log.branch')

    # --- commit line: counts of modified/added/removed/... files ---
    # st is [modified, added, removed, deleted, unknown, ignored]
    st = list(repo.status(unknown=True))[:6]

    # split copies out of the added/removed lists into renamed/copied,
    # then insert those two lists so st lines up with 'labels' below
    c = repo.dirstate.copies()
    copied, renamed = [], []
    for d, s in c.iteritems():
        if s in st[2]:
            # source was removed: this is a rename, not a copy
            st[2].remove(s)
            renamed.append(d)
        else:
            copied.append(d)
        if d in st[1]:
            st[1].remove(d)
    st.insert(3, renamed)
    st.insert(4, copied)

    # unresolved merge conflicts
    ms = mergemod.mergestate(repo)
    st.append([f for f in ms if ms[f] == 'u'])

    # dirty subrepositories
    subs = [s for s in ctx.substate if ctx.sub(s).dirty()]
    st.append(subs)

    # one label per st entry, in the same order
    labels = [ui.label(_('%d modified'), 'status.modified'),
              ui.label(_('%d added'), 'status.added'),
              ui.label(_('%d removed'), 'status.removed'),
              ui.label(_('%d renamed'), 'status.copied'),
              ui.label(_('%d copied'), 'status.copied'),
              ui.label(_('%d deleted'), 'status.deleted'),
              ui.label(_('%d unknown'), 'status.unknown'),
              ui.label(_('%d ignored'), 'status.ignored'),
              ui.label(_('%d unresolved'), 'resolve.unresolved'),
              ui.label(_('%d subrepos'), 'status.modified')]
    t = []
    for s, l in zip(st, labels):
        if s:
            t.append(l % len(s))

    t = ', '.join(t)
    cleanworkdir = False

    if len(parents) > 1:
        t += _(' (merge)')
    elif branch != parents[0].branch():
        t += _(' (new branch)')
    elif (parents[0].extra().get('close') and
          pnode in repo.branchheads(branch, closed=True)):
        t += _(' (head closed)')
    elif not (st[0] or st[1] or st[2] or st[3] or st[4] or st[9]):
        # nothing modified/added/removed/renamed/copied and no dirty subrepos
        t += _(' (clean)')
        cleanworkdir = True
    elif pnode not in bheads:
        t += _(' (new branch head)')

    if cleanworkdir:
        ui.status(_('commit: %s\n') % t.strip())
    else:
        ui.write(_('commit: %s\n') % t.strip())

    # all ancestors of branch heads - all ancestors of parent = new csets
    new = [0] * len(repo)
    cl = repo.changelog
    for a in [cl.rev(n) for n in bheads]:
        new[a] = 1
    for a in cl.ancestors(*[cl.rev(n) for n in bheads]):
        new[a] = 1
    for a in [p.rev() for p in parents]:
        if a >= 0:
            new[a] = 0
    for a in cl.ancestors(*[p.rev() for p in parents]):
        new[a] = 0
    new = sum(new)

    if new == 0:
        ui.status(_('update: (current)\n'))
    elif pnode not in bheads:
        ui.write(_('update: %d new changesets (update)\n') % new)
    else:
        ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
                 (new, len(bheads)))

    # --- optional remote line: incoming/outgoing against default paths ---
    if opts.get('remote'):
        t = []
        source, branches = hg.parseurl(ui.expandpath('default'))
        other = hg.repository(hg.remoteui(repo, {}), source)
        revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
        ui.debug('comparing with %s\n' % util.hidepassword(source))
        # buffer discovery chatter so only our summary lines are shown
        repo.ui.pushbuffer()
        commoninc = discovery.findcommonincoming(repo, other)
        _common, incoming, _rheads = commoninc
        repo.ui.popbuffer()
        if incoming:
            t.append(_('1 or more incoming'))

        dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
        if source != dest:
            # separate push target: redo discovery against it
            other = hg.repository(hg.remoteui(repo, {}), dest)
            commoninc = None
            ui.debug('comparing with %s\n' % util.hidepassword(dest))
        repo.ui.pushbuffer()
        common, outheads = discovery.findcommonoutgoing(repo, other,
                                                        commoninc=commoninc)
        repo.ui.popbuffer()
        o = repo.changelog.findmissing(common=common, heads=outheads)
        if o:
            t.append(_('%d outgoing') % len(o))
        if 'bookmarks' in other.listkeys('namespaces'):
            lmarks = repo.listkeys('bookmarks')
            rmarks = other.listkeys('bookmarks')
            diff = set(rmarks) - set(lmarks)
            if len(diff) > 0:
                t.append(_('%d incoming bookmarks') % len(diff))
            diff = set(lmarks) - set(rmarks)
            if len(diff) > 0:
                t.append(_('%d outgoing bookmarks') % len(diff))

        if t:
            ui.write(_('remote: %s\n') % (', '.join(t)))
        else:
            ui.status(_('remote: (synced)\n'))
4789 4789
@command('tag',
    [('f', 'force', None, _('force tag')),
     ('l', 'local', None, _('make the tag local')),
     ('r', 'rev', '', _('revision to tag'), _('REV')),
     ('', 'remove', None, _('remove a tag')),
     # -l/--local is already there, commitopts cannot be used
     ('e', 'edit', None, _('edit commit message')),
     ('m', 'message', '', _('use <text> as commit message'), _('TEXT')),
    ] + commitopts2,
    _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'))
def tag(ui, repo, name1, *names, **opts):
    """add one or more tags for the current or given revision

    Name a particular revision using <name>.

    Tags are used to name particular revisions of the repository and are
    very useful to compare different revisions, to go back to significant
    earlier versions or to mark branch points as releases, etc. Changing
    an existing tag is normally disallowed; use -f/--force to override.

    If no revision is given, the parent of the working directory is
    used, or tip if no revision is checked out.

    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed similarly
    to other project files and can be hand-edited if necessary. This
    also means that tagging creates a new commit. The file
    ".hg/localtags" is used for local tags (not shared among
    repositories).

    Tag commits are usually made at the head of a branch. If the parent
    of the working directory is not a branch head, :hg:`tag` aborts; use
    -f/--force to force the tag commit to be based on a non-head
    changeset.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Since tag names have priority over branch names during revision
    lookup, using an existing branch name as a tag name is discouraged.

    Returns 0 on success.
    """

    # default: tag the working directory parent
    rev_ = "."
    names = [t.strip() for t in (name1,) + names]
    if len(names) != len(set(names)):
        raise util.Abort(_('tag names must be unique'))
    for n in names:
        # these names are reserved revision identifiers
        if n in ['tip', '.', 'null']:
            raise util.Abort(_("the name '%s' is reserved") % n)
        if not n:
            raise util.Abort(_('tag names cannot consist entirely of whitespace'))
    if opts.get('rev') and opts.get('remove'):
        raise util.Abort(_("--rev and --remove are incompatible"))
    if opts.get('rev'):
        rev_ = opts['rev']
    message = opts.get('message')
    if opts.get('remove'):
        # removal only applies to tags of the matching scope (local/global)
        expectedtype = opts.get('local') and 'local' or 'global'
        for n in names:
            if not repo.tagtype(n):
                raise util.Abort(_("tag '%s' does not exist") % n)
            if repo.tagtype(n) != expectedtype:
                if expectedtype == 'global':
                    raise util.Abort(_("tag '%s' is not a global tag") % n)
                else:
                    raise util.Abort(_("tag '%s' is not a local tag") % n)
        # a tag pointing at nullid is how removal is recorded
        rev_ = nullid
        if not message:
            # we don't translate commit messages
            message = 'Removed tag %s' % ', '.join(names)
    elif not opts.get('force'):
        for n in names:
            if n in repo.tags():
                raise util.Abort(_("tag '%s' already exists "
                                   "(use -f to force)") % n)
    if not opts.get('local'):
        # global tags create a commit, so the working dir must be sane
        p1, p2 = repo.dirstate.parents()
        if p2 != nullid:
            raise util.Abort(_('uncommitted merge'))
        bheads = repo.branchheads()
        if not opts.get('force') and bheads and p1 not in bheads:
            raise util.Abort(_('not at a branch head (use -f to force)'))
    r = scmutil.revsingle(repo, rev_).node()

    if not message:
        # we don't translate commit messages
        message = ('Added tag %s for changeset %s' %
                   (', '.join(names), short(r)))

    date = opts.get('date')
    if date:
        date = util.parsedate(date)

    if opts.get('edit'):
        message = ui.edit(message, ui.username())

    repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
4888 4888
@command('tags', [], '')
def tags(ui, repo):
    """list repository tags

    This lists both regular and local tags. When the -v/--verbose
    switch is used, a third column "local" is printed for local tags.

    Returns 0 on success.
    """

    # full node hashes under --debug, short hashes otherwise
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    marker = ""

    for name, node in reversed(repo.tagslist()):
        # quiet mode: names only
        if ui.quiet:
            ui.write("%s\n" % name)
            continue

        revcol = "%5d:%s" % (repo.changelog.rev(node), hexfunc(node))
        # pad names out to a 30-column display width
        padding = " " * (30 - encoding.colwidth(name))

        if ui.verbose:
            # third column marking local (unshared) tags
            if repo.tagtype(name) == 'local':
                marker = " local"
            else:
                marker = ""
        ui.write("%s%s %s%s\n" % (name, padding, revcol, marker))
4917 4917
@command('tip',
    [('p', 'patch', None, _('show patch')),
     ('g', 'git', None, _('use git extended diff format')),
    ] + templateopts,
    _('[-p] [-g]'))
def tip(ui, repo, **opts):
    """show the tip revision

    The tip revision (usually just called the tip) is the changeset
    most recently added to the repository (and therefore the most
    recently changed head).

    If you have just made a commit, that commit will be the tip. If
    you have just pulled changes from another repository, the tip of
    that repository becomes the current tip. The "tip" tag is special
    and cannot be renamed or assigned to a different changeset.

    Returns 0 on success.
    """
    displayer = cmdutil.show_changeset(ui, repo, opts)
    # the tip is always the highest revision number: len(repo) - 1
    displayer.show(repo[len(repo) - 1])
    displayer.close()
4940 4940
@command('unbundle',
    [('u', 'update', None,
      _('update to new branch head if changesets were unbundled'))],
    _('[-u] FILE...'))
def unbundle(ui, repo, fname1, *fnames, **opts):
    """apply one or more changegroup files

    Apply one or more compressed changegroup files generated by the
    bundle command.

    Returns 0 on success, 1 if an update has unresolved files.
    """
    fnames = (fname1,) + fnames

    # hold the repo lock across all bundles; released in the finally block
    lock = repo.lock()
    wc = repo['.']
    try:
        for fname in fnames:
            f = url.open(ui, fname)
            gen = changegroup.readbundle(f, fname)
            modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname,
                                           lock=lock)
            bookmarks.updatecurrentbookmark(repo, wc.node(), wc.branch())
    finally:
        lock.release()
    # NOTE(review): only the head count from the *last* bundle reaches
    # postincoming — confirm this is the intended behavior for FILE...
    return postincoming(ui, repo, modheads, opts.get('update'), None)
4967 4967
@command('^update|up|checkout|co',
    [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
     ('c', 'check', None,
      _('update across branches if no uncommitted changes')),
     ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
     ('r', 'rev', '', _('revision'), _('REV'))],
    _('[-c] [-C] [-d DATE] [[-r] REV]'))
def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False):
    """update working directory (or switch revisions)

    Update the repository's working directory to the specified
    changeset. If no changeset is specified, update to the tip of the
    current named branch.

    If the changeset is not a descendant of the working directory's
    parent, the update is aborted. With the -c/--check option, the
    working directory is checked for uncommitted changes; if none are
    found, the working directory is updated to the specified
    changeset.

    The following rules apply when the working directory contains
    uncommitted changes:

    1. If neither -c/--check nor -C/--clean is specified, and if
       the requested changeset is an ancestor or descendant of
       the working directory's parent, the uncommitted changes
       are merged into the requested changeset and the merged
       result is left uncommitted. If the requested changeset is
       not an ancestor or descendant (that is, it is on another
       branch), the update is aborted and the uncommitted changes
       are preserved.

    2. With the -c/--check option, the update is aborted and the
       uncommitted changes are preserved.

    3. With the -C/--clean option, uncommitted changes are discarded and
       the working directory is updated to the requested changeset.

    Use null as the changeset to remove the working directory (like
    :hg:`clone -U`).

    If you want to update just one file to an older changeset, use
    :hg:`revert`.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success, 1 if there are unresolved files.
    """
    # the revision may arrive positionally (node) or via -r (rev), not both
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if rev is None or rev == '':
        rev = node

    # if we defined a bookmark, we have to remember the original bookmark name
    brev = rev
    rev = scmutil.revsingle(repo, rev, rev).rev()

    if check and clean:
        raise util.Abort(_("cannot specify both -c/--check and -C/--clean"))

    if check:
        # we could use dirty() but we can ignore merge and branch trivia
        c = repo[None]
        if c.modified() or c.added() or c.removed():
            raise util.Abort(_("uncommitted local changes"))

    if date:
        if rev is not None:
            raise util.Abort(_("you can't specify a revision and a date"))
        rev = cmdutil.finddate(ui, repo, date)

    # --clean and --check both take the non-merging update path
    if clean or check:
        ret = hg.clean(repo, rev)
    else:
        ret = hg.update(repo, rev)

    # updating to a bookmark also makes it the current bookmark
    if brev in repo._bookmarks:
        bookmarks.setcurrent(repo, brev)

    return ret
5049 5049
@command('verify', [])
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.

    Returns 0 on success, 1 if errors are encountered.
    """
    # all of the work (and the exit status) is delegated to hg.verify
    return hg.verify(repo)
5064 5064
@command('version', [])
def version_(ui):
    """output version and copyright information"""
    # the version line is always printed; the notice honours --quiet
    banner = _("Mercurial Distributed SCM (version %s)\n") % util.version()
    ui.write(banner)
    notice = _(
        "(see http://mercurial.selenic.com for more information)\n"
        "\nCopyright (C) 2005-2011 Matt Mackall and others\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    )
    ui.status(notice)
5077 5077
# space-separated list of commands that never take a repository argument
norepo = ("clone init version help debugcommands debugcomplete"
          " debugdate debuginstall debugfsinfo debugpushkey debugwireargs"
          " debugknown debuggetbundle debugbundle")
# space-separated list of commands where the repository argument is optional
optionalrepo = ("identify paths serve showconfig debugancestor debugdag"
                " debugdata debugindex debugindexdot debugrevlog")
@@ -1,267 +1,267
1 1 # copies.py - copy detection for Mercurial
2 2 #
3 3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import util
9 9 import heapq
10 10
11 11 def _nonoverlap(d1, d2, d3):
12 12 "Return list of elements in d1 not in d2 or d3"
13 13 return sorted([d for d in d1 if d not in d3 and d not in d2])
14 14
15 15 def _dirname(f):
16 16 s = f.rfind("/")
17 17 if s == -1:
18 18 return ""
19 19 return f[:s]
20 20
21 21 def _dirs(files):
22 22 d = set()
23 23 for f in files:
24 24 f = _dirname(f)
25 25 while f not in d:
26 26 d.add(f)
27 27 f = _dirname(f)
28 28 return d
29 29
30 30 def _findlimit(repo, a, b):
31 31 """Find the earliest revision that's an ancestor of a or b but not both,
32 32 None if no such revision exists.
33 33 """
34 34 # basic idea:
35 35 # - mark a and b with different sides
36 36 # - if a parent's children are all on the same side, the parent is
37 37 # on that side, otherwise it is on no side
38 38 # - walk the graph in topological order with the help of a heap;
39 39 # - add unseen parents to side map
40 40 # - clear side of any parent that has children on different sides
41 41 # - track number of interesting revs that might still be on a side
42 42 # - track the lowest interesting rev seen
43 43 # - quit when interesting revs is zero
44 44
45 45 cl = repo.changelog
46 46 working = len(cl) # pseudo rev for the working directory
47 47 if a is None:
48 48 a = working
49 49 if b is None:
50 50 b = working
51 51
52 52 side = {a: -1, b: 1}
53 53 visit = [-a, -b]
54 54 heapq.heapify(visit)
55 55 interesting = len(visit)
56 56 hascommonancestor = False
57 57 limit = working
58 58
59 59 while interesting:
60 60 r = -heapq.heappop(visit)
61 61 if r == working:
62 62 parents = [cl.rev(p) for p in repo.dirstate.parents()]
63 63 else:
64 64 parents = cl.parentrevs(r)
65 65 for p in parents:
66 66 if p < 0:
67 67 continue
68 68 if p not in side:
69 69 # first time we see p; add it to visit
70 70 side[p] = side[r]
71 71 if side[p]:
72 72 interesting += 1
73 73 heapq.heappush(visit, -p)
74 74 elif side[p] and side[p] != side[r]:
75 75 # p was interesting but now we know better
76 76 side[p] = 0
77 77 interesting -= 1
78 78 hascommonancestor = True
79 79 if side[r]:
80 80 limit = r # lowest rev visited
81 81 interesting -= 1
82 82
83 83 if not hascommonancestor:
84 84 return None
85 85 return limit
86 86
87 87 def copies(repo, c1, c2, ca, checkdirs=False):
88 88 """
89 89 Find moves and copies between context c1 and c2
90 90 """
91 91 # avoid silly behavior for update from empty dir
92 92 if not c1 or not c2 or c1 == c2:
93 93 return {}, {}
94 94
95 95 # avoid silly behavior for parent -> working dir
96 96 if c2.node() is None and c1.node() == repo.dirstate.p1():
97 97 return repo.dirstate.copies(), {}
98 98
99 99 limit = _findlimit(repo, c1.rev(), c2.rev())
100 100 if limit is None:
101 101 # no common ancestor, no copies
102 102 return {}, {}
103 103 m1 = c1.manifest()
104 104 m2 = c2.manifest()
105 105 ma = ca.manifest()
106 106
107 107 def makectx(f, n):
108 108 if len(n) != 20: # in a working context?
109 109 if c1.rev() is None:
110 110 return c1.filectx(f)
111 111 return c2.filectx(f)
112 112 return repo.filectx(f, fileid=n)
113 113
114 114 ctx = util.lrucachefunc(makectx)
115 115 copy = {}
116 116 fullcopy = {}
117 117 diverge = {}
118 118
119 119 def related(f1, f2, limit):
120 120 # Walk back to common ancestor to see if the two files originate
121 121 # from the same file. Since workingfilectx's rev() is None it messes
122 122 # up the integer comparison logic, hence the pre-step check for
123 123 # None (f1 and f2 can only be workingfilectx's initially).
124 124
125 125 if f1 == f2:
126 126 return f1 # a match
127 127
128 128 g1, g2 = f1.ancestors(), f2.ancestors()
129 129 try:
130 130 f1r, f2r = f1.rev(), f2.rev()
131 131
132 132 if f1r is None:
133 133 f1 = g1.next()
134 134 if f2r is None:
135 135 f2 = g2.next()
136 136
137 while 1:
137 while True:
138 138 f1r, f2r = f1.rev(), f2.rev()
139 139 if f1r > f2r:
140 140 f1 = g1.next()
141 141 elif f2r > f1r:
142 142 f2 = g2.next()
143 143 elif f1 == f2:
144 144 return f1 # a match
145 145 elif f1r == f2r or f1r < limit or f2r < limit:
146 146 return False # copy no longer relevant
147 147 except StopIteration:
148 148 return False
149 149
150 150 def checkcopies(f, m1, m2):
151 151 '''check possible copies of f from m1 to m2'''
152 152 of = None
153 153 seen = set([f])
154 154 for oc in ctx(f, m1[f]).ancestors():
155 155 ocr = oc.rev()
156 156 of = oc.path()
157 157 if of in seen:
158 158 # check limit late - grab last rename before
159 159 if ocr < limit:
160 160 break
161 161 continue
162 162 seen.add(of)
163 163
164 164 fullcopy[f] = of # remember for dir rename detection
165 165 if of not in m2:
166 166 continue # no match, keep looking
167 167 if m2[of] == ma.get(of):
168 168 break # no merge needed, quit early
169 169 c2 = ctx(of, m2[of])
170 170 cr = related(oc, c2, ca.rev())
171 171 if cr and (of == f or of == c2.path()): # non-divergent
172 172 copy[f] = of
173 173 of = None
174 174 break
175 175
176 176 if of in ma:
177 177 diverge.setdefault(of, []).append(f)
178 178
179 179 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
180 180
181 181 u1 = _nonoverlap(m1, m2, ma)
182 182 u2 = _nonoverlap(m2, m1, ma)
183 183
184 184 if u1:
185 185 repo.ui.debug(" unmatched files in local:\n %s\n"
186 186 % "\n ".join(u1))
187 187 if u2:
188 188 repo.ui.debug(" unmatched files in other:\n %s\n"
189 189 % "\n ".join(u2))
190 190
191 191 for f in u1:
192 192 checkcopies(f, m1, m2)
193 193 for f in u2:
194 194 checkcopies(f, m2, m1)
195 195
196 196 diverge2 = set()
197 197 for of, fl in diverge.items():
198 198 if len(fl) == 1 or of in c2:
199 199 del diverge[of] # not actually divergent, or not a rename
200 200 else:
201 201 diverge2.update(fl) # reverse map for below
202 202
203 203 if fullcopy:
204 204 repo.ui.debug(" all copies found (* = to merge, ! = divergent):\n")
205 205 for f in fullcopy:
206 206 note = ""
207 207 if f in copy:
208 208 note += "*"
209 209 if f in diverge2:
210 210 note += "!"
211 211 repo.ui.debug(" %s -> %s %s\n" % (f, fullcopy[f], note))
212 212 del diverge2
213 213
214 214 if not fullcopy or not checkdirs:
215 215 return copy, diverge
216 216
217 217 repo.ui.debug(" checking for directory renames\n")
218 218
219 219 # generate a directory move map
220 220 d1, d2 = _dirs(m1), _dirs(m2)
221 221 invalid = set()
222 222 dirmove = {}
223 223
224 224 # examine each file copy for a potential directory move, which is
225 225 # when all the files in a directory are moved to a new directory
226 226 for dst, src in fullcopy.iteritems():
227 227 dsrc, ddst = _dirname(src), _dirname(dst)
228 228 if dsrc in invalid:
229 229 # already seen to be uninteresting
230 230 continue
231 231 elif dsrc in d1 and ddst in d1:
232 232 # directory wasn't entirely moved locally
233 233 invalid.add(dsrc)
234 234 elif dsrc in d2 and ddst in d2:
235 235 # directory wasn't entirely moved remotely
236 236 invalid.add(dsrc)
237 237 elif dsrc in dirmove and dirmove[dsrc] != ddst:
238 238 # files from the same directory moved to two different places
239 239 invalid.add(dsrc)
240 240 else:
241 241 # looks good so far
242 242 dirmove[dsrc + "/"] = ddst + "/"
243 243
244 244 for i in invalid:
245 245 if i in dirmove:
246 246 del dirmove[i]
247 247 del d1, d2, invalid
248 248
249 249 if not dirmove:
250 250 return copy, diverge
251 251
252 252 for d in dirmove:
253 253 repo.ui.debug(" dir %s -> %s\n" % (d, dirmove[d]))
254 254
255 255 # check unaccounted nonoverlapping files against directory moves
256 256 for f in u1 + u2:
257 257 if f not in fullcopy:
258 258 for d in dirmove:
259 259 if f.startswith(d):
260 260 # new file added in a directory that was moved, move it
261 261 df = dirmove[d] + f[len(d):]
262 262 if df not in copy:
263 263 copy[f] = df
264 264 repo.ui.debug(" file %s -> %s\n" % (f, copy[f]))
265 265 break
266 266
267 267 return copy, diverge
@@ -1,88 +1,88
1 1 #
2 2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import cgi, cStringIO, zlib, sys, urllib
9 9 from mercurial import util, wireproto
10 10 from common import HTTP_OK
11 11
12 12 HGTYPE = 'application/mercurial-0.1'
13 13
14 14 class webproto(object):
15 15 def __init__(self, req):
16 16 self.req = req
17 17 self.response = ''
18 18 def getargs(self, args):
19 19 knownargs = self._args()
20 20 data = {}
21 21 keys = args.split()
22 22 for k in keys:
23 23 if k == '*':
24 24 star = {}
25 25 for key in knownargs.keys():
26 26 if key != 'cmd' and key not in keys:
27 27 star[key] = knownargs[key][0]
28 28 data['*'] = star
29 29 else:
30 30 data[k] = knownargs[k][0]
31 31 return [data[k] for k in keys]
32 32 def _args(self):
33 33 args = self.req.form.copy()
34 34 chunks = []
35 35 i = 1
36 while 1:
36 while True:
37 37 h = self.req.env.get('HTTP_X_HGARG_' + str(i))
38 38 if h is None:
39 39 break
40 40 chunks += [h]
41 41 i += 1
42 42 args.update(cgi.parse_qs(''.join(chunks), keep_blank_values=True))
43 43 return args
44 44 def getfile(self, fp):
45 45 length = int(self.req.env['CONTENT_LENGTH'])
46 46 for s in util.filechunkiter(self.req, limit=length):
47 47 fp.write(s)
48 48 def redirect(self):
49 49 self.oldio = sys.stdout, sys.stderr
50 50 sys.stderr = sys.stdout = cStringIO.StringIO()
51 51 def groupchunks(self, cg):
52 52 z = zlib.compressobj()
53 while 1:
53 while True:
54 54 chunk = cg.read(4096)
55 55 if not chunk:
56 56 break
57 57 yield z.compress(chunk)
58 58 yield z.flush()
59 59 def _client(self):
60 60 return 'remote:%s:%s:%s' % (
61 61 self.req.env.get('wsgi.url_scheme') or 'http',
62 62 urllib.quote(self.req.env.get('REMOTE_HOST', '')),
63 63 urllib.quote(self.req.env.get('REMOTE_USER', '')))
64 64
65 65 def iscmd(cmd):
66 66 return cmd in wireproto.commands
67 67
68 68 def call(repo, req, cmd):
69 69 p = webproto(req)
70 70 rsp = wireproto.dispatch(repo, p, cmd)
71 71 if isinstance(rsp, str):
72 72 req.respond(HTTP_OK, HGTYPE, length=len(rsp))
73 73 return [rsp]
74 74 elif isinstance(rsp, wireproto.streamres):
75 75 req.respond(HTTP_OK, HGTYPE)
76 76 return rsp.gen
77 77 elif isinstance(rsp, wireproto.pushres):
78 78 val = sys.stdout.getvalue()
79 79 sys.stdout, sys.stderr = p.oldio
80 80 req.respond(HTTP_OK, HGTYPE)
81 81 return ['%d\n%s' % (rsp.res, val)]
82 82 elif isinstance(rsp, wireproto.pusherr):
83 83 # drain the incoming bundle
84 84 req.drain()
85 85 sys.stdout, sys.stderr = p.oldio
86 86 rsp = '0\n%s\n' % rsp.res
87 87 req.respond(HTTP_OK, HGTYPE, length=len(rsp))
88 88 return [rsp]
@@ -1,765 +1,765
1 1 # This library is free software; you can redistribute it and/or
2 2 # modify it under the terms of the GNU Lesser General Public
3 3 # License as published by the Free Software Foundation; either
4 4 # version 2.1 of the License, or (at your option) any later version.
5 5 #
6 6 # This library is distributed in the hope that it will be useful,
7 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 9 # Lesser General Public License for more details.
10 10 #
11 11 # You should have received a copy of the GNU Lesser General Public
12 12 # License along with this library; if not, write to the
13 13 # Free Software Foundation, Inc.,
14 14 # 59 Temple Place, Suite 330,
15 15 # Boston, MA 02111-1307 USA
16 16
17 17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
18 18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
19 19
20 20 # Modified by Benoit Boissinot:
21 21 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
22 22 # Modified by Dirkjan Ochtman:
23 23 # - import md5 function from a local util module
24 24 # Modified by Martin Geisler:
25 25 # - moved md5 function from local util module to this module
26 26 # Modified by Augie Fackler:
27 27 # - add safesend method and use it to prevent broken pipe errors
28 28 # on large POST requests
29 29
30 30 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
31 31
32 32 >>> import urllib2
33 33 >>> from keepalive import HTTPHandler
34 34 >>> keepalive_handler = HTTPHandler()
35 35 >>> opener = urllib2.build_opener(keepalive_handler)
36 36 >>> urllib2.install_opener(opener)
37 37 >>>
38 38 >>> fo = urllib2.urlopen('http://www.python.org')
39 39
40 40 If a connection to a given host is requested, and all of the existing
41 41 connections are still in use, another connection will be opened. If
42 42 the handler tries to use an existing connection but it fails in some
43 43 way, it will be closed and removed from the pool.
44 44
45 45 To remove the handler, simply re-run build_opener with no arguments, and
46 46 install that opener.
47 47
48 48 You can explicitly close connections by using the close_connection()
49 49 method of the returned file-like object (described below) or you can
50 50 use the handler methods:
51 51
52 52 close_connection(host)
53 53 close_all()
54 54 open_connections()
55 55
56 56 NOTE: using the close_connection and close_all methods of the handler
57 57 should be done with care when using multiple threads.
58 58 * there is nothing that prevents another thread from creating new
59 59 connections immediately after connections are closed
60 60 * no checks are done to prevent in-use connections from being closed
61 61
62 62 >>> keepalive_handler.close_all()
63 63
64 64 EXTRA ATTRIBUTES AND METHODS
65 65
66 66 Upon a status of 200, the object returned has a few additional
67 67 attributes and methods, which should not be used if you want to
68 68 remain consistent with the normal urllib2-returned objects:
69 69
70 70 close_connection() - close the connection to the host
71 71 readlines() - you know, readlines()
72 72 status - the return status (ie 404)
73 73 reason - English translation of status (ie 'File not found')
74 74
75 75 If you want the best of both worlds, use this inside an
76 76 AttributeError-catching try:
77 77
78 78 >>> try: status = fo.status
79 79 >>> except AttributeError: status = None
80 80
81 81 Unfortunately, these are ONLY there if status == 200, so it's not
82 82 easy to distinguish between non-200 responses. The reason is that
83 83 urllib2 tries to do clever things with error codes 301, 302, 401,
84 84 and 407, and it wraps the object upon return.
85 85
86 86 For python versions earlier than 2.4, you can avoid this fancy error
87 87 handling by setting the module-level global HANDLE_ERRORS to zero.
88 88 You see, prior to 2.4, it's the HTTP Handler's job to determine what
89 89 to handle specially, and what to just pass up. HANDLE_ERRORS == 0
90 90 means "pass everything up". In python 2.4, however, this job no
91 91 longer belongs to the HTTP Handler and is now done by a NEW handler,
92 92 HTTPErrorProcessor. Here's the bottom line:
93 93
94 94 python version < 2.4
95 95 HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
96 96 errors
97 97 HANDLE_ERRORS == 0 pass everything up, error processing is
98 98 left to the calling code
99 99 python version >= 2.4
100 100 HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
101 101 HANDLE_ERRORS == 0 (default) pass everything up, let the
102 102 other handlers (specifically,
103 103 HTTPErrorProcessor) decide what to do
104 104
105 105 In practice, setting the variable either way makes little difference
106 106 in python 2.4, so for the most consistent behavior across versions,
107 107 you probably just want to use the defaults, which will give you
108 108 exceptions on errors.
109 109
110 110 """
111 111
112 112 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
113 113
114 114 import errno
115 115 import httplib
116 116 import socket
117 117 import thread
118 118 import urllib2
119 119
120 120 DEBUG = None
121 121
122 122 import sys
123 123 if sys.version_info < (2, 4):
124 124 HANDLE_ERRORS = 1
125 125 else: HANDLE_ERRORS = 0
126 126
127 127 class ConnectionManager:
128 128 """
129 129 The connection manager must be able to:
130 130 * keep track of all existing
131 131 """
132 132 def __init__(self):
133 133 self._lock = thread.allocate_lock()
134 134 self._hostmap = {} # map hosts to a list of connections
135 135 self._connmap = {} # map connections to host
136 136 self._readymap = {} # map connection to ready state
137 137
138 138 def add(self, host, connection, ready):
139 139 self._lock.acquire()
140 140 try:
141 141 if not host in self._hostmap:
142 142 self._hostmap[host] = []
143 143 self._hostmap[host].append(connection)
144 144 self._connmap[connection] = host
145 145 self._readymap[connection] = ready
146 146 finally:
147 147 self._lock.release()
148 148
149 149 def remove(self, connection):
150 150 self._lock.acquire()
151 151 try:
152 152 try:
153 153 host = self._connmap[connection]
154 154 except KeyError:
155 155 pass
156 156 else:
157 157 del self._connmap[connection]
158 158 del self._readymap[connection]
159 159 self._hostmap[host].remove(connection)
160 160 if not self._hostmap[host]: del self._hostmap[host]
161 161 finally:
162 162 self._lock.release()
163 163
164 164 def set_ready(self, connection, ready):
165 165 try:
166 166 self._readymap[connection] = ready
167 167 except KeyError:
168 168 pass
169 169
170 170 def get_ready_conn(self, host):
171 171 conn = None
172 172 self._lock.acquire()
173 173 try:
174 174 if host in self._hostmap:
175 175 for c in self._hostmap[host]:
176 176 if self._readymap[c]:
177 177 self._readymap[c] = 0
178 178 conn = c
179 179 break
180 180 finally:
181 181 self._lock.release()
182 182 return conn
183 183
184 184 def get_all(self, host=None):
185 185 if host:
186 186 return list(self._hostmap.get(host, []))
187 187 else:
188 188 return dict(self._hostmap)
189 189
190 190 class KeepAliveHandler:
191 191 def __init__(self):
192 192 self._cm = ConnectionManager()
193 193
194 194 #### Connection Management
195 195 def open_connections(self):
196 196 """return a list of connected hosts and the number of connections
197 197 to each. [('foo.com:80', 2), ('bar.org', 1)]"""
198 198 return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
199 199
200 200 def close_connection(self, host):
201 201 """close connection(s) to <host>
202 202 host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
203 203 no error occurs if there is no connection to that host."""
204 204 for h in self._cm.get_all(host):
205 205 self._cm.remove(h)
206 206 h.close()
207 207
208 208 def close_all(self):
209 209 """close all open connections"""
210 210 for host, conns in self._cm.get_all().iteritems():
211 211 for h in conns:
212 212 self._cm.remove(h)
213 213 h.close()
214 214
215 215 def _request_closed(self, request, host, connection):
216 216 """tells us that this request is now closed and the
217 217 connection is ready for another request"""
218 218 self._cm.set_ready(connection, 1)
219 219
220 220 def _remove_connection(self, host, connection, close=0):
221 221 if close:
222 222 connection.close()
223 223 self._cm.remove(connection)
224 224
225 225 #### Transaction Execution
226 226 def http_open(self, req):
227 227 return self.do_open(HTTPConnection, req)
228 228
229 229 def do_open(self, http_class, req):
230 230 host = req.get_host()
231 231 if not host:
232 232 raise urllib2.URLError('no host given')
233 233
234 234 try:
235 235 h = self._cm.get_ready_conn(host)
236 236 while h:
237 237 r = self._reuse_connection(h, req, host)
238 238
239 239 # if this response is non-None, then it worked and we're
240 240 # done. Break out, skipping the else block.
241 241 if r:
242 242 break
243 243
244 244 # connection is bad - possibly closed by server
245 245 # discard it and ask for the next free connection
246 246 h.close()
247 247 self._cm.remove(h)
248 248 h = self._cm.get_ready_conn(host)
249 249 else:
250 250 # no (working) free connections were found. Create a new one.
251 251 h = http_class(host)
252 252 if DEBUG:
253 253 DEBUG.info("creating new connection to %s (%d)",
254 254 host, id(h))
255 255 self._cm.add(host, h, 0)
256 256 self._start_transaction(h, req)
257 257 r = h.getresponse()
258 258 except (socket.error, httplib.HTTPException), err:
259 259 raise urllib2.URLError(err)
260 260
261 261 # if not a persistent connection, don't try to reuse it
262 262 if r.will_close:
263 263 self._cm.remove(h)
264 264
265 265 if DEBUG:
266 266 DEBUG.info("STATUS: %s, %s", r.status, r.reason)
267 267 r._handler = self
268 268 r._host = host
269 269 r._url = req.get_full_url()
270 270 r._connection = h
271 271 r.code = r.status
272 272 r.headers = r.msg
273 273 r.msg = r.reason
274 274
275 275 if r.status == 200 or not HANDLE_ERRORS:
276 276 return r
277 277 else:
278 278 return self.parent.error('http', req, r,
279 279 r.status, r.msg, r.headers)
280 280
281 281 def _reuse_connection(self, h, req, host):
282 282 """start the transaction with a re-used connection
283 283 return a response object (r) upon success or None on failure.
284 284 This DOES not close or remove bad connections in cases where
285 285 it returns. However, if an unexpected exception occurs, it
286 286 will close and remove the connection before re-raising.
287 287 """
288 288 try:
289 289 self._start_transaction(h, req)
290 290 r = h.getresponse()
291 291 # note: just because we got something back doesn't mean it
292 292 # worked. We'll check the version below, too.
293 293 except (socket.error, httplib.HTTPException):
294 294 r = None
295 295 except:
296 296 # adding this block just in case we've missed
297 297 # something we will still raise the exception, but
298 298 # lets try and close the connection and remove it
299 299 # first. We previously got into a nasty loop
300 300 # where an exception was uncaught, and so the
301 301 # connection stayed open. On the next try, the
302 302 # same exception was raised, etc. The tradeoff is
303 303 # that it's now possible this call will raise
304 304 # a DIFFERENT exception
305 305 if DEBUG:
306 306 DEBUG.error("unexpected exception - closing "
307 307 "connection to %s (%d)", host, id(h))
308 308 self._cm.remove(h)
309 309 h.close()
310 310 raise
311 311
312 312 if r is None or r.version == 9:
313 313 # httplib falls back to assuming HTTP 0.9 if it gets a
314 314 # bad header back. This is most likely to happen if
315 315 # the socket has been closed by the server since we
316 316 # last used the connection.
317 317 if DEBUG:
318 318 DEBUG.info("failed to re-use connection to %s (%d)",
319 319 host, id(h))
320 320 r = None
321 321 else:
322 322 if DEBUG:
323 323 DEBUG.info("re-using connection to %s (%d)", host, id(h))
324 324
325 325 return r
326 326
327 327 def _start_transaction(self, h, req):
328 328 # What follows mostly reimplements HTTPConnection.request()
329 329 # except it adds self.parent.addheaders in the mix.
330 330 headers = req.headers.copy()
331 331 if sys.version_info >= (2, 4):
332 332 headers.update(req.unredirected_hdrs)
333 333 headers.update(self.parent.addheaders)
334 334 headers = dict((n.lower(), v) for n, v in headers.items())
335 335 skipheaders = {}
336 336 for n in ('host', 'accept-encoding'):
337 337 if n in headers:
338 338 skipheaders['skip_' + n.replace('-', '_')] = 1
339 339 try:
340 340 if req.has_data():
341 341 data = req.get_data()
342 342 h.putrequest('POST', req.get_selector(), **skipheaders)
343 343 if 'content-type' not in headers:
344 344 h.putheader('Content-type',
345 345 'application/x-www-form-urlencoded')
346 346 if 'content-length' not in headers:
347 347 h.putheader('Content-length', '%d' % len(data))
348 348 else:
349 349 h.putrequest('GET', req.get_selector(), **skipheaders)
350 350 except (socket.error), err:
351 351 raise urllib2.URLError(err)
352 352 for k, v in headers.items():
353 353 h.putheader(k, v)
354 354 h.endheaders()
355 355 if req.has_data():
356 356 h.send(data)
357 357
358 358 class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
359 359 pass
360 360
361 361 class HTTPResponse(httplib.HTTPResponse):
362 362 # we need to subclass HTTPResponse in order to
363 363 # 1) add readline() and readlines() methods
364 364 # 2) add close_connection() methods
365 365 # 3) add info() and geturl() methods
366 366
367 367 # in order to add readline(), read must be modified to deal with a
368 368 # buffer. example: readline must read a buffer and then spit back
369 369 # one line at a time. The only real alternative is to read one
370 370 # BYTE at a time (ick). Once something has been read, it can't be
371 371 # put back (ok, maybe it can, but that's even uglier than this),
372 372 # so if you THEN do a normal read, you must first take stuff from
373 373 # the buffer.
374 374
375 375 # the read method wraps the original to accommodate buffering,
376 376 # although read() never adds to the buffer.
377 377 # Both readline and readlines have been stolen with almost no
378 378 # modification from socket.py
379 379
380 380
381 381 def __init__(self, sock, debuglevel=0, strict=0, method=None):
382 382 if method: # the httplib in python 2.3 uses the method arg
383 383 httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
384 384 else: # 2.2 doesn't
385 385 httplib.HTTPResponse.__init__(self, sock, debuglevel)
386 386 self.fileno = sock.fileno
387 387 self.code = None
388 388 self._rbuf = ''
389 389 self._rbufsize = 8096
390 390 self._handler = None # inserted by the handler later
391 391 self._host = None # (same)
392 392 self._url = None # (same)
393 393 self._connection = None # (same)
394 394
395 395 _raw_read = httplib.HTTPResponse.read
396 396
397 397 def close(self):
398 398 if self.fp:
399 399 self.fp.close()
400 400 self.fp = None
401 401 if self._handler:
402 402 self._handler._request_closed(self, self._host,
403 403 self._connection)
404 404
405 405 def close_connection(self):
406 406 self._handler._remove_connection(self._host, self._connection, close=1)
407 407 self.close()
408 408
409 409 def info(self):
410 410 return self.headers
411 411
412 412 def geturl(self):
413 413 return self._url
414 414
415 415 def read(self, amt=None):
416 416 # the _rbuf test is only in this first if for speed. It's not
417 417 # logically necessary
418 418 if self._rbuf and not amt is None:
419 419 L = len(self._rbuf)
420 420 if amt > L:
421 421 amt -= L
422 422 else:
423 423 s = self._rbuf[:amt]
424 424 self._rbuf = self._rbuf[amt:]
425 425 return s
426 426
427 427 s = self._rbuf + self._raw_read(amt)
428 428 self._rbuf = ''
429 429 return s
430 430
431 431 # stolen from Python SVN #68532 to fix issue1088
432 432 def _read_chunked(self, amt):
433 433 chunk_left = self.chunk_left
434 434 value = ''
435 435
436 436 # XXX This accumulates chunks by repeated string concatenation,
437 437 # which is not efficient as the number or size of chunks gets big.
438 438 while True:
439 439 if chunk_left is None:
440 440 line = self.fp.readline()
441 441 i = line.find(';')
442 442 if i >= 0:
443 443 line = line[:i] # strip chunk-extensions
444 444 try:
445 445 chunk_left = int(line, 16)
446 446 except ValueError:
447 447 # close the connection as protocol synchronisation is
448 448 # probably lost
449 449 self.close()
450 450 raise httplib.IncompleteRead(value)
451 451 if chunk_left == 0:
452 452 break
453 453 if amt is None:
454 454 value += self._safe_read(chunk_left)
455 455 elif amt < chunk_left:
456 456 value += self._safe_read(amt)
457 457 self.chunk_left = chunk_left - amt
458 458 return value
459 459 elif amt == chunk_left:
460 460 value += self._safe_read(amt)
461 461 self._safe_read(2) # toss the CRLF at the end of the chunk
462 462 self.chunk_left = None
463 463 return value
464 464 else:
465 465 value += self._safe_read(chunk_left)
466 466 amt -= chunk_left
467 467
468 468 # we read the whole chunk, get another
469 469 self._safe_read(2) # toss the CRLF at the end of the chunk
470 470 chunk_left = None
471 471
472 472 # read and discard trailer up to the CRLF terminator
473 473 ### note: we shouldn't have any trailers!
474 474 while True:
475 475 line = self.fp.readline()
476 476 if not line:
477 477 # a vanishingly small number of sites EOF without
478 478 # sending the trailer
479 479 break
480 480 if line == '\r\n':
481 481 break
482 482
483 483 # we read everything; close the "file"
484 484 self.close()
485 485
486 486 return value
487 487
488 488 def readline(self, limit=-1):
489 489 i = self._rbuf.find('\n')
490 490 while i < 0 and not (0 < limit <= len(self._rbuf)):
491 491 new = self._raw_read(self._rbufsize)
492 492 if not new:
493 493 break
494 494 i = new.find('\n')
495 495 if i >= 0:
496 496 i = i + len(self._rbuf)
497 497 self._rbuf = self._rbuf + new
498 498 if i < 0:
499 499 i = len(self._rbuf)
500 500 else:
501 501 i = i + 1
502 502 if 0 <= limit < len(self._rbuf):
503 503 i = limit
504 504 data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
505 505 return data
506 506
507 507 def readlines(self, sizehint = 0):
508 508 total = 0
509 509 list = []
510 while 1:
510 while True:
511 511 line = self.readline()
512 512 if not line:
513 513 break
514 514 list.append(line)
515 515 total += len(line)
516 516 if sizehint and total >= sizehint:
517 517 break
518 518 return list
519 519
520 520 def safesend(self, str):
521 521 """Send `str' to the server.
522 522
523 523 Shamelessly ripped off from httplib to patch a bad behavior.
524 524 """
525 525 # _broken_pipe_resp is an attribute we set in this function
526 526 # if the socket is closed while we're sending data but
527 527 # the server sent us a response before hanging up.
528 528 # In that case, we want to pretend to send the rest of the
529 529 # outgoing data, and then let the user use getresponse()
530 530 # (which we wrap) to get this last response before
531 531 # opening a new socket.
532 532 if getattr(self, '_broken_pipe_resp', None) is not None:
533 533 return
534 534
535 535 if self.sock is None:
536 536 if self.auto_open:
537 537 self.connect()
538 538 else:
539 539 raise httplib.NotConnected()
540 540
541 541 # send the data to the server. if we get a broken pipe, then close
542 542 # the socket. we want to reconnect when somebody tries to send again.
543 543 #
544 544 # NOTE: we DO propagate the error, though, because we cannot simply
545 545 # ignore the error... the caller will know if they can retry.
546 546 if self.debuglevel > 0:
547 547 print "send:", repr(str)
548 548 try:
549 549 blocksize = 8192
550 550 if hasattr(str,'read') :
551 551 if self.debuglevel > 0:
552 552 print "sendIng a read()able"
553 553 data = str.read(blocksize)
554 554 while data:
555 555 self.sock.sendall(data)
556 556 data = str.read(blocksize)
557 557 else:
558 558 self.sock.sendall(str)
559 559 except socket.error, v:
560 560 reraise = True
561 561 if v[0] == errno.EPIPE: # Broken pipe
562 562 if self._HTTPConnection__state == httplib._CS_REQ_SENT:
563 563 self._broken_pipe_resp = None
564 564 self._broken_pipe_resp = self.getresponse()
565 565 reraise = False
566 566 self.close()
567 567 if reraise:
568 568 raise
569 569
570 570 def wrapgetresponse(cls):
571 571 """Wraps getresponse in cls with a broken-pipe sane version.
572 572 """
573 573 def safegetresponse(self):
574 574 # In safesend() we might set the _broken_pipe_resp
575 575 # attribute, in which case the socket has already
576 576 # been closed and we just need to give them the response
577 577 # back. Otherwise, we use the normal response path.
578 578 r = getattr(self, '_broken_pipe_resp', None)
579 579 if r is not None:
580 580 return r
581 581 return cls.getresponse(self)
582 582 safegetresponse.__doc__ = cls.getresponse.__doc__
583 583 return safegetresponse
584 584
class HTTPConnection(httplib.HTTPConnection):
    # Keep-alive-capable HTTPConnection: same interface as the stdlib
    # class, with the response/send/getresponse members replaced by the
    # broken-pipe-aware versions defined above.
    # use the modified response class
    response_class = HTTPResponse
    # send() that survives EPIPE by capturing the server's final response
    send = safesend
    # getresponse() that returns the captured response when one is pending
    getresponse = wrapgetresponse(httplib.HTTPConnection)
590 590
591 591
592 592 #########################################################################
593 593 ##### TEST FUNCTIONS
594 594 #########################################################################
595 595
def error_handler(url):
    """Exercise fetching url with fancy error handling both off and on.

    Toggles the module-global HANDLE_ERRORS flag, fetches url through a
    keep-alive opener under each setting, prints the observed status,
    and restores the original flag value before closing all connections.
    Best run against a non-200 URL.
    """
    global HANDLE_ERRORS
    orig = HANDLE_ERRORS  # remember setting so it can be restored below
    keepalive_handler = HTTPHandler()
    opener = urllib2.build_opener(keepalive_handler)
    urllib2.install_opener(opener)
    pos = {0: 'off', 1: 'on'}
    for i in (0, 1):
        print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
        HANDLE_ERRORS = i
        try:
            fo = urllib2.urlopen(url)
            fo.read()
            fo.close()
            try:
                status, reason = fo.status, fo.reason
            except AttributeError:
                # plain file objects have no status/reason attributes
                status, reason = None, None
        except IOError, e:
            print " EXCEPTION: %s" % e
            raise
        else:
            print " status = %s, reason = %s" % (status, reason)
    HANDLE_ERRORS = orig
    hosts = keepalive_handler.open_connections()
    print "open connections:", hosts
    keepalive_handler.close_all()
623 623
def md5(s):
    """Hash s with MD5, lazily binding the best available implementation.

    The first call resolves the real constructor (hashlib.md5, falling
    back to the legacy md5 module on very old Pythons) and rebinds the
    module-level name, so subsequent calls skip this shim entirely.
    """
    global md5
    try:
        from hashlib import md5 as impl
    except ImportError:
        from md5 import md5 as impl
    # self-replacing shim: future callers go straight to the real thing
    md5 = impl
    return impl(s)
632 632
633 633 def continuity(url):
634 634 format = '%25s: %s'
635 635
636 636 # first fetch the file with the normal http handler
637 637 opener = urllib2.build_opener()
638 638 urllib2.install_opener(opener)
639 639 fo = urllib2.urlopen(url)
640 640 foo = fo.read()
641 641 fo.close()
642 642 m = md5.new(foo)
643 643 print format % ('normal urllib', m.hexdigest())
644 644
645 645 # now install the keepalive handler and try again
646 646 opener = urllib2.build_opener(HTTPHandler())
647 647 urllib2.install_opener(opener)
648 648
649 649 fo = urllib2.urlopen(url)
650 650 foo = fo.read()
651 651 fo.close()
652 652 m = md5.new(foo)
653 653 print format % ('keepalive read', m.hexdigest())
654 654
655 655 fo = urllib2.urlopen(url)
656 656 foo = ''
657 while 1:
657 while True:
658 658 f = fo.readline()
659 659 if f:
660 660 foo = foo + f
661 661 else: break
662 662 fo.close()
663 663 m = md5.new(foo)
664 664 print format % ('keepalive readline', m.hexdigest())
665 665
def comp(N, url):
    """Time N fetches of url with the normal handlers vs keep-alive.

    Prints both wall-clock times and the speedup factor.
    """
    print ' making %i connections to:\n %s' % (N, url)

    sys.stdout.write(' first using the normal urllib handlers')
    # first use normal opener
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    t1 = fetch(N, url)
    print ' TIME: %.3f s' % t1

    sys.stdout.write(' now using the keepalive handler ')
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    t2 = fetch(N, url)
    print ' TIME: %.3f s' % t2
    print ' improvement factor: %.2f' % (t1 / t2)
683 683
684 684 def fetch(N, url, delay=0):
685 685 import time
686 686 lens = []
687 687 starttime = time.time()
688 688 for i in range(N):
689 689 if delay and i > 0:
690 690 time.sleep(delay)
691 691 fo = urllib2.urlopen(url)
692 692 foo = fo.read()
693 693 fo.close()
694 694 lens.append(len(foo))
695 695 diff = time.time() - starttime
696 696
697 697 j = 0
698 698 for i in lens[1:]:
699 699 j = j + 1
700 700 if not i == lens[0]:
701 701 print "WARNING: inconsistent length on read %i: %i" % (j, i)
702 702
703 703 return diff
704 704
def test_timeout(url):
    """Check that a dropped (server-closed) connection is recovered.

    Fetches url once, waits long enough for the server to time out the
    idle keep-alive connection, fetches again, and compares the data.
    Temporarily replaces the module-global DEBUG logger so the handler's
    internal activity is printed; restores it before returning.
    """
    global DEBUG
    dbbackup = DEBUG  # saved so the real logger can be restored at the end
    class FakeLogger:
        # minimal logger: print everything at every level
        def debug(self, msg, *args):
            print msg % args
        info = warning = error = debug
    DEBUG = FakeLogger()
    print " fetching the file to establish a connection"
    fo = urllib2.urlopen(url)
    data1 = fo.read()
    fo.close()

    i = 20
    print " waiting %i seconds for the server to close the connection" % i
    while i > 0:
        # countdown on one line via carriage return
        sys.stdout.write('\r %2i' % i)
        sys.stdout.flush()
        time.sleep(1)
        i -= 1
    sys.stderr.write('\r')

    print " fetching the file a second time"
    fo = urllib2.urlopen(url)
    data2 = fo.read()
    fo.close()

    if data1 == data2:
        print ' data are identical'
    else:
        print ' ERROR: DATA DIFFER'

    DEBUG = dbbackup
738 738
739 739
740 740 def test(url, N=10):
741 741 print "checking error hander (do this on a non-200)"
742 742 try: error_handler(url)
743 743 except IOError:
744 744 print "exiting - exception will prevent further tests"
745 745 sys.exit()
746 746 print
747 747 print "performing continuity test (making sure stuff isn't corrupted)"
748 748 continuity(url)
749 749 print
750 750 print "performing speed comparison"
751 751 comp(N, url)
752 752 print
753 753 print "performing dropped-connection check"
754 754 test_timeout(url)
755 755
756 756 if __name__ == '__main__':
757 757 import time
758 758 import sys
759 759 try:
760 760 N = int(sys.argv[1])
761 761 url = sys.argv[2]
762 762 except:
763 763 print "%s <integer> <url>" % sys.argv[0]
764 764 else:
765 765 test(url, N)
@@ -1,1982 +1,1982
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
class localrepository(repo.repository):
    """A Mercurial repository backed by a local .hg directory."""
    # wire/peer capabilities this repository class advertises
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    # on-disk revlog format requirements this code can read and write
    supportedformats = set(('revlogv1', 'generaldelta'))
    # full set of .hg/requires entries this code understands
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
27 27
    def __init__(self, baseui, path=None, create=False):
        """Open (or, when create is True, initialize) the repository at path.

        Reads .hg/hgrc and loads extensions, creates the on-disk layout
        for new repositories, validates .hg/requires for existing ones,
        honours .hg/sharedpath, and wires up the store and caches.
        Raises error.RepoError when path does not contain a repository
        (or already does, with create=True).
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no hgrc is fine; extensions just come from global config
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                # choose on-disk format features from config
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # pre-requires-file repository: no requirements
                requirements = set()

        # honour share extension: the store may live in another repo
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath"))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
112 112
113 113 def _applyrequirements(self, requirements):
114 114 self.requirements = requirements
115 115 openerreqs = set(('revlogv1', 'generaldelta'))
116 116 self.sopener.options = dict((r, 1) for r in requirements
117 117 if r in openerreqs)
118 118
119 119 def _writerequirements(self):
120 120 reqfile = self.opener("requires", "w")
121 121 for r in self.requirements:
122 122 reqfile.write("%s\n" % r)
123 123 reqfile.close()
124 124
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    # path is itself a subrepo entry of the working ctx
                    return True
                else:
                    # delegate: ask the subrepo about the remainder
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # strip the last path component and retry
                parts.pop()
        return False
161 161
    @util.propertycache
    def _bookmarks(self):
        # name -> node mapping read from .hg/bookmarks (computed once)
        return bookmarks.read(self)

    @util.propertycache
    def _bookmarkcurrent(self):
        # name of the active bookmark, if any (computed once)
        return bookmarks.readcurrent(self)

    @propertycache
    def changelog(self):
        # The changelog revlog. When HG_PENDING points at this repo,
        # also load not-yet-committed (pending) revisions so hooks can
        # see the transaction in progress.
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @propertycache
    def manifest(self):
        # the manifest revlog (computed once)
        return manifest.manifest(self.sopener)
182 182
    @propertycache
    def dirstate(self):
        # one-element list so the closure below can mutate the flag;
        # ensures the warning is emitted at most once
        warned = [0]
        def validate(node):
            # map unknown working-directory parents to nullid instead of
            # crashing on a damaged dirstate
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
198 198
    def __getitem__(self, changeid):
        # repo[None] is the working directory; anything else resolves to
        # a changectx
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        # membership via lookup(); unresolvable revisions are just False
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        # number of revisions in the changelog
        return len(self.changelog)

    def __iter__(self):
        # iterate over revision numbers, oldest first
        for i in xrange(len(self)):
            yield i

    def url(self):
        # URL form of this repository (always a local file: path)
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        # run the configured hooks for event 'name'
        return hook.hook(self.ui, self, name, throw, **args)

    # characters that may never appear in a tag name
    tag_disallowed = ':\r\n'
227 227
228 228 def _tag(self, names, node, message, local, user, date, extra={}):
229 229 if isinstance(names, str):
230 230 allchars = names
231 231 names = (names,)
232 232 else:
233 233 allchars = ''.join(names)
234 234 for c in self.tag_disallowed:
235 235 if c in allchars:
236 236 raise util.Abort(_('%r cannot be used in a tag name') % c)
237 237
238 238 branches = self.branchmap()
239 239 for name in names:
240 240 self.hook('pretag', throw=True, node=hex(node), tag=name,
241 241 local=local)
242 242 if name in branches:
243 243 self.ui.warn(_("warning: tag %s conflicts with existing"
244 244 " branch name\n") % name)
245 245
246 246 def writetags(fp, names, munge, prevtags):
247 247 fp.seek(0, 2)
248 248 if prevtags and prevtags[-1] != '\n':
249 249 fp.write('\n')
250 250 for name in names:
251 251 m = munge and munge(name) or name
252 252 if self._tagtypes and name in self._tagtypes:
253 253 old = self._tags.get(name, nullid)
254 254 fp.write('%s %s\n' % (hex(old), m))
255 255 fp.write('%s %s\n' % (hex(node), m))
256 256 fp.close()
257 257
258 258 prevtags = ''
259 259 if local:
260 260 try:
261 261 fp = self.opener('localtags', 'r+')
262 262 except IOError:
263 263 fp = self.opener('localtags', 'a')
264 264 else:
265 265 prevtags = fp.read()
266 266
267 267 # local tags are stored in the current charset
268 268 writetags(fp, names, None, prevtags)
269 269 for name in names:
270 270 self.hook('tag', node=hex(node), tag=name, local=local)
271 271 return
272 272
273 273 try:
274 274 fp = self.wfile('.hgtags', 'rb+')
275 275 except IOError:
276 276 fp = self.wfile('.hgtags', 'ab')
277 277 else:
278 278 prevtags = fp.read()
279 279
280 280 # committed tags are stored in UTF-8
281 281 writetags(fp, names, encoding.fromlocal, prevtags)
282 282
283 283 fp.close()
284 284
285 285 if '.hgtags' not in self.dirstate:
286 286 self[None].add(['.hgtags'])
287 287
288 288 m = matchmod.exact(self.root, '', ['.hgtags'])
289 289 tagnode = self.commit(message, user, date, extra=extra, match=m)
290 290
291 291 for name in names:
292 292 self.hook('tag', node=hex(node), tag=name, local=local)
293 293
294 294 return tagnode
295 295
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to commit a tag while .hgtags itself has
            # uncommitted modifications in the working copy
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
325 325
    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        # 'tip' is implicit and always present
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        # ensure the tag/tagtype caches are populated
        self.tags()

        return self._tagtypes.get(tagname)
383 383
384 384 def tagslist(self):
385 385 '''return a list of tags ordered by revision'''
386 386 l = []
387 387 for t, n in self.tags().iteritems():
388 388 r = self.changelog.rev(n)
389 389 l.append((r, t, n))
390 390 return [(t, n) for r, t, n in sorted(l)]
391 391
    def nodetags(self, node):
        '''return the tags associated with a node'''
        # build the reverse (node -> tag names) cache lazily
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])
401 401
402 402 def nodebookmarks(self, node):
403 403 marks = []
404 404 for bookmark, n in self._bookmarks.iteritems():
405 405 if n == node:
406 406 marks.append(bookmark)
407 407 return sorted(marks)
408 408
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Extend the branch head cache 'partial' (valid up to revision
        # lrev) with all revisions up to the current tip, then persist
        # the refreshed cache to disk.
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        # bring the in-memory branch head cache up to date with the tip
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # cache unusable (first use, or old tip stripped): reload
            partial, last, lrev = self._readbranchcache()
        else:
            # cache valid up to the old tip; extend incrementally
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                # prefer the most recent head that is not closed
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt
453 453
    def _readbranchcache(self):
        """Load .hg/cache/branchheads from disk.

        Returns (partial, last, lrev): the branch -> heads mapping plus
        the tip node/rev it was valid for. Any problem (missing file,
        corrupt content, stale tip) yields an empty cache so callers
        simply rebuild from scratch.
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # no cache on disk: start empty
            return {}, nullid, nullrev

        try:
            # first line records the tip the cache was computed against
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # deliberately broad: a corrupt cache is never fatal, we
            # just rebuild it; report the reason only in debug mode
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        """Persist the branch heads cache; failures are ignored
        (the cache is merely an optimization)."""
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changectxs from ctxgen into the branch -> heads
        mapping 'partial', pruning entries that are no longer heads."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
519 519
    def lookup(self, key):
        """Resolve key to a changelog node.

        Resolution order (the order is part of the contract): integer
        revision, '.', 'null', 'tip', full hex/binary node, bookmark,
        tag, branch name, unambiguous node prefix. Raises
        error.RepoLookupError when nothing matches.
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        """Return the branch name key refers to: either key itself when
        it names a branch (preferring remote's branchmap when given),
        or the branch of the revision key resolves to."""
        repo = remote or self
        if key in repo.branchmap():
            return key

        # resolve the revision locally unless remote is a local repo too
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
560 560
    def known(self, nodes):
        # boolean vector: which of 'nodes' exist in this repo's changelog
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        # this is a local (not wire-protocol) repository
        return True

    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)

    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)

    def file(self, f):
        # return the filelog for tracked file f (leading '/' stripped)
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        # legacy alias for repo[changeid]
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        # current directory relative to the repo root (via dirstate)
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        # render repo-relative path f relative to cwd
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)

    def _link(self, f):
        # is working-directory file f a symlink?
        return os.path.islink(self.wjoin(f))
602 602
    def _loadfilter(self, filter):
        """Compile and cache the (pattern, function, spec) triples for
        the given config section ('encode' or 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' unsets a pattern inherited from another hgrc
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # no registered filter matched: treat cmd as a shell
                    # command piped over the data
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    # bind oldfn now; a bare 'fn' here would late-bind
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        # apply the first filter whose pattern matches filename
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
635 635
    @propertycache
    def _encodefilterpats(self):
        # compiled [encode] filters (applied when reading from the wdir)
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        # compiled [decode] filters (applied when writing to the wdir)
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        # register a named data filter usable from [encode]/[decode]
        self._datafilters[name] = filter
646 646
    def wread(self, filename):
        """Read filename from the working directory, applying the
        configured encode filters; symlinks yield their target."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory, applying the
        decode filters and honouring the 'l' (symlink) and 'x'
        (executable) flags."""
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        # run the decode filters without touching the filesystem
        return self._filter(self._decodefilterpats, filename, data)
665 665
    def transaction(self, desc):
        """Open (or nest into) a store transaction described by desc.

        Returns a transaction object; aborts with RepoError when an
        abandoned journal from a crashed transaction is found.
        """
        # reuse a live transaction if one is already running
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # weakref so an unreferenced transaction can be collected/aborted
        self._transref = weakref.ref(tr)
        return tr

    def _writejournal(self, desc):
        """Snapshot rollback state (dirstate, branch, desc, bookmarks)
        into journal.* files; returns the tuple of journal paths."""
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'))

    def recover(self):
        """Replay an interrupted transaction's journal, if present.
        Returns True when a rollback was performed, False otherwise."""
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
722 722
    def rollback(self, dryrun=False):
        """Undo the last transaction using the undo.* files.

        With dryrun=True only reports what would be rolled back.
        Returns 1 when no rollback information is available.
        """
        wlock = lock = None
        try:
            # both locks: rollback touches the store and the dirstate
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    # undo.desc: "<len(repo)>\n<desc>\n" written at
                    # transaction time; used only for the status message
                    args = self.opener.read("undo.desc").splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener.read("undo.branch")
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("named branch could not be reset, "
                                   "current branch is still: %s\n")
                                 % self.dirstate.branch())
                # drop every cache that may reference rolled-back data
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                parents = tuple([p.rev() for p in self.parents()])
                if len(parents) > 1:
                    self.ui.status(_("working directory now based on "
                                     "revisions %d and %d\n") % parents)
                else:
                    self.ui.status(_("working directory now based on "
                                     "revision %d\n") % parents)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
772 772
    def invalidatecaches(self):
        """Drop all in-memory caches derived from the changelog."""
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        """Forget cached revlogs and bookmarks (forcing a reload from
        disk on next access), then drop the derived caches too."""
        for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
            # these are propertycache attrs: deleting them re-triggers
            # the lazy loader on next access
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()
785 785
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file at 'lockname' and return the lock object.

        If the lock is already held: re-raise error.LockHeld immediately
        when 'wait' is false, otherwise warn and retry with the configured
        ui.timeout (default 600 seconds). 'releasefn'/'acquirefn' are
        callbacks run on release/after acquisition; 'desc' is a
        human-readable description used in messages.
        """
        try:
            # first attempt with timeout 0: fail fast if already held
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
800 800
801 801 def lock(self, wait=True):
802 802 '''Lock the repository store (.hg/store) and return a weak reference
803 803 to the lock. Use this before modifying the store (e.g. committing or
804 804 stripping). If you are opening a transaction, get a lock as well.)'''
805 805 l = self._lockref and self._lockref()
806 806 if l is not None and l.held:
807 807 l.lock()
808 808 return l
809 809
810 810 l = self._lock(self.sjoin("lock"), wait, self.store.write,
811 811 self.invalidate, _('repository %s') % self.origroot)
812 812 self._lockref = weakref.ref(l)
813 813 return l
814 814
815 815 def wlock(self, wait=True):
816 816 '''Lock the non-store parts of the repository (everything under
817 817 .hg except .hg/store) and return a weak reference to the lock.
818 818 Use this before modifying files in .hg.'''
819 819 l = self._wlockref and self._wlockref()
820 820 if l is not None and l.held:
821 821 l.lock()
822 822 return l
823 823
824 824 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
825 825 self.dirstate.invalidate, _('working directory of %s') %
826 826 self.origroot)
827 827 self._wlockref = weakref.ref(l)
828 828 return l
829 829
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: file context of the file being committed
        manifest1/manifest2: manifests of the commit's first/second parent
        linkrev: changelog revision the new filelog entry will link to
        tr: transaction (proxy) the write belongs to
        changelist: list of changed file names, appended to in place

        Returns the filelog node for the file in the new revision, or
        the (possibly reused) first-parent node when content is
        unchanged.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        # fparent2o keeps the original second parent so the flags-only
        # merge case at the bottom can compare against it
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                # copy source untraceable: record as a plain change
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
909 909
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there is
        nothing to commit. Raises util.Abort for invalid requests
        (partial merge commit, unmatched explicit patterns, unresolved
        merge conflicts, subrepo problems).

        NOTE(review): the mutable default ``extra={}`` is only read here
        (``extra.get``) and passed to context.workingctx -- presumably
        never mutated downstream, but confirm before relying on that.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit dir patterns can be
            # validated against actual matches below
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                # force .hgsubstate to be committed first
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed and branch unchanged: no commit needed
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # bare except is deliberate: point at the saved message
                # for any failure (even KeyboardInterrupt), then re-raise
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
1044 1044
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When 'error' is true, an IOError while committing a file is
        fatal; otherwise a missing file (ENOENT) is treated as removed.
        Returns the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # proxy so helpers cannot keep the transaction alive
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            # file vanished: record as a removal instead
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # pending callback for the hook: repo root when there is
            # pending changelog data to expose, else ""
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1118 1118
1119 1119 def destroyed(self):
1120 1120 '''Inform the repository that nodes have been destroyed.
1121 1121 Intended for use by strip and rollback, so there's a common
1122 1122 place for anything that has to be done after destroying history.'''
1123 1123 # XXX it might be nice if we could take the list of destroyed
1124 1124 # nodes, but I don't see an easy way for rollback() to do that
1125 1125
1126 1126 # Ensure the persistent tag cache is updated. Doing it now
1127 1127 # means that the tag cache only has to worry about destroyed
1128 1128 # heads immediately after a strip/rollback. That in turn
1129 1129 # guarantees that "cachetip == currenttip" (comparing both rev
1130 1130 # and node) always means no nodes have been added or destroyed.
1131 1131
1132 1132 # XXX this is suboptimal when qrefresh'ing: we strip the current
1133 1133 # head, refresh the tag cache, then immediately add a new head.
1134 1134 # But I think doing it this way is necessary for the "instant
1135 1135 # tag cache retrieval" case to work.
1136 1136 self.invalidatecaches()
1137 1137
1138 1138 def walk(self, match, node=None):
1139 1139 '''
1140 1140 walk recursively through the directory tree or a given
1141 1141 changeset, finding all files matched by the match
1142 1142 function
1143 1143 '''
1144 1144 return self[node].walk(match)
1145 1145
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown lists are only populated when the
        corresponding flag is set.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by 'match'
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only warn for files that do not exist in ctx1 either
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            # NOTE(review): 'cmp' shadows the builtin; it is the list of
            # files the dirstate could not decide about by stat alone
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    # mf2[fn] is None for working-dir entries added above,
                    # forcing a content compare via ctx1[fn].cmp(ctx2[fn])
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    # prefix subrepo results into the corresponding lists
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1276 1276
1277 1277 def heads(self, start=None):
1278 1278 heads = self.changelog.heads(start)
1279 1279 # sort the output in rev descending order
1280 1280 return sorted(heads, key=self.changelog.rev, reverse=True)
1281 1281
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            # field 5 of a changelog entry carrying 'close' marks a
            # closed head -- presumably the extra dict; confirm against
            # changelog.read()
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads
1305 1305
1306 1306 def branches(self, nodes):
1307 1307 if not nodes:
1308 1308 nodes = [self.changelog.tip()]
1309 1309 b = []
1310 1310 for n in nodes:
1311 1311 t = n
1312 while 1:
1312 while True:
1313 1313 p = self.changelog.parents(n)
1314 1314 if p[1] != nullid or p[0] == nullid:
1315 1315 b.append((t, n, p[0], p[1]))
1316 1316 break
1317 1317 n = p[0]
1318 1318 return b
1319 1319
1320 1320 def between(self, pairs):
1321 1321 r = []
1322 1322
1323 1323 for top, bottom in pairs:
1324 1324 n, l, i = top, [], 0
1325 1325 f = 1
1326 1326
1327 1327 while n != bottom and n != nullid:
1328 1328 p = self.changelog.parents(n)[0]
1329 1329 if i == f:
1330 1330 l.append(n)
1331 1331 f = f * 2
1332 1332 n = p
1333 1333 i += 1
1334 1334
1335 1335 r.append(l)
1336 1336
1337 1337 return r
1338 1338
    def pull(self, remote, heads=None, force=False):
        """Pull changes from 'remote' into this repository.

        heads optionally limits the pull to ancestors of those nodes;
        force is passed through to discovery. Returns the integer result
        of addchangegroup(), or 0 when there is nothing to pull.
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the richest transfer method the remote supports
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                      "other repository doesn't support "
                                      "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result
1372 1372
1373 1373 def checkpush(self, force, revs):
1374 1374 """Extensions can override this function if additional checks have
1375 1375 to be performed before pushing, or call it if they override push
1376 1376 command.
1377 1377 """
1378 1378 pass
1379 1379
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
        - 0 means HTTP error *or* nothing to push
        - 1 means we pushed and remote head count is unchanged *or*
          we have outgoing changesets but refused to push
        - other values as described by addchangegroup()

        After the changegroup transfer, bookmarks shared with the remote
        are advanced there when the remote value is an ancestor of ours.
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    # only fast-forward the remote bookmark
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1441 1441
1442 1442 def changegroupinfo(self, nodes, source):
1443 1443 if self.ui.verbose or source == 'bundle':
1444 1444 self.ui.status(_("%d changesets found\n") % len(nodes))
1445 1445 if self.ui.debugflag:
1446 1446 self.ui.debug("list of changesets:\n")
1447 1447 for node in nodes:
1448 1448 self.ui.debug("%s\n" % hex(node))
1449 1449
1450 1450 def changegroupsubset(self, bases, heads, source):
1451 1451 """Compute a changegroup consisting of all the nodes that are
1452 1452 descendents of any of the bases and ancestors of any of the heads.
1453 1453 Return a chunkbuffer object whose read() method will return
1454 1454 successive changegroup chunks.
1455 1455
1456 1456 It is fairly complex as determining which filenodes and which
1457 1457 manifest nodes need to be included for the changeset to be complete
1458 1458 is non-trivial.
1459 1459
1460 1460 Another wrinkle is doing the reverse, figuring out which changeset in
1461 1461 the changegroup a particular filenode or manifestnode belongs to.
1462 1462 """
1463 1463 cl = self.changelog
1464 1464 if not bases:
1465 1465 bases = [nullid]
1466 1466 csets, bases, heads = cl.nodesbetween(bases, heads)
1467 1467 # We assume that all ancestors of bases are known
1468 1468 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1469 1469 return self._changegroupsubset(common, csets, heads, source)
1470 1470
1471 1471 def getbundle(self, source, heads=None, common=None):
1472 1472 """Like changegroupsubset, but returns the set difference between the
1473 1473 ancestors of heads and the ancestors common.
1474 1474
1475 1475 If heads is None, use the local heads. If common is None, use [nullid].
1476 1476
1477 1477 The nodes in common might not all be known locally due to the way the
1478 1478 current discovery protocol works.
1479 1479 """
1480 1480 cl = self.changelog
1481 1481 if common:
1482 1482 nm = cl.nodemap
1483 1483 common = [n for n in common if n in nm]
1484 1484 else:
1485 1485 common = [nullid]
1486 1486 if not heads:
1487 1487 heads = cl.heads()
1488 1488 common, missing = cl.findcommonmissing(common, heads)
1489 1489 if not missing:
1490 1490 return None
1491 1491 return self._changegroupsubset(common, missing, heads, source)
1492 1492
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Build a changegroup for 'csets' assuming the receiver already
        has every changeset whose revision is in 'commonrevs'.

        Falls back to the simpler _changegroup() fast path when 'heads'
        equals all local heads. Returns an unbundle10 reader over a
        lazily generated chunk stream.
        """

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        # mutable cells shared with the closures below:
        # fstate = [current filename, its node->changenode map]
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            for n in missing:
                if revlog.linkrev(revlog.rev(n)) not in commonrevs:
                    yield n

        def lookup(revlog, x):
            # map a revlog node back to its owning changenode, collecting
            # the manifests/filenodes needed along the way
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})
                first = True

                for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
                                              bundler, reorder=reorder):
                    if first:
                        # an immediate closing chunk means the file group
                        # is empty: emit no header for it
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1592 1592
1593 1593 def changegroup(self, basenodes, source):
1594 1594 # to avoid a race we use changegroupsubset() (issue1320)
1595 1595 return self.changegroupsubset(basenodes, self.heads(), source)
1596 1596
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}                # changenode owning each needed manifest
        changedfiles = set()
        # mutable cells shared with the closures below
        fstate = ['']           # current filename (for progress output)
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            # nodes of 'log' whose linked changeset is being sent
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookup(revlog, x):
            # map a revlog node back to its owning changenode
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                first = True
                for chunk in filerevlog.group(gennodelst(filerevlog), bundler,
                                              reorder=reorder):
                    if first:
                        # an immediate closing chunk means the file group
                        # is empty: emit no header for it
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1686 1686
1687 1687 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1688 1688 """Add the changegroup returned by source.read() to this repo.
1689 1689 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1690 1690 the URL of the repo where this changegroup is coming from.
1691 1691 If lock is not None, the function takes ownership of the lock
1692 1692 and releases it after the changegroup is added.
1693 1693
1694 1694 Return an integer summarizing the change to this repo:
1695 1695 - nothing changed or no source: 0
1696 1696 - more heads than before: 1+added heads (2..n)
1697 1697 - fewer heads than before: -1-removed heads (-2..-n)
1698 1698 - number of heads stays the same: 1
1699 1699 """
1700 1700 def csmap(x):
1701 1701 self.ui.debug("add changeset %s\n" % short(x))
1702 1702 return len(cl)
1703 1703
1704 1704 def revmap(x):
1705 1705 return cl.rev(x)
1706 1706
1707 1707 if not source:
1708 1708 return 0
1709 1709
1710 1710 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1711 1711
1712 1712 changesets = files = revisions = 0
1713 1713 efiles = set()
1714 1714
1715 1715 # write changelog data to temp files so concurrent readers will not see
1716 1716 # inconsistent view
1717 1717 cl = self.changelog
1718 1718 cl.delayupdate()
1719 1719 oldheads = cl.heads()
1720 1720
1721 1721 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1722 1722 try:
1723 1723 trp = weakref.proxy(tr)
1724 1724 # pull off the changeset group
1725 1725 self.ui.status(_("adding changesets\n"))
1726 1726 clstart = len(cl)
1727 1727 class prog(object):
1728 1728 step = _('changesets')
1729 1729 count = 1
1730 1730 ui = self.ui
1731 1731 total = None
1732 1732 def __call__(self):
1733 1733 self.ui.progress(self.step, self.count, unit=_('chunks'),
1734 1734 total=self.total)
1735 1735 self.count += 1
1736 1736 pr = prog()
1737 1737 source.callback = pr
1738 1738
1739 1739 source.changelogheader()
1740 1740 if (cl.addgroup(source, csmap, trp) is None
1741 1741 and not emptyok):
1742 1742 raise util.Abort(_("received changelog group is empty"))
1743 1743 clend = len(cl)
1744 1744 changesets = clend - clstart
1745 1745 for c in xrange(clstart, clend):
1746 1746 efiles.update(self[c].files())
1747 1747 efiles = len(efiles)
1748 1748 self.ui.progress(_('changesets'), None)
1749 1749
1750 1750 # pull off the manifest group
1751 1751 self.ui.status(_("adding manifests\n"))
1752 1752 pr.step = _('manifests')
1753 1753 pr.count = 1
1754 1754 pr.total = changesets # manifests <= changesets
1755 1755 # no need to check for empty manifest group here:
1756 1756 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1757 1757 # no new manifest will be created and the manifest group will
1758 1758 # be empty during the pull
1759 1759 source.manifestheader()
1760 1760 self.manifest.addgroup(source, revmap, trp)
1761 1761 self.ui.progress(_('manifests'), None)
1762 1762
1763 1763 needfiles = {}
1764 1764 if self.ui.configbool('server', 'validate', default=False):
1765 1765 # validate incoming csets have their manifests
1766 1766 for cset in xrange(clstart, clend):
1767 1767 mfest = self.changelog.read(self.changelog.node(cset))[0]
1768 1768 mfest = self.manifest.readdelta(mfest)
1769 1769 # store file nodes we must see
1770 1770 for f, n in mfest.iteritems():
1771 1771 needfiles.setdefault(f, set()).add(n)
1772 1772
1773 1773 # process the files
1774 1774 self.ui.status(_("adding file changes\n"))
1775 1775 pr.step = 'files'
1776 1776 pr.count = 1
1777 1777 pr.total = efiles
1778 1778 source.callback = None
1779 1779
1780 while 1:
1780 while True:
1781 1781 chunkdata = source.filelogheader()
1782 1782 if not chunkdata:
1783 1783 break
1784 1784 f = chunkdata["filename"]
1785 1785 self.ui.debug("adding %s revisions\n" % f)
1786 1786 pr()
1787 1787 fl = self.file(f)
1788 1788 o = len(fl)
1789 1789 if fl.addgroup(source, revmap, trp) is None:
1790 1790 raise util.Abort(_("received file revlog group is empty"))
1791 1791 revisions += len(fl) - o
1792 1792 files += 1
1793 1793 if f in needfiles:
1794 1794 needs = needfiles[f]
1795 1795 for new in xrange(o, len(fl)):
1796 1796 n = fl.node(new)
1797 1797 if n in needs:
1798 1798 needs.remove(n)
1799 1799 if not needs:
1800 1800 del needfiles[f]
1801 1801 self.ui.progress(_('files'), None)
1802 1802
1803 1803 for f, needs in needfiles.iteritems():
1804 1804 fl = self.file(f)
1805 1805 for n in needs:
1806 1806 try:
1807 1807 fl.rev(n)
1808 1808 except error.LookupError:
1809 1809 raise util.Abort(
1810 1810 _('missing file data for %s:%s - run hg verify') %
1811 1811 (f, hex(n)))
1812 1812
1813 1813 dh = 0
1814 1814 if oldheads:
1815 1815 heads = cl.heads()
1816 1816 dh = len(heads) - len(oldheads)
1817 1817 for h in heads:
1818 1818 if h not in oldheads and 'close' in self[h].extra():
1819 1819 dh -= 1
1820 1820 htext = ""
1821 1821 if dh:
1822 1822 htext = _(" (%+d heads)") % dh
1823 1823
1824 1824 self.ui.status(_("added %d changesets"
1825 1825 " with %d changes to %d files%s\n")
1826 1826 % (changesets, revisions, files, htext))
1827 1827
1828 1828 if changesets > 0:
1829 1829 p = lambda: cl.writepending() and self.root or ""
1830 1830 self.hook('pretxnchangegroup', throw=True,
1831 1831 node=hex(cl.node(clstart)), source=srctype,
1832 1832 url=url, pending=p)
1833 1833
1834 1834 # make changelog see real files again
1835 1835 cl.finalize(trp)
1836 1836
1837 1837 tr.close()
1838 1838 finally:
1839 1839 tr.release()
1840 1840 if lock:
1841 1841 lock.release()
1842 1842
1843 1843 if changesets > 0:
1844 1844 # forcefully update the on-disk branch cache
1845 1845 self.ui.debug("updating the branch cache\n")
1846 1846 self.updatebranchcache()
1847 1847 self.hook("changegroup", node=hex(cl.node(clstart)),
1848 1848 source=srctype, url=url)
1849 1849
1850 1850 for i in xrange(clstart, clend):
1851 1851 self.hook("incoming", node=hex(cl.node(i)),
1852 1852 source=srctype, url=url)
1853 1853
1854 1854 # never return 0 here:
1855 1855 if dh < 0:
1856 1856 return dh - 1
1857 1857 else:
1858 1858 return dh + 1
1859 1859
1860 1860 def stream_in(self, remote, requirements):
1861 1861 lock = self.lock()
1862 1862 try:
1863 1863 fp = remote.stream_out()
1864 1864 l = fp.readline()
1865 1865 try:
1866 1866 resp = int(l)
1867 1867 except ValueError:
1868 1868 raise error.ResponseError(
1869 1869 _('Unexpected response from remote server:'), l)
1870 1870 if resp == 1:
1871 1871 raise util.Abort(_('operation forbidden by server'))
1872 1872 elif resp == 2:
1873 1873 raise util.Abort(_('locking the remote repository failed'))
1874 1874 elif resp != 0:
1875 1875 raise util.Abort(_('the server sent an unknown error code'))
1876 1876 self.ui.status(_('streaming all changes\n'))
1877 1877 l = fp.readline()
1878 1878 try:
1879 1879 total_files, total_bytes = map(int, l.split(' ', 1))
1880 1880 except (ValueError, TypeError):
1881 1881 raise error.ResponseError(
1882 1882 _('Unexpected response from remote server:'), l)
1883 1883 self.ui.status(_('%d files to transfer, %s of data\n') %
1884 1884 (total_files, util.bytecount(total_bytes)))
1885 1885 start = time.time()
1886 1886 for i in xrange(total_files):
1887 1887 # XXX doesn't support '\n' or '\r' in filenames
1888 1888 l = fp.readline()
1889 1889 try:
1890 1890 name, size = l.split('\0', 1)
1891 1891 size = int(size)
1892 1892 except (ValueError, TypeError):
1893 1893 raise error.ResponseError(
1894 1894 _('Unexpected response from remote server:'), l)
1895 1895 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1896 1896 # for backwards compat, name was partially encoded
1897 1897 ofp = self.sopener(store.decodedir(name), 'w')
1898 1898 for chunk in util.filechunkiter(fp, limit=size):
1899 1899 ofp.write(chunk)
1900 1900 ofp.close()
1901 1901 elapsed = time.time() - start
1902 1902 if elapsed <= 0:
1903 1903 elapsed = 0.001
1904 1904 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1905 1905 (util.bytecount(total_bytes), elapsed,
1906 1906 util.bytecount(total_bytes / elapsed)))
1907 1907
1908 1908 # new requirements = old non-format requirements + new format-related
1909 1909 # requirements from the streamed-in repository
1910 1910 requirements.update(set(self.requirements) - self.supportedformats)
1911 1911 self._applyrequirements(requirements)
1912 1912 self._writerequirements()
1913 1913
1914 1914 self.invalidate()
1915 1915 return len(self.heads()) + 1
1916 1916 finally:
1917 1917 lock.release()
1918 1918
1919 1919 def clone(self, remote, heads=[], stream=False):
1920 1920 '''clone remote repository.
1921 1921
1922 1922 keyword arguments:
1923 1923 heads: list of revs to clone (forces use of pull)
1924 1924 stream: use streaming clone if possible'''
1925 1925
1926 1926 # now, all clients that can request uncompressed clones can
1927 1927 # read repo formats supported by all servers that can serve
1928 1928 # them.
1929 1929
1930 1930 # if revlog format changes, client will have to check version
1931 1931 # and format flags on "stream" capability, and use
1932 1932 # uncompressed only if compatible.
1933 1933
1934 1934 if stream and not heads:
1935 1935 # 'stream' means remote revlog format is revlogv1 only
1936 1936 if remote.capable('stream'):
1937 1937 return self.stream_in(remote, set(('revlogv1',)))
1938 1938 # otherwise, 'streamreqs' contains the remote revlog format
1939 1939 streamreqs = remote.capable('streamreqs')
1940 1940 if streamreqs:
1941 1941 streamreqs = set(streamreqs.split(','))
1942 1942 # if we support it, stream in and adjust our requirements
1943 1943 if not streamreqs - self.supportedformats:
1944 1944 return self.stream_in(remote, streamreqs)
1945 1945 return self.pull(remote, heads)
1946 1946
1947 1947 def pushkey(self, namespace, key, old, new):
1948 1948 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1949 1949 old=old, new=new)
1950 1950 ret = pushkey.push(self, namespace, key, old, new)
1951 1951 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1952 1952 ret=ret)
1953 1953 return ret
1954 1954
1955 1955 def listkeys(self, namespace):
1956 1956 self.hook('prelistkeys', throw=True, namespace=namespace)
1957 1957 values = pushkey.list(self, namespace)
1958 1958 self.hook('listkeys', namespace=namespace, values=values)
1959 1959 return values
1960 1960
1961 1961 def debugwireargs(self, one, two, three=None, four=None, five=None):
1962 1962 '''used to test argument passing over the wire'''
1963 1963 return "%s %s %s %s %s" % (one, two, three, four, five)
1964 1964
1965 1965 # used to avoid circular references so destructors work
1966 1966 def aftertrans(files):
1967 1967 renamefiles = [tuple(t) for t in files]
1968 1968 def a():
1969 1969 for src, dest in renamefiles:
1970 1970 util.rename(src, dest)
1971 1971 return a
1972 1972
1973 1973 def undoname(fn):
1974 1974 base, name = os.path.split(fn)
1975 1975 assert name.startswith('journal')
1976 1976 return os.path.join(base, name.replace('journal', 'undo', 1))
1977 1977
1978 1978 def instance(ui, path, create):
1979 1979 return localrepository(ui, util.localpath(path), create)
1980 1980
1981 1981 def islocal(path):
1982 1982 return True
@@ -1,137 +1,137
1 1 # lock.py - simple advisory locking scheme for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import util, error
9 9 import errno, os, socket, time
10 10 import warnings
11 11
12 12 class lock(object):
13 13 '''An advisory lock held by one process to control access to a set
14 14 of files. Non-cooperating processes or incorrectly written scripts
15 15 can ignore Mercurial's locking scheme and stomp all over the
16 16 repository, so don't do that.
17 17
18 18 Typically used via localrepository.lock() to lock the repository
19 19 store (.hg/store/) or localrepository.wlock() to lock everything
20 20 else under .hg/.'''
21 21
22 22 # lock is symlink on platforms that support it, file on others.
23 23
24 24 # symlink is used because create of directory entry and contents
25 25 # are atomic even over nfs.
26 26
27 27 # old-style lock: symlink to pid
28 28 # new-style lock: symlink to hostname:pid
29 29
30 30 _host = None
31 31
32 32 def __init__(self, file, timeout=-1, releasefn=None, desc=None):
33 33 self.f = file
34 34 self.held = 0
35 35 self.timeout = timeout
36 36 self.releasefn = releasefn
37 37 self.desc = desc
38 38 self.lock()
39 39
40 40 def __del__(self):
41 41 if self.held:
42 42 warnings.warn("use lock.release instead of del lock",
43 43 category=DeprecationWarning,
44 44 stacklevel=2)
45 45
46 46 # ensure the lock will be removed
47 47 # even if recursive locking did occur
48 48 self.held = 1
49 49
50 50 self.release()
51 51
52 52 def lock(self):
53 53 timeout = self.timeout
54 while 1:
54 while True:
55 55 try:
56 56 self.trylock()
57 57 return 1
58 58 except error.LockHeld, inst:
59 59 if timeout != 0:
60 60 time.sleep(1)
61 61 if timeout > 0:
62 62 timeout -= 1
63 63 continue
64 64 raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
65 65 inst.locker)
66 66
67 67 def trylock(self):
68 68 if self.held:
69 69 self.held += 1
70 70 return
71 71 if lock._host is None:
72 72 lock._host = socket.gethostname()
73 73 lockname = '%s:%s' % (lock._host, os.getpid())
74 74 while not self.held:
75 75 try:
76 76 util.makelock(lockname, self.f)
77 77 self.held = 1
78 78 except (OSError, IOError), why:
79 79 if why.errno == errno.EEXIST:
80 80 locker = self.testlock()
81 81 if locker is not None:
82 82 raise error.LockHeld(errno.EAGAIN, self.f, self.desc,
83 83 locker)
84 84 else:
85 85 raise error.LockUnavailable(why.errno, why.strerror,
86 86 why.filename, self.desc)
87 87
88 88 def testlock(self):
89 89 """return id of locker if lock is valid, else None.
90 90
91 91 If old-style lock, we cannot tell what machine locker is on.
92 92 with new-style lock, if locker is on this machine, we can
93 93 see if locker is alive. If locker is on this machine but
94 94 not alive, we can safely break lock.
95 95
96 96 The lock file is only deleted when None is returned.
97 97
98 98 """
99 99 locker = util.readlock(self.f)
100 100 try:
101 101 host, pid = locker.split(":", 1)
102 102 except ValueError:
103 103 return locker
104 104 if host != lock._host:
105 105 return locker
106 106 try:
107 107 pid = int(pid)
108 108 except ValueError:
109 109 return locker
110 110 if util.testpid(pid):
111 111 return locker
112 112 # if locker dead, break lock. must do this with another lock
113 113 # held, or can race and break valid lock.
114 114 try:
115 115 l = lock(self.f + '.break', timeout=0)
116 116 util.unlink(self.f)
117 117 l.release()
118 118 except error.LockError:
119 119 return locker
120 120
121 121 def release(self):
122 122 if self.held > 1:
123 123 self.held -= 1
124 124 elif self.held == 1:
125 125 self.held = 0
126 126 if self.releasefn:
127 127 self.releasefn()
128 128 try:
129 129 util.unlink(self.f)
130 130 except OSError:
131 131 pass
132 132
133 133 def release(*locks):
134 134 for lock in locks:
135 135 if lock is not None:
136 136 lock.release()
137 137
@@ -1,1786 +1,1786
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import cStringIO, email.Parser, os, errno, re
10 10 import tempfile, zlib, shutil
11 11
12 12 from i18n import _
13 13 from node import hex, nullid, short
14 14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
15 15
16 16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 17
18 18 class PatchError(Exception):
19 19 pass
20 20
21 21
22 22 # public functions
23 23
24 24 def split(stream):
25 25 '''return an iterator of individual patches from a stream'''
26 26 def isheader(line, inheader):
27 27 if inheader and line[0] in (' ', '\t'):
28 28 # continuation
29 29 return True
30 30 if line[0] in (' ', '-', '+'):
31 31 # diff line - don't check for header pattern in there
32 32 return False
33 33 l = line.split(': ', 1)
34 34 return len(l) == 2 and ' ' not in l[0]
35 35
36 36 def chunk(lines):
37 37 return cStringIO.StringIO(''.join(lines))
38 38
39 39 def hgsplit(stream, cur):
40 40 inheader = True
41 41
42 42 for line in stream:
43 43 if not line.strip():
44 44 inheader = False
45 45 if not inheader and line.startswith('# HG changeset patch'):
46 46 yield chunk(cur)
47 47 cur = []
48 48 inheader = True
49 49
50 50 cur.append(line)
51 51
52 52 if cur:
53 53 yield chunk(cur)
54 54
55 55 def mboxsplit(stream, cur):
56 56 for line in stream:
57 57 if line.startswith('From '):
58 58 for c in split(chunk(cur[1:])):
59 59 yield c
60 60 cur = []
61 61
62 62 cur.append(line)
63 63
64 64 if cur:
65 65 for c in split(chunk(cur[1:])):
66 66 yield c
67 67
68 68 def mimesplit(stream, cur):
69 69 def msgfp(m):
70 70 fp = cStringIO.StringIO()
71 71 g = email.Generator.Generator(fp, mangle_from_=False)
72 72 g.flatten(m)
73 73 fp.seek(0)
74 74 return fp
75 75
76 76 for line in stream:
77 77 cur.append(line)
78 78 c = chunk(cur)
79 79
80 80 m = email.Parser.Parser().parse(c)
81 81 if not m.is_multipart():
82 82 yield msgfp(m)
83 83 else:
84 84 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 85 for part in m.walk():
86 86 ct = part.get_content_type()
87 87 if ct not in ok_types:
88 88 continue
89 89 yield msgfp(part)
90 90
91 91 def headersplit(stream, cur):
92 92 inheader = False
93 93
94 94 for line in stream:
95 95 if not inheader and isheader(line, inheader):
96 96 yield chunk(cur)
97 97 cur = []
98 98 inheader = True
99 99 if inheader and not isheader(line, inheader):
100 100 inheader = False
101 101
102 102 cur.append(line)
103 103
104 104 if cur:
105 105 yield chunk(cur)
106 106
107 107 def remainder(cur):
108 108 yield chunk(cur)
109 109
110 110 class fiter(object):
111 111 def __init__(self, fp):
112 112 self.fp = fp
113 113
114 114 def __iter__(self):
115 115 return self
116 116
117 117 def next(self):
118 118 l = self.fp.readline()
119 119 if not l:
120 120 raise StopIteration
121 121 return l
122 122
123 123 inheader = False
124 124 cur = []
125 125
126 126 mimeheaders = ['content-type']
127 127
128 128 if not hasattr(stream, 'next'):
129 129 # http responses, for example, have readline but not next
130 130 stream = fiter(stream)
131 131
132 132 for line in stream:
133 133 cur.append(line)
134 134 if line.startswith('# HG changeset patch'):
135 135 return hgsplit(stream, cur)
136 136 elif line.startswith('From '):
137 137 return mboxsplit(stream, cur)
138 138 elif isheader(line, inheader):
139 139 inheader = True
140 140 if line.split(':', 1)[0].lower() in mimeheaders:
141 141 # let email parser handle this
142 142 return mimesplit(stream, cur)
143 143 elif line.startswith('--- ') and inheader:
144 144 # No evil headers seen by diff start, split by hand
145 145 return headersplit(stream, cur)
146 146 # Not enough info, keep reading
147 147
148 148 # if we are here, we have a very plain patch
149 149 return remainder(cur)
150 150
151 151 def extract(ui, fileobj):
152 152 '''extract patch from data read from fileobj.
153 153
154 154 patch can be a normal patch or contained in an email message.
155 155
156 156 return tuple (filename, message, user, date, branch, node, p1, p2).
157 157 Any item in the returned tuple can be None. If filename is None,
158 158 fileobj did not contain a patch. Caller must unlink filename when done.'''
159 159
160 160 # attempt to detect the start of a patch
161 161 # (this heuristic is borrowed from quilt)
162 162 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 163 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 164 r'---[ \t].*?^\+\+\+[ \t]|'
165 165 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166 166
167 167 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 168 tmpfp = os.fdopen(fd, 'w')
169 169 try:
170 170 msg = email.Parser.Parser().parse(fileobj)
171 171
172 172 subject = msg['Subject']
173 173 user = msg['From']
174 174 if not subject and not user:
175 175 # Not an email, restore parsed headers if any
176 176 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177 177
178 178 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 179 # should try to parse msg['Date']
180 180 date = None
181 181 nodeid = None
182 182 branch = None
183 183 parents = []
184 184
185 185 if subject:
186 186 if subject.startswith('[PATCH'):
187 187 pend = subject.find(']')
188 188 if pend >= 0:
189 189 subject = subject[pend + 1:].lstrip()
190 190 subject = subject.replace('\n\t', ' ')
191 191 ui.debug('Subject: %s\n' % subject)
192 192 if user:
193 193 ui.debug('From: %s\n' % user)
194 194 diffs_seen = 0
195 195 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 196 message = ''
197 197 for part in msg.walk():
198 198 content_type = part.get_content_type()
199 199 ui.debug('Content-Type: %s\n' % content_type)
200 200 if content_type not in ok_types:
201 201 continue
202 202 payload = part.get_payload(decode=True)
203 203 m = diffre.search(payload)
204 204 if m:
205 205 hgpatch = False
206 206 hgpatchheader = False
207 207 ignoretext = False
208 208
209 209 ui.debug('found patch at byte %d\n' % m.start(0))
210 210 diffs_seen += 1
211 211 cfp = cStringIO.StringIO()
212 212 for line in payload[:m.start(0)].splitlines():
213 213 if line.startswith('# HG changeset patch') and not hgpatch:
214 214 ui.debug('patch generated by hg export\n')
215 215 hgpatch = True
216 216 hgpatchheader = True
217 217 # drop earlier commit message content
218 218 cfp.seek(0)
219 219 cfp.truncate()
220 220 subject = None
221 221 elif hgpatchheader:
222 222 if line.startswith('# User '):
223 223 user = line[7:]
224 224 ui.debug('From: %s\n' % user)
225 225 elif line.startswith("# Date "):
226 226 date = line[7:]
227 227 elif line.startswith("# Branch "):
228 228 branch = line[9:]
229 229 elif line.startswith("# Node ID "):
230 230 nodeid = line[10:]
231 231 elif line.startswith("# Parent "):
232 232 parents.append(line[10:])
233 233 elif not line.startswith("# "):
234 234 hgpatchheader = False
235 235 elif line == '---' and gitsendmail:
236 236 ignoretext = True
237 237 if not hgpatchheader and not ignoretext:
238 238 cfp.write(line)
239 239 cfp.write('\n')
240 240 message = cfp.getvalue()
241 241 if tmpfp:
242 242 tmpfp.write(payload)
243 243 if not payload.endswith('\n'):
244 244 tmpfp.write('\n')
245 245 elif not diffs_seen and message and content_type == 'text/plain':
246 246 message += '\n' + payload
247 247 except:
248 248 tmpfp.close()
249 249 os.unlink(tmpname)
250 250 raise
251 251
252 252 if subject and not message.startswith(subject):
253 253 message = '%s\n%s' % (subject, message)
254 254 tmpfp.close()
255 255 if not diffs_seen:
256 256 os.unlink(tmpname)
257 257 return None, message, user, date, branch, None, None, None
258 258 p1 = parents and parents.pop(0) or None
259 259 p2 = parents and parents.pop(0) or None
260 260 return tmpname, message, user, date, branch, nodeid, p1, p2
261 261
262 262 class patchmeta(object):
263 263 """Patched file metadata
264 264
265 265 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
266 266 or COPY. 'path' is patched file path. 'oldpath' is set to the
267 267 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 268 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 269 'islink' is True if the file is a symlink and 'isexec' is True if
270 270 the file is executable. Otherwise, 'mode' is None.
271 271 """
272 272 def __init__(self, path):
273 273 self.path = path
274 274 self.oldpath = None
275 275 self.mode = None
276 276 self.op = 'MODIFY'
277 277 self.binary = False
278 278
279 279 def setmode(self, mode):
280 280 islink = mode & 020000
281 281 isexec = mode & 0100
282 282 self.mode = (islink, isexec)
283 283
284 284 def __repr__(self):
285 285 return "<patchmeta %s %r>" % (self.op, self.path)
286 286
287 287 def readgitpatch(lr):
288 288 """extract git-style metadata about patches from <patchname>"""
289 289
290 290 # Filter patch for git information
291 291 gp = None
292 292 gitpatches = []
293 293 for line in lr:
294 294 line = line.rstrip(' \r\n')
295 295 if line.startswith('diff --git'):
296 296 m = gitre.match(line)
297 297 if m:
298 298 if gp:
299 299 gitpatches.append(gp)
300 300 dst = m.group(2)
301 301 gp = patchmeta(dst)
302 302 elif gp:
303 303 if line.startswith('--- '):
304 304 gitpatches.append(gp)
305 305 gp = None
306 306 continue
307 307 if line.startswith('rename from '):
308 308 gp.op = 'RENAME'
309 309 gp.oldpath = line[12:]
310 310 elif line.startswith('rename to '):
311 311 gp.path = line[10:]
312 312 elif line.startswith('copy from '):
313 313 gp.op = 'COPY'
314 314 gp.oldpath = line[10:]
315 315 elif line.startswith('copy to '):
316 316 gp.path = line[8:]
317 317 elif line.startswith('deleted file'):
318 318 gp.op = 'DELETE'
319 319 elif line.startswith('new file mode '):
320 320 gp.op = 'ADD'
321 321 gp.setmode(int(line[-6:], 8))
322 322 elif line.startswith('new mode '):
323 323 gp.setmode(int(line[-6:], 8))
324 324 elif line.startswith('GIT binary patch'):
325 325 gp.binary = True
326 326 if gp:
327 327 gitpatches.append(gp)
328 328
329 329 return gitpatches
330 330
331 331 class linereader(object):
332 332 # simple class to allow pushing lines back into the input stream
333 333 def __init__(self, fp):
334 334 self.fp = fp
335 335 self.buf = []
336 336
337 337 def push(self, line):
338 338 if line is not None:
339 339 self.buf.append(line)
340 340
341 341 def readline(self):
342 342 if self.buf:
343 343 l = self.buf[0]
344 344 del self.buf[0]
345 345 return l
346 346 return self.fp.readline()
347 347
348 348 def __iter__(self):
349 while 1:
349 while True:
350 350 l = self.readline()
351 351 if not l:
352 352 break
353 353 yield l
354 354
355 355 class abstractbackend(object):
356 356 def __init__(self, ui):
357 357 self.ui = ui
358 358
359 359 def getfile(self, fname):
360 360 """Return target file data and flags as a (data, (islink,
361 361 isexec)) tuple.
362 362 """
363 363 raise NotImplementedError
364 364
365 365 def setfile(self, fname, data, mode, copysource):
366 366 """Write data to target file fname and set its mode. mode is a
367 367 (islink, isexec) tuple. If data is None, the file content should
368 368 be left unchanged. If the file is modified after being copied,
369 369 copysource is set to the original file name.
370 370 """
371 371 raise NotImplementedError
372 372
373 373 def unlink(self, fname):
374 374 """Unlink target file."""
375 375 raise NotImplementedError
376 376
377 377 def writerej(self, fname, failed, total, lines):
378 378 """Write rejected lines for fname. total is the number of hunks
379 379 which failed to apply and total the total number of hunks for this
380 380 files.
381 381 """
382 382 pass
383 383
384 384 def exists(self, fname):
385 385 raise NotImplementedError
386 386
387 387 class fsbackend(abstractbackend):
388 388 def __init__(self, ui, basedir):
389 389 super(fsbackend, self).__init__(ui)
390 390 self.opener = scmutil.opener(basedir)
391 391
392 392 def _join(self, f):
393 393 return os.path.join(self.opener.base, f)
394 394
395 395 def getfile(self, fname):
396 396 path = self._join(fname)
397 397 if os.path.islink(path):
398 398 return (os.readlink(path), (True, False))
399 399 isexec, islink = False, False
400 400 try:
401 401 isexec = os.lstat(path).st_mode & 0100 != 0
402 402 islink = os.path.islink(path)
403 403 except OSError, e:
404 404 if e.errno != errno.ENOENT:
405 405 raise
406 406 return (self.opener.read(fname), (islink, isexec))
407 407
408 408 def setfile(self, fname, data, mode, copysource):
409 409 islink, isexec = mode
410 410 if data is None:
411 411 util.setflags(self._join(fname), islink, isexec)
412 412 return
413 413 if islink:
414 414 self.opener.symlink(data, fname)
415 415 else:
416 416 self.opener.write(fname, data)
417 417 if isexec:
418 418 util.setflags(self._join(fname), False, True)
419 419
420 420 def unlink(self, fname):
421 421 try:
422 422 util.unlinkpath(self._join(fname))
423 423 except OSError, inst:
424 424 if inst.errno != errno.ENOENT:
425 425 raise
426 426
427 427 def writerej(self, fname, failed, total, lines):
428 428 fname = fname + ".rej"
429 429 self.ui.warn(
430 430 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
431 431 (failed, total, fname))
432 432 fp = self.opener(fname, 'w')
433 433 fp.writelines(lines)
434 434 fp.close()
435 435
436 436 def exists(self, fname):
437 437 return os.path.lexists(self._join(fname))
438 438
439 439 class workingbackend(fsbackend):
440 440 def __init__(self, ui, repo, similarity):
441 441 super(workingbackend, self).__init__(ui, repo.root)
442 442 self.repo = repo
443 443 self.similarity = similarity
444 444 self.removed = set()
445 445 self.changed = set()
446 446 self.copied = []
447 447
448 448 def _checkknown(self, fname):
449 449 if self.repo.dirstate[fname] == '?' and self.exists(fname):
450 450 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
451 451
452 452 def setfile(self, fname, data, mode, copysource):
453 453 self._checkknown(fname)
454 454 super(workingbackend, self).setfile(fname, data, mode, copysource)
455 455 if copysource is not None:
456 456 self.copied.append((copysource, fname))
457 457 self.changed.add(fname)
458 458
459 459 def unlink(self, fname):
460 460 self._checkknown(fname)
461 461 super(workingbackend, self).unlink(fname)
462 462 self.removed.add(fname)
463 463 self.changed.add(fname)
464 464
465 465 def close(self):
466 466 wctx = self.repo[None]
467 467 addremoved = set(self.changed)
468 468 for src, dst in self.copied:
469 469 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
470 470 addremoved.discard(src)
471 471 if (not self.similarity) and self.removed:
472 472 wctx.forget(sorted(self.removed))
473 473 if addremoved:
474 474 cwd = self.repo.getcwd()
475 475 if cwd:
476 476 addremoved = [util.pathto(self.repo.root, cwd, f)
477 477 for f in addremoved]
478 478 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
479 479 return sorted(self.changed)
480 480
class filestore(object):
    """Temporary on-disk store mapping patch-relevant filenames to
    (data, mode) pairs.

    The backing temp directory is created lazily on the first write and
    removed by close().
    """
    def __init__(self):
        self.opener = None
        self.files = {}
        self.created = 0

    def setfile(self, fname, data, mode):
        """Store data/mode under fname, creating the temp dir on demand."""
        if self.opener is None:
            tmproot = tempfile.mkdtemp(prefix='hg-patch-')
            self.opener = scmutil.opener(tmproot)
        # simple numeric temp names sidestep any filename encoding issues
        tmpname = str(self.created)
        self.opener.write(tmpname, data)
        self.created += 1
        self.files[fname] = (tmpname, mode)

    def getfile(self, fname):
        """Return (data, mode) for fname; raise IOError when unknown."""
        try:
            tmpname, mode = self.files[fname]
        except KeyError:
            raise IOError()
        return self.opener.read(tmpname), mode

    def close(self):
        """Delete the backing temp directory, if one was created."""
        if self.opener:
            shutil.rmtree(self.opener.base)
506 506
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw string literals: '\d' in a plain string relies on Python passing
# unknown escapes through, which newer Pythons warn about.
unidesc = re.compile(r'@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
contextdesc = re.compile(r'(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
# EOL-normalization modes accepted by patchfile/applydiff/internalpatch
eolmodes = ['strict', 'crlf', 'lf', 'auto']
511 511
class patchfile(object):
    """Patching state for a single target file.

    Holds the target's lines in memory, applies hunks with offset and
    fuzz handling, and collects rejected hunks for write_rej().
    """
    def __init__(self, ui, fname, backend, store, mode, create, remove,
                 eolmode='strict', copysource=None):
        self.fname = fname
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = mode
        self.copysource = copysource
        self.create = create
        self.remove = remove
        try:
            if copysource is None:
                data, mode = backend.getfile(fname)
                self.exists = True
            else:
                # copies/renames read their content from the filestore
                data, mode = store.getfile(copysource)
                self.exists = backend.exists(fname)
            self.missing = False
            if data:
                self.lines = data.splitlines(True)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        except IOError:
            if create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines to the backend, restoring EOLs per self.eolmode."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        # Print the target filename at most once, as a warning or a note
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk h; return 0 on clean apply, the fuzz factor on a
        fuzzy apply, or -1 when the hunk is rejected."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                # fix: interpolate after _() so the message is looked up
                # in the translation catalog (was _("..." % fname))
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[:] = h.new()
                self.offset += len(h.new())
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old = h.old()
        # patch starts counting at 1 unless we are adding the file
        if h.starta == 0:
            start = 0
        else:
            start = h.starta + self.offset - 1
        orig_start = start
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[start : start + h.lena] = h.new()
                self.offset += h.lenb - h.lena
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)
        if h.hunk[-1][0] != ' ':
            # if the hunk tried to put something at the bottom of the file
            # override the start line and use eof here
            search_start = len(self.lines)
        else:
            search_start = orig_start + self.skew

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old = h.old(fuzzlen, toponly)

                cand = self.findlines(old[0][1:], search_start)
                for l in cand:
                    if diffhelpers.testhunk(old, self.lines, l) == 0:
                        newlines = h.new(fuzzlen, toponly)
                        self.lines[l : l + len(old)] = newlines
                        self.offset += len(newlines) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush pending changes and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
730 730
class hunk(object):
    """One text hunk parsed from a unified or context diff.

    self.a holds the old-side lines (with '-'/' ' markers), self.b the
    new-side lines (markers stripped); self.hunk keeps the raw hunk text
    for reject output.
    """
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        # Parse the "@@ -a,n +b,m @@" header, then read the body lines
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length.  Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        # Context diffs carry the old and new blocks separately, each
        # introduced by a "*** a,b ****" / "--- a,b ----" range line.
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.starta, foo2, aend, foo3 = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # "\ No newline at end of file": drop the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.startb, foo2, bend, foo3 = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            while True:
                # merge the new-side line into self.hunk at the right spot
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # Handle a trailing "\ No newline at end of file" marker
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def fuzzit(self, l, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'.  It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(l)-1)
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            # top and bot now count context in the hunk
            # adjust them if either one is short
            context = max(top, bot, 3)
            if bot < context:
                bot = max(0, fuzz - (context - bot))
            else:
                bot = min(fuzz, bot)
            if top < context:
                top = max(0, fuzz - (context - top))
            else:
                top = min(fuzz, top)

            return l[top:len(l)-bot]
        return l

    def old(self, fuzz=0, toponly=False):
        return self.fuzzit(self.a, fuzz, toponly)

    def new(self, fuzz=0, toponly=False):
        return self.fuzzit(self.b, fuzz, toponly)
951 951
class binhunk(object):
    """A binary (GIT binary patch) hunk. Only understands literals so far.

    Made a new-style class for consistency with the other classes in
    this module; reads the base85 "literal" payload from 'lr' and
    exposes the decoded bytes via new().
    """
    def __init__(self, lr):
        self.text = None
        self.hunk = ['GIT binary patch\n']
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self):
        return [self.text]

    def _read(self, lr):
        # Skip forward to the "literal <size>" line
        line = lr.readline()
        self.hunk.append(line)
        while line and not line.startswith('literal '):
            line = lr.readline()
            self.hunk.append(line)
        if not line:
            raise PatchError(_('could not extract binary patch'))
        size = int(line[8:].rstrip())
        dec = []
        line = lr.readline()
        self.hunk.append(line)
        while len(line) > 1:
            # first char encodes payload length: A-Z => 1..26, a-z => 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            dec.append(base85.b85decode(line[1:-1])[:l])
            line = lr.readline()
            self.hunk.append(line)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            # fix: format arguments must be a tuple; the old code applied
            # '%' to len(text) alone and passed 'size' as a second
            # PatchError argument, raising TypeError on this error path
            raise PatchError(_('binary patch is %d bytes, not %d') %
                             (len(text), size))
        self.text = text
991 991
def parsefilename(str):
    """Extract the filename from a '--- <file>[TAB|SPACE]<stuff>' header.

    The first four characters (the '--- '/'+++ ' prefix) are dropped;
    the name ends at the first tab, or at the first space when there is
    no tab, or at end of line.
    """
    s = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = s.find(sep)
        if cut >= 0:
            return s[:cut]
    return s
1001 1001
def pathstrip(path, strip):
    """Strip 'strip' leading components from path.

    Returns (stripped-prefix, remainder). A run of consecutive slashes
    counts as a single separator. Raises PatchError when path has fewer
    than 'strip' components.
    """
    if strip == 0:
        return '', path.rstrip()
    pathlen = len(path)
    idx = 0
    remaining = strip
    while remaining > 0:
        idx = path.find('/', idx)
        if idx == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        idx += 1
        # swallow '//' runs so they count as one separator
        while idx < pathlen - 1 and path[idx] == '/':
            idx += 1
        remaining -= 1
    return path[:idx].lstrip(), path[idx:].rstrip()
1019 1019
def selectfile(backend, afile_orig, bfile_orig, hunk, strip, gp):
    """Pick the target filename for a hunk.

    Returns (fname, create, remove). 'gp' is the git patch metadata for
    this file, if any; git patches bypass the name-guessing heuristics
    below.
    """
    if gp:
        # Git patches do not play games. Excluding copies from the
        # following heuristic avoids a lot of confusion
        fname = pathstrip(gp.path, strip - 1)[1]
        create = gp.op in ('ADD', 'COPY', 'RENAME')
        remove = gp.op == 'DELETE'
        # NOTE(review): 'missing' is computed but unused on this path
        missing = not create and not backend.exists(fname)
        return fname, create, remove
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    return fname, create, remove
1071 1071
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # underlying stream is not seekable: buffer it fully in memory
        # so we can rewind after the metadata scan
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller re-reads the patch from the beginning
    fp.seek(pos)
    return gitpatches
1097 1097
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context is None (format unknown), True (context diff) or
    # False (unified diff)
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if gitpatches and gitpatches[-1][0] == bfile:
                gp = gitpatches.pop()[1]
            if x.startswith('GIT binary patch'):
                h = binhunk(lr)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp)
            yield 'hunk', h
        elif x.startswith('diff --git'):
            m = gitre.match(x)
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = [('b/' + gp.path, gp) for gp
                              in scangitpatch(lr, x)]
                yield 'git', [g[1] for g in gitpatches
                              if g[1].op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # emit pending metadata-only entries preceding this file
            while bfile != gitpatches[-1][0]:
                gp = gitpatches.pop()[1]
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
            gp = gitpatches[-1][1]
            # copy/rename + modify should modify target, not source
            if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
                afile = bfile
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # flush remaining metadata-only git entries (mode changes, deletes)
    while gitpatches:
        gp = gitpatches.pop()[1]
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp)
1196 1196
def applydiff(ui, fp, changed, backend, store, strip=1, eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    The dict 'changed' is filled in with all of the filenames changed
    by the patch. Returns 0 for a clean patch, -1 if any rejects were
    found and 1 if there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # thin wrapper binding the default patchfile factory to _applydiff
    return _applydiff(ui, fp, patchfile, backend, store, changed, strip=strip,
                      eolmode=eolmode)
1210 1210
def _applydiff(ui, fp, patcher, backend, store, changed, strip=1,
               eolmode='strict'):
    """Worker for applydiff(): apply the events from iterhunks(fp),
    using 'patcher' (the patchfile class) to build per-file patchers.
    See applydiff() for the other arguments and the return value."""

    def pstrip(p):
        return pathstrip(p, strip - 1)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret >= 0:
                changed.setdefault(current_file.fname, None)
            if ret > 0:
                # hunk applied, but with fuzz
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            copysource = None
            if gp:
                # git metadata: perform deletes/renames/copies up front
                path = pstrip(gp.path)
                if gp.oldpath:
                    copysource = pstrip(gp.oldpath)
                changed[path] = gp
                if gp.op == 'DELETE':
                    backend.unlink(path)
                    continue
                if gp.op == 'RENAME':
                    backend.unlink(copysource)
                if not first_hunk:
                    data, mode = None, None
                    if gp.op in ('RENAME', 'COPY'):
                        data, mode = store.getfile(copysource)
                    if gp.mode:
                        mode = gp.mode
                        if gp.op == 'ADD':
                            # Added files without content have no hunk and
                            # must be created
                            data = ''
                    if data or mode:
                        if (gp.op in ('ADD', 'RENAME', 'COPY')
                            and backend.exists(path)):
                            raise PatchError(_("cannot create %s: destination "
                                               "already exists") % path)
                        backend.setfile(path, data, mode, copysource)
            if not first_hunk:
                continue
            try:
                mode = gp and gp.mode or None
                current_file, create, remove = selectfile(
                    backend, afile, bfile, first_hunk, strip, gp)
                current_file = patcher(ui, current_file, backend, store, mode,
                                       create, remove, eolmode=eolmode,
                                       copysource=copysource)
            except PatchError, inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash pristine copy/rename sources before they are modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
1290 1290
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    'files' is filled in with the patched filenames (as reported by the
    external tool's output); addremove is run on them afterwards.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        # scrape the external tool's output for filenames, fuzz and
        # failure reports
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.setdefault(pf, None)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            cfiles = list(files)
            cwd = repo.getcwd()
            if cwd:
                # fix: iterate 'cfiles' (the code previously referenced an
                # undefined name 'cfile', raising NameError whenever the
                # current directory is a subdirectory of the repo root)
                cfiles = [util.pathto(repo.root, cwd, f)
                          for f in cfiles]
            scmutil.addremove(repo, cfiles, similarity=similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1337 1337
def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
                  similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor.

    'patchobj' may be a filename or an open file-like object. 'files'
    is filled in with the patched filenames."""

    if files is None:
        files = {}
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    backend = workingbackend(ui, repo, similarity)
    try:
        # patchobj is either a path we can open, or already a file object
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, files, backend, store, strip=strip,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        # record everything the backend touched, then drop the temp store
        files.update(dict.fromkeys(backend.close()))
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1368 1368
def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    # an explicit [ui] patch setting selects the external tool path
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = {}
    try:
        if patcher:
            return _externalpatch(ui, repo, patcher, patchname, strip,
                                  files, similarity)
        return internalpatch(ui, repo, patchname, strip, files, eolmode,
                             similarity)
    except PatchError, err:
        raise util.Abort(str(err))
1393 1393
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of filenames the patch at patchpath would touch,
    without applying it. Rename sources are included."""
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    changed.add(pathstrip(gp.path, strip - 1)[1])
                    if gp.op == 'RENAME':
                        changed.add(pathstrip(gp.oldpath, strip - 1)[1])
                if not first_hunk:
                    continue
                current_file, create, remove = selectfile(
                    backend, afile, bfile, first_hunk, strip, gp)
                changed.add(current_file)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()
1416 1416
def b85diff(to, tn):
    '''print base85-encoded binary diff'''
    def gitindex(text):
        # SHA-1 of a git blob object ("blob <len>\0<data>")
        if not text:
            return hex(nullid)
        s = util.sha1('blob %d\0' % len(text))
        s.update(text)
        return s.hexdigest()

    def fmtline(line):
        # first char encodes payload length: A-Z for 1..26, a-z for 27..52
        l = len(line)
        if l <= 26:
            lenchar = chr(ord('A') + l - 1)
        else:
            lenchar = chr(l - 26 + ord('a') - 1)
        return '%c%s\n' % (lenchar, base85.b85encode(line, True))

    def chunk(text, csize=52):
        for start in xrange(0, len(text), csize):
            yield text[start:start + csize]

    tohash = gitindex(to)
    tnhash = gitindex(tn)
    if tohash == tnhash:
        return ""

    # TODO: deltas
    pieces = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
              (tohash, tnhash, len(tn))]
    for piece in chunk(zlib.compress(tn)):
        pieces.append(fmtline(piece))
    pieces.append('\n')
    return ''.join(pieces)
1454 1454
class GitDiffRequired(Exception):
    """Raised by diff()'s losedata callback to force a git-format retry."""
    pass
1457 1457
def diffopts(ui, opts=None, untrusted=False):
    """Build an mdiff.diffopts from command-line opts and [diff] config.

    A truthy command-line option wins; otherwise the value comes from
    the [diff] configuration section.
    """
    def fromconfig(key, name=None, getter=ui.configbool):
        # explicit (truthy) command-line value overrides configuration
        value = opts and opts.get(key)
        if value:
            return value
        return getter('diff', name or key, None, untrusted=untrusted)
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=fromconfig('git'),
        nodates=fromconfig('nodates'),
        showfunc=fromconfig('show_function', 'showfunc'),
        ignorews=fromconfig('ignore_all_space', 'ignorews'),
        ignorewsamount=fromconfig('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=fromconfig('ignore_blank_lines', 'ignoreblanklines'),
        context=fromconfig('unified', getter=ui.config))
1471 1471
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    # small (20-entry) LRU cache of filelogs, so repeatedly diffed
    # files (e.g. via copy sources) do not reopen their filelog
    def lrugetfilectx():
        cache = {}
        order = []
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.pop(0)]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    # revision ids for the 'diff -r ... -r ...' header lines; omitted
    # entirely in quiet mode
    revs = None
    if not repo.ui.quiet:
        hexfunc = repo.ui.debugflag and hex or short
        revs = [hexfunc(node) for node in [node1, node2] if node]

    # copy/rename data is only needed when emitting (or possibly
    # upgrading to) git-format diffs
    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]

    difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
        modified, added, removed, copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                # consult the caller; if it declines the loss, retry in
                # git format
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired()
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1546 1546
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # prefix -> label table, checked in order; longer prefixes that
    # share a first character ('---', '+++') come before '-'/'+'
    prefixes = [('diff', 'diff.diffline'),
                ('copy', 'diff.extended'),
                ('rename', 'diff.extended'),
                ('old', 'diff.extended'),
                ('new', 'diff.extended'),
                ('deleted', 'diff.extended'),
                ('---', 'diff.file_a'),
                ('+++', 'diff.file_b'),
                ('@@', 'diff.hunk'),
                ('-', 'diff.deleted'),
                ('+', 'diff.inserted')]

    for chunk in func(*args, **kw):
        for n, line in enumerate(chunk.split('\n')):
            if n:
                # re-emit the newline that split() consumed
                yield ('\n', '')
            body = line
            if line and line[0] in '+-':
                # trailing whitespace is highlighted, but only on
                # changed lines
                body = line.rstrip()
            for prefix, label in prefixes:
                if body.startswith(prefix):
                    yield (body, label)
                    break
            else:
                yield (line, '')
            if body != line:
                yield (line[len(body):], 'diff.trailingwhitespace')
1578 1578
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()

    Accepts the same arguments as diff(); each output line gets the
    label assigned by difflabel() based on its prefix.
    '''
    return difflabel(diff, *args, **kw)
1582 1582
1583 1583
1584 1584 def _addmodehdr(header, omode, nmode):
1585 1585 if omode != nmode:
1586 1586 header.append('old mode %s\n' % omode)
1587 1587 header.append('new mode %s\n' % nmode)
1588 1588
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    '''Generate diff text (headers and hunks) for the given file sets
    between ctx1 and ctx2.

    losedatafn(f) is invoked whenever a plain (non-git) diff would lose
    information; it may raise to abort (see diff()).  prefix is joined
    onto displayed file names (used for subrepos).
    '''

    def join(f):
        # prepend the subrepo prefix (if any) for display
        return os.path.join(prefix, f)

    date1 = util.datestr(ctx1.date())
    man1 = ctx1.manifest()

    gone = set()
    # manifest flag -> git file mode
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # reverse copy map: source -> destination
    copyto = dict([(v, k) for k, v in copy.items()])

    if opts.git:
        # git diffs carry no '-r' revision header lines
        revs = None

    for f in sorted(modified + added + removed):
        to = None
        tn = None
        dodiff = True
        header = []
        if f in man1:
            to = getfilectx(f, ctx1).data()
        if f not in removed:
            tn = getfilectx(f, ctx2).data()
        a, b = f, f
        if opts.git or losedatafn:
            if f in added:
                mode = gitmode[ctx2.flags(f)]
                if f in copy or f in copyto:
                    if opts.git:
                        if f in copy:
                            a = copy[f]
                        else:
                            a = copyto[f]
                        omode = gitmode[man1.flags(a)]
                        _addmodehdr(header, omode, mode)
                        # a rename is a copy whose source also went away;
                        # only the first destination reports the rename
                        if a in removed and a not in gone:
                            op = 'rename'
                            gone.add(a)
                        else:
                            op = 'copy'
                        header.append('%s from %s\n' % (op, join(a)))
                        header.append('%s to %s\n' % (op, join(f)))
                        # diff against the copy source, not an empty file
                        to = getfilectx(a, ctx1).data()
                    else:
                        losedatafn(f)
                else:
                    if opts.git:
                        header.append('new file mode %s\n' % mode)
                    elif ctx2.flags(f):
                        # plain diffs cannot express exec/symlink flags
                        losedatafn(f)
                # In theory, if tn was copied or renamed we should check
                # if the source is binary too but the copy record already
                # forces git mode.
                if util.binary(tn):
                    if opts.git:
                        dodiff = 'binary'
                    else:
                        losedatafn(f)
                if not opts.git and not tn:
                    # regular diffs cannot represent new empty file
                    losedatafn(f)
            elif f in removed:
                if opts.git:
                    # have we already reported a copy above?
                    if ((f in copy and copy[f] in added
                         and copyto[copy[f]] == f) or
                        (f in copyto and copyto[f] in added
                         and copy[copyto[f]] == f)):
                        dodiff = False
                    else:
                        header.append('deleted file mode %s\n' %
                                      gitmode[man1.flags(f)])
                elif not to or util.binary(to):
                    # regular diffs cannot represent empty file deletion
                    losedatafn(f)
            else:
                # modified file: flag changes and binary content only
                # survive in git format
                oflag = man1.flags(f)
                nflag = ctx2.flags(f)
                binary = util.binary(to) or util.binary(tn)
                if opts.git:
                    _addmodehdr(header, gitmode[oflag], gitmode[nflag])
                    if binary:
                        dodiff = 'binary'
                elif binary or nflag != oflag:
                    losedatafn(f)
            if opts.git:
                header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))

        if dodiff:
            if dodiff == 'binary':
                text = b85diff(to, tn)
            else:
                text = mdiff.unidiff(to, date1,
                                    # ctx2 date may be dynamic
                                    tn, util.datestr(ctx2.date()),
                                    join(a), join(b), revs, opts=opts)
            # a lone diffline header with no hunks is not emitted
            if header and (text or len(header) > 1):
                yield ''.join(header)
            if text:
                yield text
1692 1692
def diffstatsum(stats):
    """Reduce (filename, adds, removes, isbinary) tuples to the totals
    (maxfile, maxtotal, addtotal, removetotal, binary)."""
    widest = biggest = addtotal = removetotal = 0
    anybinary = False
    for filename, adds, removes, isbinary in stats:
        widest = max(widest, encoding.colwidth(filename))
        biggest = max(biggest, adds + removes)
        addtotal += adds
        removetotal += removes
        anybinary = anybinary or isbinary

    return widest, biggest, addtotal, removetotal, anybinary
1703 1703
def diffstatdata(lines):
    """Parse diff text lines into (filename, adds, removes, isbinary)
    tuples, one per file; a file with no +/- lines counts as binary."""
    # matches 'diff -r REV filename' headers from plain hg diffs
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename = None
    adds = removes = 0

    def flush():
        if filename:
            results.append((filename, adds, removes,
                            adds == 0 and removes == 0))

    for line in lines:
        if line.startswith('diff'):
            # new file header: record the previous file first
            flush()
            adds = removes = 0
            if line.startswith('diff --git'):
                filename = gitre.search(line).group(1)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++'):
            adds += 1
        elif line.startswith('-') and not line.startswith('---'):
            removes += 1
    flush()
    return results
1731 1731
def diffstat(lines, width=80, git=False):
    """Render diff text as a diffstat histogram string."""
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # the count column must be wide enough for 'Bin' markers
    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    # ' name | count graph' padding costs 6 columns; never squeeze the
    # graph below 10
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    rows = []
    for filename, adds, removes, isbinary in stats:
        if git and isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        padding = ' ' * (maxname - encoding.colwidth(filename))
        graph = '+' * scale(adds) + '-' * scale(removes)
        rows.append(' %s%s | %*s %s\n' %
                    (filename, padding, countwidth, count, graph))

    if stats:
        rows.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
                    % (len(stats), totaladds, totalremoves))

    return ''.join(rows)
1768 1768
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for row in diffstat(*args, **kw).splitlines():
        if row and row[-1] in '+-':
            # histogram row: split off the graph and label its runs
            name, graph = row.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((r'\++', 'diffstat.inserted'),
                                   (r'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        else:
            yield (row, '')
        yield ('\n', '')
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now