patch.internalpatch: accept a prefix parameter
Siddharth Agarwal
r24254:60c279ab default
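This changeset threads a new prefix argument through patch.internalpatch. The only changed line in the hgext/record.py diff below is its call into patch.internalpatch (line 607), which now passes an empty prefix as the fifth positional argument, between the strip count and the keyword arguments. As a rough illustration of what such a prefix means when applying a patch, here is a minimal, self-contained sketch; the helper name and exact behaviour are assumptions for illustration, not Mercurial's implementation:

    import posixpath

    def transformpath(path, strip, prefix=''):
        # Illustrative only: drop 'strip' leading components from a path
        # taken from a diff (like patch -p<strip>), then prepend an
        # optional prefix so the file is patched inside a subdirectory.
        parts = path.split('/')
        if strip >= len(parts):
            raise ValueError('cannot strip %d components from %r'
                             % (strip, path))
        stripped = '/'.join(parts[strip:])
        return posixpath.join(prefix, stripped) if prefix else stripped

    # With the empty prefix that record.py now passes, only the usual
    # strip is applied:
    assert transformpath('a/hgext/record.py', 1) == 'hgext/record.py'
    # A non-empty prefix redirects the patched file into a subdirectory:
    assert transformpath('a/hgext/record.py', 1, 'vendor') == 'vendor/hgext/record.py'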
hgext/record.py
@@ -1,672 +1,672 @@
1 # record.py
1 # record.py
2 #
2 #
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''commands to interactively select changes for commit/qrefresh'''
8 '''commands to interactively select changes for commit/qrefresh'''
9
9
10 from mercurial.i18n import _
10 from mercurial.i18n import _
11 from mercurial import cmdutil, commands, extensions, hg, patch
11 from mercurial import cmdutil, commands, extensions, hg, patch
12 from mercurial import util
12 from mercurial import util
13 import copy, cStringIO, errno, os, re, shutil, tempfile
13 import copy, cStringIO, errno, os, re, shutil, tempfile
14
14
15 cmdtable = {}
15 cmdtable = {}
16 command = cmdutil.command(cmdtable)
16 command = cmdutil.command(cmdtable)
17 testedwith = 'internal'
17 testedwith = 'internal'
18
18
19 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
19 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
20
20
21 def scanpatch(fp):
21 def scanpatch(fp):
22 """like patch.iterhunks, but yield different events
22 """like patch.iterhunks, but yield different events
23
23
24 - ('file', [header_lines + fromfile + tofile])
24 - ('file', [header_lines + fromfile + tofile])
25 - ('context', [context_lines])
25 - ('context', [context_lines])
26 - ('hunk', [hunk_lines])
26 - ('hunk', [hunk_lines])
27 - ('range', (-start,len, +start,len, proc))
27 - ('range', (-start,len, +start,len, proc))
28 """
28 """
29 lr = patch.linereader(fp)
29 lr = patch.linereader(fp)
30
30
31 def scanwhile(first, p):
31 def scanwhile(first, p):
32 """scan lr while predicate holds"""
32 """scan lr while predicate holds"""
33 lines = [first]
33 lines = [first]
34 while True:
34 while True:
35 line = lr.readline()
35 line = lr.readline()
36 if not line:
36 if not line:
37 break
37 break
38 if p(line):
38 if p(line):
39 lines.append(line)
39 lines.append(line)
40 else:
40 else:
41 lr.push(line)
41 lr.push(line)
42 break
42 break
43 return lines
43 return lines
44
44
45 while True:
45 while True:
46 line = lr.readline()
46 line = lr.readline()
47 if not line:
47 if not line:
48 break
48 break
49 if line.startswith('diff --git a/') or line.startswith('diff -r '):
49 if line.startswith('diff --git a/') or line.startswith('diff -r '):
50 def notheader(line):
50 def notheader(line):
51 s = line.split(None, 1)
51 s = line.split(None, 1)
52 return not s or s[0] not in ('---', 'diff')
52 return not s or s[0] not in ('---', 'diff')
53 header = scanwhile(line, notheader)
53 header = scanwhile(line, notheader)
54 fromfile = lr.readline()
54 fromfile = lr.readline()
55 if fromfile.startswith('---'):
55 if fromfile.startswith('---'):
56 tofile = lr.readline()
56 tofile = lr.readline()
57 header += [fromfile, tofile]
57 header += [fromfile, tofile]
58 else:
58 else:
59 lr.push(fromfile)
59 lr.push(fromfile)
60 yield 'file', header
60 yield 'file', header
61 elif line[0] == ' ':
61 elif line[0] == ' ':
62 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
62 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
63 elif line[0] in '-+':
63 elif line[0] in '-+':
64 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
64 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
65 else:
65 else:
66 m = lines_re.match(line)
66 m = lines_re.match(line)
67 if m:
67 if m:
68 yield 'range', m.groups()
68 yield 'range', m.groups()
69 else:
69 else:
70 yield 'other', line
70 yield 'other', line
71
71
72 class header(object):
72 class header(object):
73 """patch header
73 """patch header
74
74
75 XXX shouldn't we move this to mercurial/patch.py ?
75 XXX shouldn't we move this to mercurial/patch.py ?
76 """
76 """
77 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
77 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
78 diff_re = re.compile('diff -r .* (.*)$')
78 diff_re = re.compile('diff -r .* (.*)$')
79 allhunks_re = re.compile('(?:index|deleted file) ')
79 allhunks_re = re.compile('(?:index|deleted file) ')
80 pretty_re = re.compile('(?:new file|deleted file) ')
80 pretty_re = re.compile('(?:new file|deleted file) ')
81 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
81 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
82
82
83 def __init__(self, header):
83 def __init__(self, header):
84 self.header = header
84 self.header = header
85 self.hunks = []
85 self.hunks = []
86
86
87 def binary(self):
87 def binary(self):
88 return util.any(h.startswith('index ') for h in self.header)
88 return util.any(h.startswith('index ') for h in self.header)
89
89
90 def pretty(self, fp):
90 def pretty(self, fp):
91 for h in self.header:
91 for h in self.header:
92 if h.startswith('index '):
92 if h.startswith('index '):
93 fp.write(_('this modifies a binary file (all or nothing)\n'))
93 fp.write(_('this modifies a binary file (all or nothing)\n'))
94 break
94 break
95 if self.pretty_re.match(h):
95 if self.pretty_re.match(h):
96 fp.write(h)
96 fp.write(h)
97 if self.binary():
97 if self.binary():
98 fp.write(_('this is a binary file\n'))
98 fp.write(_('this is a binary file\n'))
99 break
99 break
100 if h.startswith('---'):
100 if h.startswith('---'):
101 fp.write(_('%d hunks, %d lines changed\n') %
101 fp.write(_('%d hunks, %d lines changed\n') %
102 (len(self.hunks),
102 (len(self.hunks),
103 sum([max(h.added, h.removed) for h in self.hunks])))
103 sum([max(h.added, h.removed) for h in self.hunks])))
104 break
104 break
105 fp.write(h)
105 fp.write(h)
106
106
107 def write(self, fp):
107 def write(self, fp):
108 fp.write(''.join(self.header))
108 fp.write(''.join(self.header))
109
109
110 def allhunks(self):
110 def allhunks(self):
111 return util.any(self.allhunks_re.match(h) for h in self.header)
111 return util.any(self.allhunks_re.match(h) for h in self.header)
112
112
113 def files(self):
113 def files(self):
114 match = self.diffgit_re.match(self.header[0])
114 match = self.diffgit_re.match(self.header[0])
115 if match:
115 if match:
116 fromfile, tofile = match.groups()
116 fromfile, tofile = match.groups()
117 if fromfile == tofile:
117 if fromfile == tofile:
118 return [fromfile]
118 return [fromfile]
119 return [fromfile, tofile]
119 return [fromfile, tofile]
120 else:
120 else:
121 return self.diff_re.match(self.header[0]).groups()
121 return self.diff_re.match(self.header[0]).groups()
122
122
123 def filename(self):
123 def filename(self):
124 return self.files()[-1]
124 return self.files()[-1]
125
125
126 def __repr__(self):
126 def __repr__(self):
127 return '<header %s>' % (' '.join(map(repr, self.files())))
127 return '<header %s>' % (' '.join(map(repr, self.files())))
128
128
129 def special(self):
129 def special(self):
130 return util.any(self.special_re.match(h) for h in self.header)
130 return util.any(self.special_re.match(h) for h in self.header)
131
131
132 def countchanges(hunk):
132 def countchanges(hunk):
133 """hunk -> (n+,n-)"""
133 """hunk -> (n+,n-)"""
134 add = len([h for h in hunk if h[0] == '+'])
134 add = len([h for h in hunk if h[0] == '+'])
135 rem = len([h for h in hunk if h[0] == '-'])
135 rem = len([h for h in hunk if h[0] == '-'])
136 return add, rem
136 return add, rem
137
137
138 class hunk(object):
138 class hunk(object):
139 """patch hunk
139 """patch hunk
140
140
141 XXX shouldn't we merge this with patch.hunk ?
141 XXX shouldn't we merge this with patch.hunk ?
142 """
142 """
143 maxcontext = 3
143 maxcontext = 3
144
144
145 def __init__(self, header, fromline, toline, proc, before, hunk, after):
145 def __init__(self, header, fromline, toline, proc, before, hunk, after):
146 def trimcontext(number, lines):
146 def trimcontext(number, lines):
147 delta = len(lines) - self.maxcontext
147 delta = len(lines) - self.maxcontext
148 if False and delta > 0:
148 if False and delta > 0:
149 return number + delta, lines[:self.maxcontext]
149 return number + delta, lines[:self.maxcontext]
150 return number, lines
150 return number, lines
151
151
152 self.header = header
152 self.header = header
153 self.fromline, self.before = trimcontext(fromline, before)
153 self.fromline, self.before = trimcontext(fromline, before)
154 self.toline, self.after = trimcontext(toline, after)
154 self.toline, self.after = trimcontext(toline, after)
155 self.proc = proc
155 self.proc = proc
156 self.hunk = hunk
156 self.hunk = hunk
157 self.added, self.removed = countchanges(self.hunk)
157 self.added, self.removed = countchanges(self.hunk)
158
158
159 def write(self, fp):
159 def write(self, fp):
160 delta = len(self.before) + len(self.after)
160 delta = len(self.before) + len(self.after)
161 if self.after and self.after[-1] == '\\ No newline at end of file\n':
161 if self.after and self.after[-1] == '\\ No newline at end of file\n':
162 delta -= 1
162 delta -= 1
163 fromlen = delta + self.removed
163 fromlen = delta + self.removed
164 tolen = delta + self.added
164 tolen = delta + self.added
165 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
165 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
166 (self.fromline, fromlen, self.toline, tolen,
166 (self.fromline, fromlen, self.toline, tolen,
167 self.proc and (' ' + self.proc)))
167 self.proc and (' ' + self.proc)))
168 fp.write(''.join(self.before + self.hunk + self.after))
168 fp.write(''.join(self.before + self.hunk + self.after))
169
169
170 pretty = write
170 pretty = write
171
171
172 def filename(self):
172 def filename(self):
173 return self.header.filename()
173 return self.header.filename()
174
174
175 def __repr__(self):
175 def __repr__(self):
176 return '<hunk %r@%d>' % (self.filename(), self.fromline)
176 return '<hunk %r@%d>' % (self.filename(), self.fromline)
177
177
178 def parsepatch(fp):
178 def parsepatch(fp):
179 """patch -> [] of headers -> [] of hunks """
179 """patch -> [] of headers -> [] of hunks """
180 class parser(object):
180 class parser(object):
181 """patch parsing state machine"""
181 """patch parsing state machine"""
182 def __init__(self):
182 def __init__(self):
183 self.fromline = 0
183 self.fromline = 0
184 self.toline = 0
184 self.toline = 0
185 self.proc = ''
185 self.proc = ''
186 self.header = None
186 self.header = None
187 self.context = []
187 self.context = []
188 self.before = []
188 self.before = []
189 self.hunk = []
189 self.hunk = []
190 self.headers = []
190 self.headers = []
191
191
192 def addrange(self, limits):
192 def addrange(self, limits):
193 fromstart, fromend, tostart, toend, proc = limits
193 fromstart, fromend, tostart, toend, proc = limits
194 self.fromline = int(fromstart)
194 self.fromline = int(fromstart)
195 self.toline = int(tostart)
195 self.toline = int(tostart)
196 self.proc = proc
196 self.proc = proc
197
197
198 def addcontext(self, context):
198 def addcontext(self, context):
199 if self.hunk:
199 if self.hunk:
200 h = hunk(self.header, self.fromline, self.toline, self.proc,
200 h = hunk(self.header, self.fromline, self.toline, self.proc,
201 self.before, self.hunk, context)
201 self.before, self.hunk, context)
202 self.header.hunks.append(h)
202 self.header.hunks.append(h)
203 self.fromline += len(self.before) + h.removed
203 self.fromline += len(self.before) + h.removed
204 self.toline += len(self.before) + h.added
204 self.toline += len(self.before) + h.added
205 self.before = []
205 self.before = []
206 self.hunk = []
206 self.hunk = []
207 self.proc = ''
207 self.proc = ''
208 self.context = context
208 self.context = context
209
209
210 def addhunk(self, hunk):
210 def addhunk(self, hunk):
211 if self.context:
211 if self.context:
212 self.before = self.context
212 self.before = self.context
213 self.context = []
213 self.context = []
214 self.hunk = hunk
214 self.hunk = hunk
215
215
216 def newfile(self, hdr):
216 def newfile(self, hdr):
217 self.addcontext([])
217 self.addcontext([])
218 h = header(hdr)
218 h = header(hdr)
219 self.headers.append(h)
219 self.headers.append(h)
220 self.header = h
220 self.header = h
221
221
222 def addother(self, line):
222 def addother(self, line):
223 pass # 'other' lines are ignored
223 pass # 'other' lines are ignored
224
224
225 def finished(self):
225 def finished(self):
226 self.addcontext([])
226 self.addcontext([])
227 return self.headers
227 return self.headers
228
228
229 transitions = {
229 transitions = {
230 'file': {'context': addcontext,
230 'file': {'context': addcontext,
231 'file': newfile,
231 'file': newfile,
232 'hunk': addhunk,
232 'hunk': addhunk,
233 'range': addrange},
233 'range': addrange},
234 'context': {'file': newfile,
234 'context': {'file': newfile,
235 'hunk': addhunk,
235 'hunk': addhunk,
236 'range': addrange,
236 'range': addrange,
237 'other': addother},
237 'other': addother},
238 'hunk': {'context': addcontext,
238 'hunk': {'context': addcontext,
239 'file': newfile,
239 'file': newfile,
240 'range': addrange},
240 'range': addrange},
241 'range': {'context': addcontext,
241 'range': {'context': addcontext,
242 'hunk': addhunk},
242 'hunk': addhunk},
243 'other': {'other': addother},
243 'other': {'other': addother},
244 }
244 }
245
245
246 p = parser()
246 p = parser()
247
247
248 state = 'context'
248 state = 'context'
249 for newstate, data in scanpatch(fp):
249 for newstate, data in scanpatch(fp):
250 try:
250 try:
251 p.transitions[state][newstate](p, data)
251 p.transitions[state][newstate](p, data)
252 except KeyError:
252 except KeyError:
253 raise patch.PatchError('unhandled transition: %s -> %s' %
253 raise patch.PatchError('unhandled transition: %s -> %s' %
254 (state, newstate))
254 (state, newstate))
255 state = newstate
255 state = newstate
256 return p.finished()
256 return p.finished()
257
257
258 def filterpatch(ui, headers):
258 def filterpatch(ui, headers):
259 """Interactively filter patch chunks into applied-only chunks"""
259 """Interactively filter patch chunks into applied-only chunks"""
260
260
261 def prompt(skipfile, skipall, query, chunk):
261 def prompt(skipfile, skipall, query, chunk):
262 """prompt query, and process base inputs
262 """prompt query, and process base inputs
263
263
264 - y/n for the rest of file
264 - y/n for the rest of file
265 - y/n for the rest
265 - y/n for the rest
266 - ? (help)
266 - ? (help)
267 - q (quit)
267 - q (quit)
268
268
269 Return True/False and possibly updated skipfile and skipall.
269 Return True/False and possibly updated skipfile and skipall.
270 """
270 """
271 newpatches = None
271 newpatches = None
272 if skipall is not None:
272 if skipall is not None:
273 return skipall, skipfile, skipall, newpatches
273 return skipall, skipfile, skipall, newpatches
274 if skipfile is not None:
274 if skipfile is not None:
275 return skipfile, skipfile, skipall, newpatches
275 return skipfile, skipfile, skipall, newpatches
276 while True:
276 while True:
277 resps = _('[Ynesfdaq?]'
277 resps = _('[Ynesfdaq?]'
278 '$$ &Yes, record this change'
278 '$$ &Yes, record this change'
279 '$$ &No, skip this change'
279 '$$ &No, skip this change'
280 '$$ &Edit this change manually'
280 '$$ &Edit this change manually'
281 '$$ &Skip remaining changes to this file'
281 '$$ &Skip remaining changes to this file'
282 '$$ Record remaining changes to this &file'
282 '$$ Record remaining changes to this &file'
283 '$$ &Done, skip remaining changes and files'
283 '$$ &Done, skip remaining changes and files'
284 '$$ Record &all changes to all remaining files'
284 '$$ Record &all changes to all remaining files'
285 '$$ &Quit, recording no changes'
285 '$$ &Quit, recording no changes'
286 '$$ &? (display help)')
286 '$$ &? (display help)')
287 r = ui.promptchoice("%s %s" % (query, resps))
287 r = ui.promptchoice("%s %s" % (query, resps))
288 ui.write("\n")
288 ui.write("\n")
289 if r == 8: # ?
289 if r == 8: # ?
290 for c, t in ui.extractchoices(resps)[1]:
290 for c, t in ui.extractchoices(resps)[1]:
291 ui.write('%s - %s\n' % (c, t.lower()))
291 ui.write('%s - %s\n' % (c, t.lower()))
292 continue
292 continue
293 elif r == 0: # yes
293 elif r == 0: # yes
294 ret = True
294 ret = True
295 elif r == 1: # no
295 elif r == 1: # no
296 ret = False
296 ret = False
297 elif r == 2: # Edit patch
297 elif r == 2: # Edit patch
298 if chunk is None:
298 if chunk is None:
299 ui.write(_('cannot edit patch for whole file'))
299 ui.write(_('cannot edit patch for whole file'))
300 ui.write("\n")
300 ui.write("\n")
301 continue
301 continue
302 if chunk.header.binary():
302 if chunk.header.binary():
303 ui.write(_('cannot edit patch for binary file'))
303 ui.write(_('cannot edit patch for binary file'))
304 ui.write("\n")
304 ui.write("\n")
305 continue
305 continue
306 # Patch comment based on the Git one (based on comment at end of
306 # Patch comment based on the Git one (based on comment at end of
307 # http://mercurial.selenic.com/wiki/RecordExtension)
307 # http://mercurial.selenic.com/wiki/RecordExtension)
308 phelp = '---' + _("""
308 phelp = '---' + _("""
309 To remove '-' lines, make them ' ' lines (context).
309 To remove '-' lines, make them ' ' lines (context).
310 To remove '+' lines, delete them.
310 To remove '+' lines, delete them.
311 Lines starting with # will be removed from the patch.
311 Lines starting with # will be removed from the patch.
312
312
313 If the patch applies cleanly, the edited hunk will immediately be
313 If the patch applies cleanly, the edited hunk will immediately be
314 added to the record list. If it does not apply cleanly, a rejects
314 added to the record list. If it does not apply cleanly, a rejects
315 file will be generated: you can use that when you try again. If
315 file will be generated: you can use that when you try again. If
316 all lines of the hunk are removed, then the edit is aborted and
316 all lines of the hunk are removed, then the edit is aborted and
317 the hunk is left unchanged.
317 the hunk is left unchanged.
318 """)
318 """)
319 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
319 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
320 suffix=".diff", text=True)
320 suffix=".diff", text=True)
321 ncpatchfp = None
321 ncpatchfp = None
322 try:
322 try:
323 # Write the initial patch
323 # Write the initial patch
324 f = os.fdopen(patchfd, "w")
324 f = os.fdopen(patchfd, "w")
325 chunk.header.write(f)
325 chunk.header.write(f)
326 chunk.write(f)
326 chunk.write(f)
327 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
327 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
328 f.close()
328 f.close()
329 # Start the editor and wait for it to complete
329 # Start the editor and wait for it to complete
330 editor = ui.geteditor()
330 editor = ui.geteditor()
331 ui.system("%s \"%s\"" % (editor, patchfn),
331 ui.system("%s \"%s\"" % (editor, patchfn),
332 environ={'HGUSER': ui.username()},
332 environ={'HGUSER': ui.username()},
333 onerr=util.Abort, errprefix=_("edit failed"))
333 onerr=util.Abort, errprefix=_("edit failed"))
334 # Remove comment lines
334 # Remove comment lines
335 patchfp = open(patchfn)
335 patchfp = open(patchfn)
336 ncpatchfp = cStringIO.StringIO()
336 ncpatchfp = cStringIO.StringIO()
337 for line in patchfp:
337 for line in patchfp:
338 if not line.startswith('#'):
338 if not line.startswith('#'):
339 ncpatchfp.write(line)
339 ncpatchfp.write(line)
340 patchfp.close()
340 patchfp.close()
341 ncpatchfp.seek(0)
341 ncpatchfp.seek(0)
342 newpatches = parsepatch(ncpatchfp)
342 newpatches = parsepatch(ncpatchfp)
343 finally:
343 finally:
344 os.unlink(patchfn)
344 os.unlink(patchfn)
345 del ncpatchfp
345 del ncpatchfp
346 # Signal that the chunk shouldn't be applied as-is, but
346 # Signal that the chunk shouldn't be applied as-is, but
347 # provide the new patch to be used instead.
347 # provide the new patch to be used instead.
348 ret = False
348 ret = False
349 elif r == 3: # Skip
349 elif r == 3: # Skip
350 ret = skipfile = False
350 ret = skipfile = False
351 elif r == 4: # file (Record remaining)
351 elif r == 4: # file (Record remaining)
352 ret = skipfile = True
352 ret = skipfile = True
353 elif r == 5: # done, skip remaining
353 elif r == 5: # done, skip remaining
354 ret = skipall = False
354 ret = skipall = False
355 elif r == 6: # all
355 elif r == 6: # all
356 ret = skipall = True
356 ret = skipall = True
357 elif r == 7: # quit
357 elif r == 7: # quit
358 raise util.Abort(_('user quit'))
358 raise util.Abort(_('user quit'))
359 return ret, skipfile, skipall, newpatches
359 return ret, skipfile, skipall, newpatches
360
360
361 seen = set()
361 seen = set()
362 applied = {} # 'filename' -> [] of chunks
362 applied = {} # 'filename' -> [] of chunks
363 skipfile, skipall = None, None
363 skipfile, skipall = None, None
364 pos, total = 1, sum(len(h.hunks) for h in headers)
364 pos, total = 1, sum(len(h.hunks) for h in headers)
365 for h in headers:
365 for h in headers:
366 pos += len(h.hunks)
366 pos += len(h.hunks)
367 skipfile = None
367 skipfile = None
368 fixoffset = 0
368 fixoffset = 0
369 hdr = ''.join(h.header)
369 hdr = ''.join(h.header)
370 if hdr in seen:
370 if hdr in seen:
371 continue
371 continue
372 seen.add(hdr)
372 seen.add(hdr)
373 if skipall is None:
373 if skipall is None:
374 h.pretty(ui)
374 h.pretty(ui)
375 msg = (_('examine changes to %s?') %
375 msg = (_('examine changes to %s?') %
376 _(' and ').join("'%s'" % f for f in h.files()))
376 _(' and ').join("'%s'" % f for f in h.files()))
377 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
377 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
378 if not r:
378 if not r:
379 continue
379 continue
380 applied[h.filename()] = [h]
380 applied[h.filename()] = [h]
381 if h.allhunks():
381 if h.allhunks():
382 applied[h.filename()] += h.hunks
382 applied[h.filename()] += h.hunks
383 continue
383 continue
384 for i, chunk in enumerate(h.hunks):
384 for i, chunk in enumerate(h.hunks):
385 if skipfile is None and skipall is None:
385 if skipfile is None and skipall is None:
386 chunk.pretty(ui)
386 chunk.pretty(ui)
387 if total == 1:
387 if total == 1:
388 msg = _("record this change to '%s'?") % chunk.filename()
388 msg = _("record this change to '%s'?") % chunk.filename()
389 else:
389 else:
390 idx = pos - len(h.hunks) + i
390 idx = pos - len(h.hunks) + i
391 msg = _("record change %d/%d to '%s'?") % (idx, total,
391 msg = _("record change %d/%d to '%s'?") % (idx, total,
392 chunk.filename())
392 chunk.filename())
393 r, skipfile, skipall, newpatches = prompt(skipfile,
393 r, skipfile, skipall, newpatches = prompt(skipfile,
394 skipall, msg, chunk)
394 skipall, msg, chunk)
395 if r:
395 if r:
396 if fixoffset:
396 if fixoffset:
397 chunk = copy.copy(chunk)
397 chunk = copy.copy(chunk)
398 chunk.toline += fixoffset
398 chunk.toline += fixoffset
399 applied[chunk.filename()].append(chunk)
399 applied[chunk.filename()].append(chunk)
400 elif newpatches is not None:
400 elif newpatches is not None:
401 for newpatch in newpatches:
401 for newpatch in newpatches:
402 for newhunk in newpatch.hunks:
402 for newhunk in newpatch.hunks:
403 if fixoffset:
403 if fixoffset:
404 newhunk.toline += fixoffset
404 newhunk.toline += fixoffset
405 applied[newhunk.filename()].append(newhunk)
405 applied[newhunk.filename()].append(newhunk)
406 else:
406 else:
407 fixoffset += chunk.removed - chunk.added
407 fixoffset += chunk.removed - chunk.added
408 return sum([h for h in applied.itervalues()
408 return sum([h for h in applied.itervalues()
409 if h[0].special() or len(h) > 1], [])
409 if h[0].special() or len(h) > 1], [])
410
410
411 @command("record",
411 @command("record",
412 # same options as commit + white space diff options
412 # same options as commit + white space diff options
413 commands.table['^commit|ci'][1][:] + commands.diffwsopts,
413 commands.table['^commit|ci'][1][:] + commands.diffwsopts,
414 _('hg record [OPTION]... [FILE]...'))
414 _('hg record [OPTION]... [FILE]...'))
415 def record(ui, repo, *pats, **opts):
415 def record(ui, repo, *pats, **opts):
416 '''interactively select changes to commit
416 '''interactively select changes to commit
417
417
418 If a list of files is omitted, all changes reported by :hg:`status`
418 If a list of files is omitted, all changes reported by :hg:`status`
419 will be candidates for recording.
419 will be candidates for recording.
420
420
421 See :hg:`help dates` for a list of formats valid for -d/--date.
421 See :hg:`help dates` for a list of formats valid for -d/--date.
422
422
423 You will be prompted for whether to record changes to each
423 You will be prompted for whether to record changes to each
424 modified file, and for files with multiple changes, for each
424 modified file, and for files with multiple changes, for each
425 change to use. For each query, the following responses are
425 change to use. For each query, the following responses are
426 possible::
426 possible::
427
427
428 y - record this change
428 y - record this change
429 n - skip this change
429 n - skip this change
430 e - edit this change manually
430 e - edit this change manually
431
431
432 s - skip remaining changes to this file
432 s - skip remaining changes to this file
433 f - record remaining changes to this file
433 f - record remaining changes to this file
434
434
435 d - done, skip remaining changes and files
435 d - done, skip remaining changes and files
436 a - record all changes to all remaining files
436 a - record all changes to all remaining files
437 q - quit, recording no changes
437 q - quit, recording no changes
438
438
439 ? - display help
439 ? - display help
440
440
441 This command is not available when committing a merge.'''
441 This command is not available when committing a merge.'''
442
442
443 dorecord(ui, repo, commands.commit, 'commit', False, *pats, **opts)
443 dorecord(ui, repo, commands.commit, 'commit', False, *pats, **opts)
444
444
445 def qrefresh(origfn, ui, repo, *pats, **opts):
445 def qrefresh(origfn, ui, repo, *pats, **opts):
446 if not opts['interactive']:
446 if not opts['interactive']:
447 return origfn(ui, repo, *pats, **opts)
447 return origfn(ui, repo, *pats, **opts)
448
448
449 mq = extensions.find('mq')
449 mq = extensions.find('mq')
450
450
451 def committomq(ui, repo, *pats, **opts):
451 def committomq(ui, repo, *pats, **opts):
452 # At this point the working copy contains only changes that
452 # At this point the working copy contains only changes that
453 # were accepted. All other changes were reverted.
453 # were accepted. All other changes were reverted.
454 # We can't pass *pats here since qrefresh will undo all other
454 # We can't pass *pats here since qrefresh will undo all other
455 # changed files in the patch that aren't in pats.
455 # changed files in the patch that aren't in pats.
456 mq.refresh(ui, repo, **opts)
456 mq.refresh(ui, repo, **opts)
457
457
458 # backup all changed files
458 # backup all changed files
459 dorecord(ui, repo, committomq, 'qrefresh', True, *pats, **opts)
459 dorecord(ui, repo, committomq, 'qrefresh', True, *pats, **opts)
460
460
461 # This command registration is replaced during uisetup().
461 # This command registration is replaced during uisetup().
462 @command('qrecord',
462 @command('qrecord',
463 [],
463 [],
464 _('hg qrecord [OPTION]... PATCH [FILE]...'),
464 _('hg qrecord [OPTION]... PATCH [FILE]...'),
465 inferrepo=True)
465 inferrepo=True)
466 def qrecord(ui, repo, patch, *pats, **opts):
466 def qrecord(ui, repo, patch, *pats, **opts):
467 '''interactively record a new patch
467 '''interactively record a new patch
468
468
469 See :hg:`help qnew` & :hg:`help record` for more information and
469 See :hg:`help qnew` & :hg:`help record` for more information and
470 usage.
470 usage.
471 '''
471 '''
472
472
473 try:
473 try:
474 mq = extensions.find('mq')
474 mq = extensions.find('mq')
475 except KeyError:
475 except KeyError:
476 raise util.Abort(_("'mq' extension not loaded"))
476 raise util.Abort(_("'mq' extension not loaded"))
477
477
478 repo.mq.checkpatchname(patch)
478 repo.mq.checkpatchname(patch)
479
479
480 def committomq(ui, repo, *pats, **opts):
480 def committomq(ui, repo, *pats, **opts):
481 opts['checkname'] = False
481 opts['checkname'] = False
482 mq.new(ui, repo, patch, *pats, **opts)
482 mq.new(ui, repo, patch, *pats, **opts)
483
483
484 dorecord(ui, repo, committomq, 'qnew', False, *pats, **opts)
484 dorecord(ui, repo, committomq, 'qnew', False, *pats, **opts)
485
485
486 def qnew(origfn, ui, repo, patch, *args, **opts):
486 def qnew(origfn, ui, repo, patch, *args, **opts):
487 if opts['interactive']:
487 if opts['interactive']:
488 return qrecord(ui, repo, patch, *args, **opts)
488 return qrecord(ui, repo, patch, *args, **opts)
489 return origfn(ui, repo, patch, *args, **opts)
489 return origfn(ui, repo, patch, *args, **opts)
490
490
491 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts):
491 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts):
492 if not ui.interactive():
492 if not ui.interactive():
493 raise util.Abort(_('running non-interactively, use %s instead') %
493 raise util.Abort(_('running non-interactively, use %s instead') %
494 cmdsuggest)
494 cmdsuggest)
495
495
496 # make sure username is set before going interactive
496 # make sure username is set before going interactive
497 if not opts.get('user'):
497 if not opts.get('user'):
498 ui.username() # raise exception, username not provided
498 ui.username() # raise exception, username not provided
499
499
500 def recordfunc(ui, repo, message, match, opts):
500 def recordfunc(ui, repo, message, match, opts):
501 """This is generic record driver.
501 """This is generic record driver.
502
502
503 Its job is to interactively filter local changes, and
503 Its job is to interactively filter local changes, and
504 accordingly prepare working directory into a state in which the
504 accordingly prepare working directory into a state in which the
505 job can be delegated to a non-interactive commit command such as
505 job can be delegated to a non-interactive commit command such as
506 'commit' or 'qrefresh'.
506 'commit' or 'qrefresh'.
507
507
508 After the actual job is done by non-interactive command, the
508 After the actual job is done by non-interactive command, the
509 working directory is restored to its original state.
509 working directory is restored to its original state.
510
510
511 In the end we'll record interesting changes, and everything else
511 In the end we'll record interesting changes, and everything else
512 will be left in place, so the user can continue working.
512 will be left in place, so the user can continue working.
513 """
513 """
514
514
515 cmdutil.checkunfinished(repo, commit=True)
515 cmdutil.checkunfinished(repo, commit=True)
516 merge = len(repo[None].parents()) > 1
516 merge = len(repo[None].parents()) > 1
517 if merge:
517 if merge:
518 raise util.Abort(_('cannot partially commit a merge '
518 raise util.Abort(_('cannot partially commit a merge '
519 '(use "hg commit" instead)'))
519 '(use "hg commit" instead)'))
520
520
521 status = repo.status(match=match)
521 status = repo.status(match=match)
522 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
522 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
523 diffopts.nodates = True
523 diffopts.nodates = True
524 diffopts.git = True
524 diffopts.git = True
525 originalchunks = patch.diff(repo, changes=status, opts=diffopts)
525 originalchunks = patch.diff(repo, changes=status, opts=diffopts)
526 fp = cStringIO.StringIO()
526 fp = cStringIO.StringIO()
527 fp.write(''.join(originalchunks))
527 fp.write(''.join(originalchunks))
528 fp.seek(0)
528 fp.seek(0)
529
529
530         # 1. filter the patch, so we have the intending-to-apply subset of it
530         # 1. filter the patch, so we have the intending-to-apply subset of it
531 try:
531 try:
532 chunks = filterpatch(ui, parsepatch(fp))
532 chunks = filterpatch(ui, parsepatch(fp))
533 except patch.PatchError, err:
533 except patch.PatchError, err:
534 raise util.Abort(_('error parsing patch: %s') % err)
534 raise util.Abort(_('error parsing patch: %s') % err)
535
535
536 del fp
536 del fp
537
537
538 contenders = set()
538 contenders = set()
539 for h in chunks:
539 for h in chunks:
540 try:
540 try:
541 contenders.update(set(h.files()))
541 contenders.update(set(h.files()))
542 except AttributeError:
542 except AttributeError:
543 pass
543 pass
544
544
545 changed = status.modified + status.added + status.removed
545 changed = status.modified + status.added + status.removed
546 newfiles = [f for f in changed if f in contenders]
546 newfiles = [f for f in changed if f in contenders]
547 if not newfiles:
547 if not newfiles:
548 ui.status(_('no changes to record\n'))
548 ui.status(_('no changes to record\n'))
549 return 0
549 return 0
550
550
551 newandmodifiedfiles = set()
551 newandmodifiedfiles = set()
552 for h in chunks:
552 for h in chunks:
553 ishunk = isinstance(h, hunk)
553 ishunk = isinstance(h, hunk)
554 isnew = h.filename() in status.added
554 isnew = h.filename() in status.added
555 if ishunk and isnew and not h in originalchunks:
555 if ishunk and isnew and not h in originalchunks:
556 newandmodifiedfiles.add(h.filename())
556 newandmodifiedfiles.add(h.filename())
557
557
558 modified = set(status.modified)
558 modified = set(status.modified)
559
559
560 # 2. backup changed files, so we can restore them in the end
560 # 2. backup changed files, so we can restore them in the end
561
561
562 if backupall:
562 if backupall:
563 tobackup = changed
563 tobackup = changed
564 else:
564 else:
565 tobackup = [f for f in newfiles
565 tobackup = [f for f in newfiles
566 if f in modified or f in newandmodifiedfiles]
566 if f in modified or f in newandmodifiedfiles]
567
567
568 backups = {}
568 backups = {}
569 if tobackup:
569 if tobackup:
570 backupdir = repo.join('record-backups')
570 backupdir = repo.join('record-backups')
571 try:
571 try:
572 os.mkdir(backupdir)
572 os.mkdir(backupdir)
573 except OSError, err:
573 except OSError, err:
574 if err.errno != errno.EEXIST:
574 if err.errno != errno.EEXIST:
575 raise
575 raise
576 try:
576 try:
577 # backup continues
577 # backup continues
578 for f in tobackup:
578 for f in tobackup:
579 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
579 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
580 dir=backupdir)
580 dir=backupdir)
581 os.close(fd)
581 os.close(fd)
582 ui.debug('backup %r as %r\n' % (f, tmpname))
582 ui.debug('backup %r as %r\n' % (f, tmpname))
583 util.copyfile(repo.wjoin(f), tmpname)
583 util.copyfile(repo.wjoin(f), tmpname)
584 shutil.copystat(repo.wjoin(f), tmpname)
584 shutil.copystat(repo.wjoin(f), tmpname)
585 backups[f] = tmpname
585 backups[f] = tmpname
586
586
587 fp = cStringIO.StringIO()
587 fp = cStringIO.StringIO()
588 for c in chunks:
588 for c in chunks:
589 fname = c.filename()
589 fname = c.filename()
590 if fname in backups or fname in newandmodifiedfiles:
590 if fname in backups or fname in newandmodifiedfiles:
591 c.write(fp)
591 c.write(fp)
592 dopatch = fp.tell()
592 dopatch = fp.tell()
593 fp.seek(0)
593 fp.seek(0)
594
594
595 [os.unlink(c) for c in newandmodifiedfiles]
595 [os.unlink(c) for c in newandmodifiedfiles]
596
596
597 # 3a. apply filtered patch to clean repo (clean)
597 # 3a. apply filtered patch to clean repo (clean)
598 if backups:
598 if backups:
599 hg.revert(repo, repo.dirstate.p1(),
599 hg.revert(repo, repo.dirstate.p1(),
600 lambda key: key in backups)
600 lambda key: key in backups)
601
601
602 # 3b. (apply)
602 # 3b. (apply)
603 if dopatch:
603 if dopatch:
604 try:
604 try:
605 ui.debug('applying patch\n')
605 ui.debug('applying patch\n')
606 ui.debug(fp.getvalue())
606 ui.debug(fp.getvalue())
607 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
607 patch.internalpatch(ui, repo, fp, 1, '', eolmode=None)
608 except patch.PatchError, err:
608 except patch.PatchError, err:
609 raise util.Abort(str(err))
609 raise util.Abort(str(err))
610 del fp
610 del fp
611
611
612 # 4. We prepared working directory according to filtered
612 # 4. We prepared working directory according to filtered
613 # patch. Now is the time to delegate the job to
613 # patch. Now is the time to delegate the job to
614 # commit/qrefresh or the like!
614 # commit/qrefresh or the like!
615
615
616 # Make all of the pathnames absolute.
616 # Make all of the pathnames absolute.
617 newfiles = [repo.wjoin(nf) for nf in newfiles]
617 newfiles = [repo.wjoin(nf) for nf in newfiles]
618 commitfunc(ui, repo, *newfiles, **opts)
618 commitfunc(ui, repo, *newfiles, **opts)
619
619
620 return 0
620 return 0
621 finally:
621 finally:
622 # 5. finally restore backed-up files
622 # 5. finally restore backed-up files
623 try:
623 try:
624 for realname, tmpname in backups.iteritems():
624 for realname, tmpname in backups.iteritems():
625 ui.debug('restoring %r to %r\n' % (tmpname, realname))
625 ui.debug('restoring %r to %r\n' % (tmpname, realname))
626 util.copyfile(tmpname, repo.wjoin(realname))
626 util.copyfile(tmpname, repo.wjoin(realname))
627 # Our calls to copystat() here and above are a
627 # Our calls to copystat() here and above are a
628 # hack to trick any editors that have f open that
628 # hack to trick any editors that have f open that
629 # we haven't modified them.
629 # we haven't modified them.
630 #
630 #
631                 # Also note that this is racy as an editor could
631                 # Also note that this is racy as an editor could
632 # notice the file's mtime before we've finished
632 # notice the file's mtime before we've finished
633 # writing it.
633 # writing it.
634 shutil.copystat(tmpname, repo.wjoin(realname))
634 shutil.copystat(tmpname, repo.wjoin(realname))
635 os.unlink(tmpname)
635 os.unlink(tmpname)
636 if tobackup:
636 if tobackup:
637 os.rmdir(backupdir)
637 os.rmdir(backupdir)
638 except OSError:
638 except OSError:
639 pass
639 pass
640
640
641 # wrap ui.write so diff output can be labeled/colorized
641 # wrap ui.write so diff output can be labeled/colorized
642 def wrapwrite(orig, *args, **kw):
642 def wrapwrite(orig, *args, **kw):
643 label = kw.pop('label', '')
643 label = kw.pop('label', '')
644 for chunk, l in patch.difflabel(lambda: args):
644 for chunk, l in patch.difflabel(lambda: args):
645 orig(chunk, label=label + l)
645 orig(chunk, label=label + l)
646 oldwrite = ui.write
646 oldwrite = ui.write
647 extensions.wrapfunction(ui, 'write', wrapwrite)
647 extensions.wrapfunction(ui, 'write', wrapwrite)
648 try:
648 try:
649 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
649 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
650 finally:
650 finally:
651 ui.write = oldwrite
651 ui.write = oldwrite
652
652
653 def uisetup(ui):
653 def uisetup(ui):
654 try:
654 try:
655 mq = extensions.find('mq')
655 mq = extensions.find('mq')
656 except KeyError:
656 except KeyError:
657 return
657 return
658
658
659 cmdtable["qrecord"] = \
659 cmdtable["qrecord"] = \
660 (qrecord,
660 (qrecord,
661 # same options as qnew, but copy them so we don't get
661 # same options as qnew, but copy them so we don't get
662 # -i/--interactive for qrecord and add white space diff options
662 # -i/--interactive for qrecord and add white space diff options
663 mq.cmdtable['^qnew'][1][:] + commands.diffwsopts,
663 mq.cmdtable['^qnew'][1][:] + commands.diffwsopts,
664 _('hg qrecord [OPTION]... PATCH [FILE]...'))
664 _('hg qrecord [OPTION]... PATCH [FILE]...'))
665
665
666 _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
666 _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
667 _wrapcmd('qrefresh', mq.cmdtable, qrefresh,
667 _wrapcmd('qrefresh', mq.cmdtable, qrefresh,
668 _("interactively select changes to refresh"))
668 _("interactively select changes to refresh"))
669
669
670 def _wrapcmd(cmd, table, wrapfn, msg):
670 def _wrapcmd(cmd, table, wrapfn, msg):
671 entry = extensions.wrapcommand(table, cmd, wrapfn)
671 entry = extensions.wrapcommand(table, cmd, wrapfn)
672 entry[1].append(('i', 'interactive', None, msg))
672 entry[1].append(('i', 'interactive', None, msg))
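The second file in this changeset is mercurial/patch.py, where internalpatch itself gains the prefix parameter; the excerpt below is cut off well before that function, so the new signature is not shown here. Going only by the call site above (a positional prefix between strip and the keyword arguments), a caller that wants to apply a filtered patch under a subdirectory would presumably look something like the following sketch; the surrounding helper, the keyword names, and any defaults are assumptions rather than part of this diff:

    # Hypothetical usage sketch, mirroring the record.py call shape
    # patch.internalpatch(ui, repo, fp, 1, '', eolmode=None) shown above,
    # but with a non-empty prefix.
    import cStringIO
    from mercurial import patch, util

    def applyunder(ui, repo, difftext, subdir):
        fp = cStringIO.StringIO(difftext)
        try:
            # strip=1 as in record.py; subdir takes the new prefix slot
            patch.internalpatch(ui, repo, fp, 1, subdir, eolmode=None)
        except patch.PatchError, err:
            raise util.Abort(str(err))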
mercurial/patch.py
@@ -1,1990 +1,1990 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email, os, errno, re, posixpath
9 import cStringIO, email, os, errno, re, posixpath
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11 # On python2.4 you have to import these by name or they fail to
11 # On python2.4 you have to import these by name or they fail to
12 # load. This was not a problem on Python 2.7.
12 # load. This was not a problem on Python 2.7.
13 import email.Generator
13 import email.Generator
14 import email.Parser
14 import email.Parser
15
15
16 from i18n import _
16 from i18n import _
17 from node import hex, short
17 from node import hex, short
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19
19
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
22
22
23 class PatchError(Exception):
23 class PatchError(Exception):
24 pass
24 pass
25
25
26
26
27 # public functions
27 # public functions
28
28
29 def split(stream):
29 def split(stream):
30 '''return an iterator of individual patches from a stream'''
30 '''return an iterator of individual patches from a stream'''
31 def isheader(line, inheader):
31 def isheader(line, inheader):
32 if inheader and line[0] in (' ', '\t'):
32 if inheader and line[0] in (' ', '\t'):
33 # continuation
33 # continuation
34 return True
34 return True
35 if line[0] in (' ', '-', '+'):
35 if line[0] in (' ', '-', '+'):
36 # diff line - don't check for header pattern in there
36 # diff line - don't check for header pattern in there
37 return False
37 return False
38 l = line.split(': ', 1)
38 l = line.split(': ', 1)
39 return len(l) == 2 and ' ' not in l[0]
39 return len(l) == 2 and ' ' not in l[0]
40
40
41 def chunk(lines):
41 def chunk(lines):
42 return cStringIO.StringIO(''.join(lines))
42 return cStringIO.StringIO(''.join(lines))
43
43
44 def hgsplit(stream, cur):
44 def hgsplit(stream, cur):
45 inheader = True
45 inheader = True
46
46
47 for line in stream:
47 for line in stream:
48 if not line.strip():
48 if not line.strip():
49 inheader = False
49 inheader = False
50 if not inheader and line.startswith('# HG changeset patch'):
50 if not inheader and line.startswith('# HG changeset patch'):
51 yield chunk(cur)
51 yield chunk(cur)
52 cur = []
52 cur = []
53 inheader = True
53 inheader = True
54
54
55 cur.append(line)
55 cur.append(line)
56
56
57 if cur:
57 if cur:
58 yield chunk(cur)
58 yield chunk(cur)
59
59
60 def mboxsplit(stream, cur):
60 def mboxsplit(stream, cur):
61 for line in stream:
61 for line in stream:
62 if line.startswith('From '):
62 if line.startswith('From '):
63 for c in split(chunk(cur[1:])):
63 for c in split(chunk(cur[1:])):
64 yield c
64 yield c
65 cur = []
65 cur = []
66
66
67 cur.append(line)
67 cur.append(line)
68
68
69 if cur:
69 if cur:
70 for c in split(chunk(cur[1:])):
70 for c in split(chunk(cur[1:])):
71 yield c
71 yield c
72
72
73 def mimesplit(stream, cur):
73 def mimesplit(stream, cur):
74 def msgfp(m):
74 def msgfp(m):
75 fp = cStringIO.StringIO()
75 fp = cStringIO.StringIO()
76 g = email.Generator.Generator(fp, mangle_from_=False)
76 g = email.Generator.Generator(fp, mangle_from_=False)
77 g.flatten(m)
77 g.flatten(m)
78 fp.seek(0)
78 fp.seek(0)
79 return fp
79 return fp
80
80
81 for line in stream:
81 for line in stream:
82 cur.append(line)
82 cur.append(line)
83 c = chunk(cur)
83 c = chunk(cur)
84
84
85 m = email.Parser.Parser().parse(c)
85 m = email.Parser.Parser().parse(c)
86 if not m.is_multipart():
86 if not m.is_multipart():
87 yield msgfp(m)
87 yield msgfp(m)
88 else:
88 else:
89 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
89 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
90 for part in m.walk():
90 for part in m.walk():
91 ct = part.get_content_type()
91 ct = part.get_content_type()
92 if ct not in ok_types:
92 if ct not in ok_types:
93 continue
93 continue
94 yield msgfp(part)
94 yield msgfp(part)
95
95
96 def headersplit(stream, cur):
96 def headersplit(stream, cur):
97 inheader = False
97 inheader = False
98
98
99 for line in stream:
99 for line in stream:
100 if not inheader and isheader(line, inheader):
100 if not inheader and isheader(line, inheader):
101 yield chunk(cur)
101 yield chunk(cur)
102 cur = []
102 cur = []
103 inheader = True
103 inheader = True
104 if inheader and not isheader(line, inheader):
104 if inheader and not isheader(line, inheader):
105 inheader = False
105 inheader = False
106
106
107 cur.append(line)
107 cur.append(line)
108
108
109 if cur:
109 if cur:
110 yield chunk(cur)
110 yield chunk(cur)
111
111
112 def remainder(cur):
112 def remainder(cur):
113 yield chunk(cur)
113 yield chunk(cur)
114
114
115 class fiter(object):
115 class fiter(object):
116 def __init__(self, fp):
116 def __init__(self, fp):
117 self.fp = fp
117 self.fp = fp
118
118
119 def __iter__(self):
119 def __iter__(self):
120 return self
120 return self
121
121
122 def next(self):
122 def next(self):
123 l = self.fp.readline()
123 l = self.fp.readline()
124 if not l:
124 if not l:
125 raise StopIteration
125 raise StopIteration
126 return l
126 return l
127
127
128 inheader = False
128 inheader = False
129 cur = []
129 cur = []
130
130
131 mimeheaders = ['content-type']
131 mimeheaders = ['content-type']
132
132
133 if not util.safehasattr(stream, 'next'):
133 if not util.safehasattr(stream, 'next'):
134 # http responses, for example, have readline but not next
134 # http responses, for example, have readline but not next
135 stream = fiter(stream)
135 stream = fiter(stream)
136
136
137 for line in stream:
137 for line in stream:
138 cur.append(line)
138 cur.append(line)
139 if line.startswith('# HG changeset patch'):
139 if line.startswith('# HG changeset patch'):
140 return hgsplit(stream, cur)
140 return hgsplit(stream, cur)
141 elif line.startswith('From '):
141 elif line.startswith('From '):
142 return mboxsplit(stream, cur)
142 return mboxsplit(stream, cur)
143 elif isheader(line, inheader):
143 elif isheader(line, inheader):
144 inheader = True
144 inheader = True
145 if line.split(':', 1)[0].lower() in mimeheaders:
145 if line.split(':', 1)[0].lower() in mimeheaders:
146 # let email parser handle this
146 # let email parser handle this
147 return mimesplit(stream, cur)
147 return mimesplit(stream, cur)
148 elif line.startswith('--- ') and inheader:
148 elif line.startswith('--- ') and inheader:
149 # No evil headers seen by diff start, split by hand
149 # No evil headers seen by diff start, split by hand
150 return headersplit(stream, cur)
150 return headersplit(stream, cur)
151 # Not enough info, keep reading
151 # Not enough info, keep reading
152
152
153 # if we are here, we have a very plain patch
153 # if we are here, we have a very plain patch
154 return remainder(cur)
154 return remainder(cur)
155
155
156 def extract(ui, fileobj):
156 def extract(ui, fileobj):
157 '''extract patch from data read from fileobj.
157 '''extract patch from data read from fileobj.
158
158
159 patch can be a normal patch or contained in an email message.
159 patch can be a normal patch or contained in an email message.
160
160
161 return tuple (filename, message, user, date, branch, node, p1, p2).
161 return tuple (filename, message, user, date, branch, node, p1, p2).
162 Any item in the returned tuple can be None. If filename is None,
162 Any item in the returned tuple can be None. If filename is None,
163 fileobj did not contain a patch. Caller must unlink filename when done.'''
163 fileobj did not contain a patch. Caller must unlink filename when done.'''
164
164
165 # attempt to detect the start of a patch
165 # attempt to detect the start of a patch
166 # (this heuristic is borrowed from quilt)
166 # (this heuristic is borrowed from quilt)
167 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
167 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
168 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
168 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
169 r'---[ \t].*?^\+\+\+[ \t]|'
169 r'---[ \t].*?^\+\+\+[ \t]|'
170 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
170 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
171
171
172 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
172 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
173 tmpfp = os.fdopen(fd, 'w')
173 tmpfp = os.fdopen(fd, 'w')
174 try:
174 try:
175 msg = email.Parser.Parser().parse(fileobj)
175 msg = email.Parser.Parser().parse(fileobj)
176
176
177 subject = msg['Subject']
177 subject = msg['Subject']
178 user = msg['From']
178 user = msg['From']
179 if not subject and not user:
179 if not subject and not user:
180 # Not an email, restore parsed headers if any
180 # Not an email, restore parsed headers if any
181 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
181 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
182
182
183 # should try to parse msg['Date']
183 # should try to parse msg['Date']
184 date = None
184 date = None
185 nodeid = None
185 nodeid = None
186 branch = None
186 branch = None
187 parents = []
187 parents = []
188
188
189 if subject:
189 if subject:
190 if subject.startswith('[PATCH'):
190 if subject.startswith('[PATCH'):
191 pend = subject.find(']')
191 pend = subject.find(']')
192 if pend >= 0:
192 if pend >= 0:
193 subject = subject[pend + 1:].lstrip()
193 subject = subject[pend + 1:].lstrip()
194 subject = re.sub(r'\n[ \t]+', ' ', subject)
194 subject = re.sub(r'\n[ \t]+', ' ', subject)
195 ui.debug('Subject: %s\n' % subject)
195 ui.debug('Subject: %s\n' % subject)
196 if user:
196 if user:
197 ui.debug('From: %s\n' % user)
197 ui.debug('From: %s\n' % user)
198 diffs_seen = 0
198 diffs_seen = 0
199 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
199 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
200 message = ''
200 message = ''
201 for part in msg.walk():
201 for part in msg.walk():
202 content_type = part.get_content_type()
202 content_type = part.get_content_type()
203 ui.debug('Content-Type: %s\n' % content_type)
203 ui.debug('Content-Type: %s\n' % content_type)
204 if content_type not in ok_types:
204 if content_type not in ok_types:
205 continue
205 continue
206 payload = part.get_payload(decode=True)
206 payload = part.get_payload(decode=True)
207 m = diffre.search(payload)
207 m = diffre.search(payload)
208 if m:
208 if m:
209 hgpatch = False
209 hgpatch = False
210 hgpatchheader = False
210 hgpatchheader = False
211 ignoretext = False
211 ignoretext = False
212
212
213 ui.debug('found patch at byte %d\n' % m.start(0))
213 ui.debug('found patch at byte %d\n' % m.start(0))
214 diffs_seen += 1
214 diffs_seen += 1
215 cfp = cStringIO.StringIO()
215 cfp = cStringIO.StringIO()
216 for line in payload[:m.start(0)].splitlines():
216 for line in payload[:m.start(0)].splitlines():
217 if line.startswith('# HG changeset patch') and not hgpatch:
217 if line.startswith('# HG changeset patch') and not hgpatch:
218 ui.debug('patch generated by hg export\n')
218 ui.debug('patch generated by hg export\n')
219 hgpatch = True
219 hgpatch = True
220 hgpatchheader = True
220 hgpatchheader = True
221 # drop earlier commit message content
221 # drop earlier commit message content
222 cfp.seek(0)
222 cfp.seek(0)
223 cfp.truncate()
223 cfp.truncate()
224 subject = None
224 subject = None
225 elif hgpatchheader:
225 elif hgpatchheader:
226 if line.startswith('# User '):
226 if line.startswith('# User '):
227 user = line[7:]
227 user = line[7:]
228 ui.debug('From: %s\n' % user)
228 ui.debug('From: %s\n' % user)
229 elif line.startswith("# Date "):
229 elif line.startswith("# Date "):
230 date = line[7:]
230 date = line[7:]
231 elif line.startswith("# Branch "):
231 elif line.startswith("# Branch "):
232 branch = line[9:]
232 branch = line[9:]
233 elif line.startswith("# Node ID "):
233 elif line.startswith("# Node ID "):
234 nodeid = line[10:]
234 nodeid = line[10:]
235 elif line.startswith("# Parent "):
235 elif line.startswith("# Parent "):
236 parents.append(line[9:].lstrip())
236 parents.append(line[9:].lstrip())
237 elif not line.startswith("# "):
237 elif not line.startswith("# "):
238 hgpatchheader = False
238 hgpatchheader = False
239 elif line == '---':
239 elif line == '---':
240 ignoretext = True
240 ignoretext = True
241 if not hgpatchheader and not ignoretext:
241 if not hgpatchheader and not ignoretext:
242 cfp.write(line)
242 cfp.write(line)
243 cfp.write('\n')
243 cfp.write('\n')
244 message = cfp.getvalue()
244 message = cfp.getvalue()
245 if tmpfp:
245 if tmpfp:
246 tmpfp.write(payload)
246 tmpfp.write(payload)
247 if not payload.endswith('\n'):
247 if not payload.endswith('\n'):
248 tmpfp.write('\n')
248 tmpfp.write('\n')
249 elif not diffs_seen and message and content_type == 'text/plain':
249 elif not diffs_seen and message and content_type == 'text/plain':
250 message += '\n' + payload
250 message += '\n' + payload
251 except: # re-raises
251 except: # re-raises
252 tmpfp.close()
252 tmpfp.close()
253 os.unlink(tmpname)
253 os.unlink(tmpname)
254 raise
254 raise
255
255
256 if subject and not message.startswith(subject):
256 if subject and not message.startswith(subject):
257 message = '%s\n%s' % (subject, message)
257 message = '%s\n%s' % (subject, message)
258 tmpfp.close()
258 tmpfp.close()
259 if not diffs_seen:
259 if not diffs_seen:
260 os.unlink(tmpname)
260 os.unlink(tmpname)
261 return None, message, user, date, branch, None, None, None
261 return None, message, user, date, branch, None, None, None
262 p1 = parents and parents.pop(0) or None
262 p1 = parents and parents.pop(0) or None
263 p2 = parents and parents.pop(0) or None
263 p2 = parents and parents.pop(0) or None
264 return tmpname, message, user, date, branch, nodeid, p1, p2
264 return tmpname, message, user, date, branch, nodeid, p1, p2
265
265
266 class patchmeta(object):
266 class patchmeta(object):
267 """Patched file metadata
267 """Patched file metadata
268
268
269 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
269 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
270 or COPY. 'path' is patched file path. 'oldpath' is set to the
270 or COPY. 'path' is patched file path. 'oldpath' is set to the
271 origin file when 'op' is either COPY or RENAME, None otherwise. If
271 origin file when 'op' is either COPY or RENAME, None otherwise. If
272 file mode is changed, 'mode' is a tuple (islink, isexec) where
272 file mode is changed, 'mode' is a tuple (islink, isexec) where
273 'islink' is True if the file is a symlink and 'isexec' is True if
273 'islink' is True if the file is a symlink and 'isexec' is True if
274 the file is executable. Otherwise, 'mode' is None.
274 the file is executable. Otherwise, 'mode' is None.
275 """
275 """
276 def __init__(self, path):
276 def __init__(self, path):
277 self.path = path
277 self.path = path
278 self.oldpath = None
278 self.oldpath = None
279 self.mode = None
279 self.mode = None
280 self.op = 'MODIFY'
280 self.op = 'MODIFY'
281 self.binary = False
281 self.binary = False
282
282
283 def setmode(self, mode):
283 def setmode(self, mode):
284 islink = mode & 020000
284 islink = mode & 020000
285 isexec = mode & 0100
285 isexec = mode & 0100
286 self.mode = (islink, isexec)
286 self.mode = (islink, isexec)
287
287
288 def copy(self):
288 def copy(self):
289 other = patchmeta(self.path)
289 other = patchmeta(self.path)
290 other.oldpath = self.oldpath
290 other.oldpath = self.oldpath
291 other.mode = self.mode
291 other.mode = self.mode
292 other.op = self.op
292 other.op = self.op
293 other.binary = self.binary
293 other.binary = self.binary
294 return other
294 return other
295
295
296 def _ispatchinga(self, afile):
296 def _ispatchinga(self, afile):
297 if afile == '/dev/null':
297 if afile == '/dev/null':
298 return self.op == 'ADD'
298 return self.op == 'ADD'
299 return afile == 'a/' + (self.oldpath or self.path)
299 return afile == 'a/' + (self.oldpath or self.path)
300
300
301 def _ispatchingb(self, bfile):
301 def _ispatchingb(self, bfile):
302 if bfile == '/dev/null':
302 if bfile == '/dev/null':
303 return self.op == 'DELETE'
303 return self.op == 'DELETE'
304 return bfile == 'b/' + self.path
304 return bfile == 'b/' + self.path
305
305
306 def ispatching(self, afile, bfile):
306 def ispatching(self, afile, bfile):
307 return self._ispatchinga(afile) and self._ispatchingb(bfile)
307 return self._ispatchinga(afile) and self._ispatchingb(bfile)
308
308
309 def __repr__(self):
309 def __repr__(self):
310 return "<patchmeta %s %r>" % (self.op, self.path)
310 return "<patchmeta %s %r>" % (self.op, self.path)
311
311
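# Editor's illustrative sketch, not part of patch.py: how a patchmeta
# records what a git-style hunk will do. Assumes the class is importable
# as mercurial.patch.patchmeta; the path and mode below are made up.
gp = patchmeta('tools/run.sh')
gp.op = 'ADD'
gp.setmode(0100755)      # regular executable file -> gp.mode == (0, 0100)
assert gp.ispatching('/dev/null', 'b/tools/run.sh')      # an ADD comes from /dev/null
assert not gp.ispatching('a/tools/run.sh', '/dev/null')  # and is not a DELETE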
312 def readgitpatch(lr):
312 def readgitpatch(lr):
313 """extract git-style metadata about patches from <patchname>"""
313 """extract git-style metadata about patches from <patchname>"""
314
314
315 # Filter patch for git information
315 # Filter patch for git information
316 gp = None
316 gp = None
317 gitpatches = []
317 gitpatches = []
318 for line in lr:
318 for line in lr:
319 line = line.rstrip(' \r\n')
319 line = line.rstrip(' \r\n')
320 if line.startswith('diff --git a/'):
320 if line.startswith('diff --git a/'):
321 m = gitre.match(line)
321 m = gitre.match(line)
322 if m:
322 if m:
323 if gp:
323 if gp:
324 gitpatches.append(gp)
324 gitpatches.append(gp)
325 dst = m.group(2)
325 dst = m.group(2)
326 gp = patchmeta(dst)
326 gp = patchmeta(dst)
327 elif gp:
327 elif gp:
328 if line.startswith('--- '):
328 if line.startswith('--- '):
329 gitpatches.append(gp)
329 gitpatches.append(gp)
330 gp = None
330 gp = None
331 continue
331 continue
332 if line.startswith('rename from '):
332 if line.startswith('rename from '):
333 gp.op = 'RENAME'
333 gp.op = 'RENAME'
334 gp.oldpath = line[12:]
334 gp.oldpath = line[12:]
335 elif line.startswith('rename to '):
335 elif line.startswith('rename to '):
336 gp.path = line[10:]
336 gp.path = line[10:]
337 elif line.startswith('copy from '):
337 elif line.startswith('copy from '):
338 gp.op = 'COPY'
338 gp.op = 'COPY'
339 gp.oldpath = line[10:]
339 gp.oldpath = line[10:]
340 elif line.startswith('copy to '):
340 elif line.startswith('copy to '):
341 gp.path = line[8:]
341 gp.path = line[8:]
342 elif line.startswith('deleted file'):
342 elif line.startswith('deleted file'):
343 gp.op = 'DELETE'
343 gp.op = 'DELETE'
344 elif line.startswith('new file mode '):
344 elif line.startswith('new file mode '):
345 gp.op = 'ADD'
345 gp.op = 'ADD'
346 gp.setmode(int(line[-6:], 8))
346 gp.setmode(int(line[-6:], 8))
347 elif line.startswith('new mode '):
347 elif line.startswith('new mode '):
348 gp.setmode(int(line[-6:], 8))
348 gp.setmode(int(line[-6:], 8))
349 elif line.startswith('GIT binary patch'):
349 elif line.startswith('GIT binary patch'):
350 gp.binary = True
350 gp.binary = True
351 if gp:
351 if gp:
352 gitpatches.append(gp)
352 gitpatches.append(gp)
353
353
354 return gitpatches
354 return gitpatches
355
355
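# Editor's illustrative sketch, not part of patch.py: readgitpatch() only
# needs an iterable of lines, so a plain list is enough to see the
# metadata it extracts. It relies on the module-level 'gitre' regex
# defined earlier in this file; the file names are made up.
header = [
    'diff --git a/old.txt b/new.txt\n',
    'rename from old.txt\n',
    'rename to new.txt\n',
]
for gp in readgitpatch(header):
    print gp.op, gp.oldpath, '->', gp.path    # RENAME old.txt -> new.txt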
356 class linereader(object):
356 class linereader(object):
357 # simple class to allow pushing lines back into the input stream
357 # simple class to allow pushing lines back into the input stream
358 def __init__(self, fp):
358 def __init__(self, fp):
359 self.fp = fp
359 self.fp = fp
360 self.buf = []
360 self.buf = []
361
361
362 def push(self, line):
362 def push(self, line):
363 if line is not None:
363 if line is not None:
364 self.buf.append(line)
364 self.buf.append(line)
365
365
366 def readline(self):
366 def readline(self):
367 if self.buf:
367 if self.buf:
368 l = self.buf[0]
368 l = self.buf[0]
369 del self.buf[0]
369 del self.buf[0]
370 return l
370 return l
371 return self.fp.readline()
371 return self.fp.readline()
372
372
373 def __iter__(self):
373 def __iter__(self):
374 while True:
374 while True:
375 l = self.readline()
375 l = self.readline()
376 if not l:
376 if not l:
377 break
377 break
378 yield l
378 yield l
379
379
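# Editor's illustrative sketch, not part of patch.py: linereader lets the
# parser read a line, look at it, and push it back for the next reader.
import cStringIO
lr = linereader(cStringIO.StringIO('first\nsecond\n'))
line = lr.readline()           # 'first\n'
lr.push(line)                  # unread it
assert lr.readline() == 'first\n'
assert lr.readline() == 'second\n'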
380 class abstractbackend(object):
380 class abstractbackend(object):
381 def __init__(self, ui):
381 def __init__(self, ui):
382 self.ui = ui
382 self.ui = ui
383
383
384 def getfile(self, fname):
384 def getfile(self, fname):
385 """Return target file data and flags as a (data, (islink,
385 """Return target file data and flags as a (data, (islink,
386 isexec)) tuple. Data is None if file is missing/deleted.
386 isexec)) tuple. Data is None if file is missing/deleted.
387 """
387 """
388 raise NotImplementedError
388 raise NotImplementedError
389
389
390 def setfile(self, fname, data, mode, copysource):
390 def setfile(self, fname, data, mode, copysource):
391 """Write data to target file fname and set its mode. mode is a
391 """Write data to target file fname and set its mode. mode is a
392 (islink, isexec) tuple. If data is None, the file content should
392 (islink, isexec) tuple. If data is None, the file content should
393 be left unchanged. If the file is modified after being copied,
393 be left unchanged. If the file is modified after being copied,
394 copysource is set to the original file name.
394 copysource is set to the original file name.
395 """
395 """
396 raise NotImplementedError
396 raise NotImplementedError
397
397
398 def unlink(self, fname):
398 def unlink(self, fname):
399 """Unlink target file."""
399 """Unlink target file."""
400 raise NotImplementedError
400 raise NotImplementedError
401
401
402 def writerej(self, fname, failed, total, lines):
402 def writerej(self, fname, failed, total, lines):
403 """Write rejected lines for fname. total is the number of hunks
403 """Write rejected lines for fname. total is the number of hunks
404 which failed to apply and total the total number of hunks for this
404 which failed to apply and total the total number of hunks for this
405 files.
405 files.
406 """
406 """
407 pass
407 pass
408
408
409 def exists(self, fname):
409 def exists(self, fname):
410 raise NotImplementedError
410 raise NotImplementedError
411
411
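# Editor's illustrative sketch, not part of patch.py: a minimal in-memory
# backend built on the interface above, handy for dry-run experiments.
# The class name and dict layout are this editor's invention.
class memorybackend(abstractbackend):
    def __init__(self, ui, files):
        super(memorybackend, self).__init__(ui)
        self.files = files      # fname -> (data, (islink, isexec))
    def getfile(self, fname):
        return self.files.get(fname, (None, None))
    def setfile(self, fname, data, mode, copysource):
        self.files[fname] = (data, mode)
    def unlink(self, fname):
        self.files.pop(fname, None)
    def exists(self, fname):
        return fname in self.files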
412 class fsbackend(abstractbackend):
412 class fsbackend(abstractbackend):
413 def __init__(self, ui, basedir):
413 def __init__(self, ui, basedir):
414 super(fsbackend, self).__init__(ui)
414 super(fsbackend, self).__init__(ui)
415 self.opener = scmutil.opener(basedir)
415 self.opener = scmutil.opener(basedir)
416
416
417 def _join(self, f):
417 def _join(self, f):
418 return os.path.join(self.opener.base, f)
418 return os.path.join(self.opener.base, f)
419
419
420 def getfile(self, fname):
420 def getfile(self, fname):
421 if self.opener.islink(fname):
421 if self.opener.islink(fname):
422 return (self.opener.readlink(fname), (True, False))
422 return (self.opener.readlink(fname), (True, False))
423
423
424 isexec = False
424 isexec = False
425 try:
425 try:
426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
427 except OSError, e:
427 except OSError, e:
428 if e.errno != errno.ENOENT:
428 if e.errno != errno.ENOENT:
429 raise
429 raise
430 try:
430 try:
431 return (self.opener.read(fname), (False, isexec))
431 return (self.opener.read(fname), (False, isexec))
432 except IOError, e:
432 except IOError, e:
433 if e.errno != errno.ENOENT:
433 if e.errno != errno.ENOENT:
434 raise
434 raise
435 return None, None
435 return None, None
436
436
437 def setfile(self, fname, data, mode, copysource):
437 def setfile(self, fname, data, mode, copysource):
438 islink, isexec = mode
438 islink, isexec = mode
439 if data is None:
439 if data is None:
440 self.opener.setflags(fname, islink, isexec)
440 self.opener.setflags(fname, islink, isexec)
441 return
441 return
442 if islink:
442 if islink:
443 self.opener.symlink(data, fname)
443 self.opener.symlink(data, fname)
444 else:
444 else:
445 self.opener.write(fname, data)
445 self.opener.write(fname, data)
446 if isexec:
446 if isexec:
447 self.opener.setflags(fname, False, True)
447 self.opener.setflags(fname, False, True)
448
448
449 def unlink(self, fname):
449 def unlink(self, fname):
450 self.opener.unlinkpath(fname, ignoremissing=True)
450 self.opener.unlinkpath(fname, ignoremissing=True)
451
451
452 def writerej(self, fname, failed, total, lines):
452 def writerej(self, fname, failed, total, lines):
453 fname = fname + ".rej"
453 fname = fname + ".rej"
454 self.ui.warn(
454 self.ui.warn(
455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
456 (failed, total, fname))
456 (failed, total, fname))
457 fp = self.opener(fname, 'w')
457 fp = self.opener(fname, 'w')
458 fp.writelines(lines)
458 fp.writelines(lines)
459 fp.close()
459 fp.close()
460
460
461 def exists(self, fname):
461 def exists(self, fname):
462 return self.opener.lexists(fname)
462 return self.opener.lexists(fname)
463
463
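# Editor's illustrative sketch, not part of patch.py: reading a file's
# data and (islink, isexec) flags relative to a base directory. The ui
# object, directory and file name are assumptions for the example.
from mercurial import ui as uimod
backend = fsbackend(uimod.ui(), '/tmp/work')
data, mode = backend.getfile('hello.txt')     # (None, None) if it is missing
if data is not None:
    backend.setfile('hello-copy.txt', data, mode, 'hello.txt')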
464 class workingbackend(fsbackend):
464 class workingbackend(fsbackend):
465 def __init__(self, ui, repo, similarity):
465 def __init__(self, ui, repo, similarity):
466 super(workingbackend, self).__init__(ui, repo.root)
466 super(workingbackend, self).__init__(ui, repo.root)
467 self.repo = repo
467 self.repo = repo
468 self.similarity = similarity
468 self.similarity = similarity
469 self.removed = set()
469 self.removed = set()
470 self.changed = set()
470 self.changed = set()
471 self.copied = []
471 self.copied = []
472
472
473 def _checkknown(self, fname):
473 def _checkknown(self, fname):
474 if self.repo.dirstate[fname] == '?' and self.exists(fname):
474 if self.repo.dirstate[fname] == '?' and self.exists(fname):
475 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
475 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
476
476
477 def setfile(self, fname, data, mode, copysource):
477 def setfile(self, fname, data, mode, copysource):
478 self._checkknown(fname)
478 self._checkknown(fname)
479 super(workingbackend, self).setfile(fname, data, mode, copysource)
479 super(workingbackend, self).setfile(fname, data, mode, copysource)
480 if copysource is not None:
480 if copysource is not None:
481 self.copied.append((copysource, fname))
481 self.copied.append((copysource, fname))
482 self.changed.add(fname)
482 self.changed.add(fname)
483
483
484 def unlink(self, fname):
484 def unlink(self, fname):
485 self._checkknown(fname)
485 self._checkknown(fname)
486 super(workingbackend, self).unlink(fname)
486 super(workingbackend, self).unlink(fname)
487 self.removed.add(fname)
487 self.removed.add(fname)
488 self.changed.add(fname)
488 self.changed.add(fname)
489
489
490 def close(self):
490 def close(self):
491 wctx = self.repo[None]
491 wctx = self.repo[None]
492 changed = set(self.changed)
492 changed = set(self.changed)
493 for src, dst in self.copied:
493 for src, dst in self.copied:
494 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
494 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
495 if self.removed:
495 if self.removed:
496 wctx.forget(sorted(self.removed))
496 wctx.forget(sorted(self.removed))
497 for f in self.removed:
497 for f in self.removed:
498 if f not in self.repo.dirstate:
498 if f not in self.repo.dirstate:
499 # File was deleted and no longer belongs to the
499 # File was deleted and no longer belongs to the
500 # dirstate, it was probably marked added then
500 # dirstate, it was probably marked added then
501 # deleted, and should not be considered by
501 # deleted, and should not be considered by
502 # marktouched().
502 # marktouched().
503 changed.discard(f)
503 changed.discard(f)
504 if changed:
504 if changed:
505 scmutil.marktouched(self.repo, changed, self.similarity)
505 scmutil.marktouched(self.repo, changed, self.similarity)
506 return sorted(self.changed)
506 return sorted(self.changed)
507
507
508 class filestore(object):
508 class filestore(object):
509 def __init__(self, maxsize=None):
509 def __init__(self, maxsize=None):
510 self.opener = None
510 self.opener = None
511 self.files = {}
511 self.files = {}
512 self.created = 0
512 self.created = 0
513 self.maxsize = maxsize
513 self.maxsize = maxsize
514 if self.maxsize is None:
514 if self.maxsize is None:
515 self.maxsize = 4*(2**20)
515 self.maxsize = 4*(2**20)
516 self.size = 0
516 self.size = 0
517 self.data = {}
517 self.data = {}
518
518
519 def setfile(self, fname, data, mode, copied=None):
519 def setfile(self, fname, data, mode, copied=None):
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
521 self.data[fname] = (data, mode, copied)
521 self.data[fname] = (data, mode, copied)
522 self.size += len(data)
522 self.size += len(data)
523 else:
523 else:
524 if self.opener is None:
524 if self.opener is None:
525 root = tempfile.mkdtemp(prefix='hg-patch-')
525 root = tempfile.mkdtemp(prefix='hg-patch-')
526 self.opener = scmutil.opener(root)
526 self.opener = scmutil.opener(root)
527 # Avoid filename issues with these simple names
527 # Avoid filename issues with these simple names
528 fn = str(self.created)
528 fn = str(self.created)
529 self.opener.write(fn, data)
529 self.opener.write(fn, data)
530 self.created += 1
530 self.created += 1
531 self.files[fname] = (fn, mode, copied)
531 self.files[fname] = (fn, mode, copied)
532
532
533 def getfile(self, fname):
533 def getfile(self, fname):
534 if fname in self.data:
534 if fname in self.data:
535 return self.data[fname]
535 return self.data[fname]
536 if not self.opener or fname not in self.files:
536 if not self.opener or fname not in self.files:
537 return None, None, None
537 return None, None, None
538 fn, mode, copied = self.files[fname]
538 fn, mode, copied = self.files[fname]
539 return self.opener.read(fn), mode, copied
539 return self.opener.read(fn), mode, copied
540
540
541 def close(self):
541 def close(self):
542 if self.opener:
542 if self.opener:
543 shutil.rmtree(self.opener.base)
543 shutil.rmtree(self.opener.base)
544
544
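# Editor's illustrative sketch, not part of patch.py: filestore keeps small
# contents in memory and spills to a temporary directory once maxsize is
# exceeded; the tiny maxsize below just forces both code paths.
store = filestore(maxsize=16)
store.setfile('a.txt', 'tiny\n', (False, False))       # kept in self.data
store.setfile('b.txt', 'x' * 1024, (False, False))     # written to a temp file
data, mode, copied = store.getfile('b.txt')
assert data == 'x' * 1024
store.close()                                          # removes the temp directory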
545 class repobackend(abstractbackend):
545 class repobackend(abstractbackend):
546 def __init__(self, ui, repo, ctx, store):
546 def __init__(self, ui, repo, ctx, store):
547 super(repobackend, self).__init__(ui)
547 super(repobackend, self).__init__(ui)
548 self.repo = repo
548 self.repo = repo
549 self.ctx = ctx
549 self.ctx = ctx
550 self.store = store
550 self.store = store
551 self.changed = set()
551 self.changed = set()
552 self.removed = set()
552 self.removed = set()
553 self.copied = {}
553 self.copied = {}
554
554
555 def _checkknown(self, fname):
555 def _checkknown(self, fname):
556 if fname not in self.ctx:
556 if fname not in self.ctx:
557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
558
558
559 def getfile(self, fname):
559 def getfile(self, fname):
560 try:
560 try:
561 fctx = self.ctx[fname]
561 fctx = self.ctx[fname]
562 except error.LookupError:
562 except error.LookupError:
563 return None, None
563 return None, None
564 flags = fctx.flags()
564 flags = fctx.flags()
565 return fctx.data(), ('l' in flags, 'x' in flags)
565 return fctx.data(), ('l' in flags, 'x' in flags)
566
566
567 def setfile(self, fname, data, mode, copysource):
567 def setfile(self, fname, data, mode, copysource):
568 if copysource:
568 if copysource:
569 self._checkknown(copysource)
569 self._checkknown(copysource)
570 if data is None:
570 if data is None:
571 data = self.ctx[fname].data()
571 data = self.ctx[fname].data()
572 self.store.setfile(fname, data, mode, copysource)
572 self.store.setfile(fname, data, mode, copysource)
573 self.changed.add(fname)
573 self.changed.add(fname)
574 if copysource:
574 if copysource:
575 self.copied[fname] = copysource
575 self.copied[fname] = copysource
576
576
577 def unlink(self, fname):
577 def unlink(self, fname):
578 self._checkknown(fname)
578 self._checkknown(fname)
579 self.removed.add(fname)
579 self.removed.add(fname)
580
580
581 def exists(self, fname):
581 def exists(self, fname):
582 return fname in self.ctx
582 return fname in self.ctx
583
583
584 def close(self):
584 def close(self):
585 return self.changed | self.removed
585 return self.changed | self.removed
586
586
587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
591
591
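# Editor's illustrative sketch, not part of patch.py: what unidesc captures
# from a unified hunk header. A missing length comes back as None and is
# treated as 1 by hunk.read_unified_hunk below.
m = unidesc.match('@@ -12,3 +12,4 @@')
assert m.groups() == ('12', '3', '12', '4')
m = unidesc.match('@@ -1 +1 @@')
assert m.groups() == ('1', None, '1', None)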
592 class patchfile(object):
592 class patchfile(object):
593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
594 self.fname = gp.path
594 self.fname = gp.path
595 self.eolmode = eolmode
595 self.eolmode = eolmode
596 self.eol = None
596 self.eol = None
597 self.backend = backend
597 self.backend = backend
598 self.ui = ui
598 self.ui = ui
599 self.lines = []
599 self.lines = []
600 self.exists = False
600 self.exists = False
601 self.missing = True
601 self.missing = True
602 self.mode = gp.mode
602 self.mode = gp.mode
603 self.copysource = gp.oldpath
603 self.copysource = gp.oldpath
604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
605 self.remove = gp.op == 'DELETE'
605 self.remove = gp.op == 'DELETE'
606 if self.copysource is None:
606 if self.copysource is None:
607 data, mode = backend.getfile(self.fname)
607 data, mode = backend.getfile(self.fname)
608 else:
608 else:
609 data, mode = store.getfile(self.copysource)[:2]
609 data, mode = store.getfile(self.copysource)[:2]
610 if data is not None:
610 if data is not None:
611 self.exists = self.copysource is None or backend.exists(self.fname)
611 self.exists = self.copysource is None or backend.exists(self.fname)
612 self.missing = False
612 self.missing = False
613 if data:
613 if data:
614 self.lines = mdiff.splitnewlines(data)
614 self.lines = mdiff.splitnewlines(data)
615 if self.mode is None:
615 if self.mode is None:
616 self.mode = mode
616 self.mode = mode
617 if self.lines:
617 if self.lines:
618 # Normalize line endings
618 # Normalize line endings
619 if self.lines[0].endswith('\r\n'):
619 if self.lines[0].endswith('\r\n'):
620 self.eol = '\r\n'
620 self.eol = '\r\n'
621 elif self.lines[0].endswith('\n'):
621 elif self.lines[0].endswith('\n'):
622 self.eol = '\n'
622 self.eol = '\n'
623 if eolmode != 'strict':
623 if eolmode != 'strict':
624 nlines = []
624 nlines = []
625 for l in self.lines:
625 for l in self.lines:
626 if l.endswith('\r\n'):
626 if l.endswith('\r\n'):
627 l = l[:-2] + '\n'
627 l = l[:-2] + '\n'
628 nlines.append(l)
628 nlines.append(l)
629 self.lines = nlines
629 self.lines = nlines
630 else:
630 else:
631 if self.create:
631 if self.create:
632 self.missing = False
632 self.missing = False
633 if self.mode is None:
633 if self.mode is None:
634 self.mode = (False, False)
634 self.mode = (False, False)
635 if self.missing:
635 if self.missing:
636 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
636 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
637
637
638 self.hash = {}
638 self.hash = {}
639 self.dirty = 0
639 self.dirty = 0
640 self.offset = 0
640 self.offset = 0
641 self.skew = 0
641 self.skew = 0
642 self.rej = []
642 self.rej = []
643 self.fileprinted = False
643 self.fileprinted = False
644 self.printfile(False)
644 self.printfile(False)
645 self.hunks = 0
645 self.hunks = 0
646
646
647 def writelines(self, fname, lines, mode):
647 def writelines(self, fname, lines, mode):
648 if self.eolmode == 'auto':
648 if self.eolmode == 'auto':
649 eol = self.eol
649 eol = self.eol
650 elif self.eolmode == 'crlf':
650 elif self.eolmode == 'crlf':
651 eol = '\r\n'
651 eol = '\r\n'
652 else:
652 else:
653 eol = '\n'
653 eol = '\n'
654
654
655 if self.eolmode != 'strict' and eol and eol != '\n':
655 if self.eolmode != 'strict' and eol and eol != '\n':
656 rawlines = []
656 rawlines = []
657 for l in lines:
657 for l in lines:
658 if l and l[-1] == '\n':
658 if l and l[-1] == '\n':
659 l = l[:-1] + eol
659 l = l[:-1] + eol
660 rawlines.append(l)
660 rawlines.append(l)
661 lines = rawlines
661 lines = rawlines
662
662
663 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
663 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
664
664
665 def printfile(self, warn):
665 def printfile(self, warn):
666 if self.fileprinted:
666 if self.fileprinted:
667 return
667 return
668 if warn or self.ui.verbose:
668 if warn or self.ui.verbose:
669 self.fileprinted = True
669 self.fileprinted = True
670 s = _("patching file %s\n") % self.fname
670 s = _("patching file %s\n") % self.fname
671 if warn:
671 if warn:
672 self.ui.warn(s)
672 self.ui.warn(s)
673 else:
673 else:
674 self.ui.note(s)
674 self.ui.note(s)
675
675
676
676
677 def findlines(self, l, linenum):
677 def findlines(self, l, linenum):
678 # looks through the hash and finds candidate lines. The
678 # looks through the hash and finds candidate lines. The
679 # result is a list of line numbers sorted based on distance
679 # result is a list of line numbers sorted based on distance
680 # from linenum
680 # from linenum
681
681
682 cand = self.hash.get(l, [])
682 cand = self.hash.get(l, [])
683 if len(cand) > 1:
683 if len(cand) > 1:
684 # resort our list of potentials forward then back.
684 # resort our list of potentials forward then back.
685 cand.sort(key=lambda x: abs(x - linenum))
685 cand.sort(key=lambda x: abs(x - linenum))
686 return cand
686 return cand
687
687
688 def write_rej(self):
688 def write_rej(self):
689 # our rejects are a little different from patch(1). This always
689 # our rejects are a little different from patch(1). This always
690 # creates rejects in the same form as the original patch. A file
690 # creates rejects in the same form as the original patch. A file
691 # header is inserted so that you can run the reject through patch again
691 # header is inserted so that you can run the reject through patch again
692 # without having to type the filename.
692 # without having to type the filename.
693 if not self.rej:
693 if not self.rej:
694 return
694 return
695 base = os.path.basename(self.fname)
695 base = os.path.basename(self.fname)
696 lines = ["--- %s\n+++ %s\n" % (base, base)]
696 lines = ["--- %s\n+++ %s\n" % (base, base)]
697 for x in self.rej:
697 for x in self.rej:
698 for l in x.hunk:
698 for l in x.hunk:
699 lines.append(l)
699 lines.append(l)
700 if l[-1] != '\n':
700 if l[-1] != '\n':
701 lines.append("\n\ No newline at end of file\n")
701 lines.append("\n\ No newline at end of file\n")
702 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
702 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
703
703
704 def apply(self, h):
704 def apply(self, h):
705 if not h.complete():
705 if not h.complete():
706 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
706 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
707 (h.number, h.desc, len(h.a), h.lena, len(h.b),
707 (h.number, h.desc, len(h.a), h.lena, len(h.b),
708 h.lenb))
708 h.lenb))
709
709
710 self.hunks += 1
710 self.hunks += 1
711
711
712 if self.missing:
712 if self.missing:
713 self.rej.append(h)
713 self.rej.append(h)
714 return -1
714 return -1
715
715
716 if self.exists and self.create:
716 if self.exists and self.create:
717 if self.copysource:
717 if self.copysource:
718 self.ui.warn(_("cannot create %s: destination already "
718 self.ui.warn(_("cannot create %s: destination already "
719 "exists\n") % self.fname)
719 "exists\n") % self.fname)
720 else:
720 else:
721 self.ui.warn(_("file %s already exists\n") % self.fname)
721 self.ui.warn(_("file %s already exists\n") % self.fname)
722 self.rej.append(h)
722 self.rej.append(h)
723 return -1
723 return -1
724
724
725 if isinstance(h, binhunk):
725 if isinstance(h, binhunk):
726 if self.remove:
726 if self.remove:
727 self.backend.unlink(self.fname)
727 self.backend.unlink(self.fname)
728 else:
728 else:
729 l = h.new(self.lines)
729 l = h.new(self.lines)
730 self.lines[:] = l
730 self.lines[:] = l
731 self.offset += len(l)
731 self.offset += len(l)
732 self.dirty = True
732 self.dirty = True
733 return 0
733 return 0
734
734
735 horig = h
735 horig = h
736 if (self.eolmode in ('crlf', 'lf')
736 if (self.eolmode in ('crlf', 'lf')
737 or self.eolmode == 'auto' and self.eol):
737 or self.eolmode == 'auto' and self.eol):
738 # If new eols are going to be normalized, then normalize
738 # If new eols are going to be normalized, then normalize
739 # hunk data before patching. Otherwise, preserve input
739 # hunk data before patching. Otherwise, preserve input
740 # line-endings.
740 # line-endings.
741 h = h.getnormalized()
741 h = h.getnormalized()
742
742
743 # fast case first, no offsets, no fuzz
743 # fast case first, no offsets, no fuzz
744 old, oldstart, new, newstart = h.fuzzit(0, False)
744 old, oldstart, new, newstart = h.fuzzit(0, False)
745 oldstart += self.offset
745 oldstart += self.offset
746 orig_start = oldstart
746 orig_start = oldstart
747 # if there's skew we want to emit the "(offset %d lines)" even
747 # if there's skew we want to emit the "(offset %d lines)" even
748 # when the hunk cleanly applies at start + skew, so skip the
748 # when the hunk cleanly applies at start + skew, so skip the
749 # fast case code
749 # fast case code
750 if (self.skew == 0 and
750 if (self.skew == 0 and
751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
752 if self.remove:
752 if self.remove:
753 self.backend.unlink(self.fname)
753 self.backend.unlink(self.fname)
754 else:
754 else:
755 self.lines[oldstart:oldstart + len(old)] = new
755 self.lines[oldstart:oldstart + len(old)] = new
756 self.offset += len(new) - len(old)
756 self.offset += len(new) - len(old)
757 self.dirty = True
757 self.dirty = True
758 return 0
758 return 0
759
759
760 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
761 self.hash = {}
761 self.hash = {}
762 for x, s in enumerate(self.lines):
762 for x, s in enumerate(self.lines):
763 self.hash.setdefault(s, []).append(x)
763 self.hash.setdefault(s, []).append(x)
764
764
765 for fuzzlen in xrange(3):
765 for fuzzlen in xrange(3):
766 for toponly in [True, False]:
766 for toponly in [True, False]:
767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
768 oldstart = oldstart + self.offset + self.skew
768 oldstart = oldstart + self.offset + self.skew
769 oldstart = min(oldstart, len(self.lines))
769 oldstart = min(oldstart, len(self.lines))
770 if old:
770 if old:
771 cand = self.findlines(old[0][1:], oldstart)
771 cand = self.findlines(old[0][1:], oldstart)
772 else:
772 else:
773 # Only adding lines with no or fuzzed context, just
774 # take the skew into account
775 cand = [oldstart]
775 cand = [oldstart]
776
776
777 for l in cand:
777 for l in cand:
778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
779 self.lines[l : l + len(old)] = new
779 self.lines[l : l + len(old)] = new
780 self.offset += len(new) - len(old)
780 self.offset += len(new) - len(old)
781 self.skew = l - orig_start
781 self.skew = l - orig_start
782 self.dirty = True
782 self.dirty = True
783 offset = l - orig_start - fuzzlen
783 offset = l - orig_start - fuzzlen
784 if fuzzlen:
784 if fuzzlen:
785 msg = _("Hunk #%d succeeded at %d "
785 msg = _("Hunk #%d succeeded at %d "
786 "with fuzz %d "
786 "with fuzz %d "
787 "(offset %d lines).\n")
787 "(offset %d lines).\n")
788 self.printfile(True)
788 self.printfile(True)
789 self.ui.warn(msg %
789 self.ui.warn(msg %
790 (h.number, l + 1, fuzzlen, offset))
790 (h.number, l + 1, fuzzlen, offset))
791 else:
791 else:
792 msg = _("Hunk #%d succeeded at %d "
792 msg = _("Hunk #%d succeeded at %d "
793 "(offset %d lines).\n")
793 "(offset %d lines).\n")
794 self.ui.note(msg % (h.number, l + 1, offset))
794 self.ui.note(msg % (h.number, l + 1, offset))
795 return fuzzlen
795 return fuzzlen
796 self.printfile(True)
796 self.printfile(True)
797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
798 self.rej.append(horig)
798 self.rej.append(horig)
799 return -1
799 return -1
800
800
801 def close(self):
801 def close(self):
802 if self.dirty:
802 if self.dirty:
803 self.writelines(self.fname, self.lines, self.mode)
803 self.writelines(self.fname, self.lines, self.mode)
804 self.write_rej()
804 self.write_rej()
805 return len(self.rej)
805 return len(self.rej)
806
806
807 class hunk(object):
807 class hunk(object):
808 def __init__(self, desc, num, lr, context):
808 def __init__(self, desc, num, lr, context):
809 self.number = num
809 self.number = num
810 self.desc = desc
810 self.desc = desc
811 self.hunk = [desc]
811 self.hunk = [desc]
812 self.a = []
812 self.a = []
813 self.b = []
813 self.b = []
814 self.starta = self.lena = None
814 self.starta = self.lena = None
815 self.startb = self.lenb = None
815 self.startb = self.lenb = None
816 if lr is not None:
816 if lr is not None:
817 if context:
817 if context:
818 self.read_context_hunk(lr)
818 self.read_context_hunk(lr)
819 else:
819 else:
820 self.read_unified_hunk(lr)
820 self.read_unified_hunk(lr)
821
821
822 def getnormalized(self):
822 def getnormalized(self):
823 """Return a copy with line endings normalized to LF."""
823 """Return a copy with line endings normalized to LF."""
824
824
825 def normalize(lines):
825 def normalize(lines):
826 nlines = []
826 nlines = []
827 for line in lines:
827 for line in lines:
828 if line.endswith('\r\n'):
828 if line.endswith('\r\n'):
829 line = line[:-2] + '\n'
829 line = line[:-2] + '\n'
830 nlines.append(line)
830 nlines.append(line)
831 return nlines
831 return nlines
832
832
833 # Dummy object, it is rebuilt manually
833 # Dummy object, it is rebuilt manually
834 nh = hunk(self.desc, self.number, None, None)
834 nh = hunk(self.desc, self.number, None, None)
835 nh.number = self.number
835 nh.number = self.number
836 nh.desc = self.desc
836 nh.desc = self.desc
837 nh.hunk = self.hunk
837 nh.hunk = self.hunk
838 nh.a = normalize(self.a)
838 nh.a = normalize(self.a)
839 nh.b = normalize(self.b)
839 nh.b = normalize(self.b)
840 nh.starta = self.starta
840 nh.starta = self.starta
841 nh.startb = self.startb
841 nh.startb = self.startb
842 nh.lena = self.lena
842 nh.lena = self.lena
843 nh.lenb = self.lenb
843 nh.lenb = self.lenb
844 return nh
844 return nh
845
845
846 def read_unified_hunk(self, lr):
846 def read_unified_hunk(self, lr):
847 m = unidesc.match(self.desc)
847 m = unidesc.match(self.desc)
848 if not m:
848 if not m:
849 raise PatchError(_("bad hunk #%d") % self.number)
849 raise PatchError(_("bad hunk #%d") % self.number)
850 self.starta, self.lena, self.startb, self.lenb = m.groups()
850 self.starta, self.lena, self.startb, self.lenb = m.groups()
851 if self.lena is None:
851 if self.lena is None:
852 self.lena = 1
852 self.lena = 1
853 else:
853 else:
854 self.lena = int(self.lena)
854 self.lena = int(self.lena)
855 if self.lenb is None:
855 if self.lenb is None:
856 self.lenb = 1
856 self.lenb = 1
857 else:
857 else:
858 self.lenb = int(self.lenb)
858 self.lenb = int(self.lenb)
859 self.starta = int(self.starta)
859 self.starta = int(self.starta)
860 self.startb = int(self.startb)
860 self.startb = int(self.startb)
861 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
861 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
862 self.b)
862 self.b)
863 # if we hit eof before finishing out the hunk, the last line will
864 # be zero length. Let's try to fix it up.
865 while len(self.hunk[-1]) == 0:
865 while len(self.hunk[-1]) == 0:
866 del self.hunk[-1]
866 del self.hunk[-1]
867 del self.a[-1]
867 del self.a[-1]
868 del self.b[-1]
868 del self.b[-1]
869 self.lena -= 1
869 self.lena -= 1
870 self.lenb -= 1
870 self.lenb -= 1
871 self._fixnewline(lr)
871 self._fixnewline(lr)
872
872
873 def read_context_hunk(self, lr):
873 def read_context_hunk(self, lr):
874 self.desc = lr.readline()
874 self.desc = lr.readline()
875 m = contextdesc.match(self.desc)
875 m = contextdesc.match(self.desc)
876 if not m:
876 if not m:
877 raise PatchError(_("bad hunk #%d") % self.number)
877 raise PatchError(_("bad hunk #%d") % self.number)
878 self.starta, aend = m.groups()
878 self.starta, aend = m.groups()
879 self.starta = int(self.starta)
879 self.starta = int(self.starta)
880 if aend is None:
880 if aend is None:
881 aend = self.starta
881 aend = self.starta
882 self.lena = int(aend) - self.starta
882 self.lena = int(aend) - self.starta
883 if self.starta:
883 if self.starta:
884 self.lena += 1
884 self.lena += 1
885 for x in xrange(self.lena):
885 for x in xrange(self.lena):
886 l = lr.readline()
886 l = lr.readline()
887 if l.startswith('---'):
887 if l.startswith('---'):
888 # lines addition, old block is empty
888 # lines addition, old block is empty
889 lr.push(l)
889 lr.push(l)
890 break
890 break
891 s = l[2:]
891 s = l[2:]
892 if l.startswith('- ') or l.startswith('! '):
892 if l.startswith('- ') or l.startswith('! '):
893 u = '-' + s
893 u = '-' + s
894 elif l.startswith(' '):
894 elif l.startswith(' '):
895 u = ' ' + s
895 u = ' ' + s
896 else:
896 else:
897 raise PatchError(_("bad hunk #%d old text line %d") %
897 raise PatchError(_("bad hunk #%d old text line %d") %
898 (self.number, x))
898 (self.number, x))
899 self.a.append(u)
899 self.a.append(u)
900 self.hunk.append(u)
900 self.hunk.append(u)
901
901
902 l = lr.readline()
902 l = lr.readline()
903 if l.startswith('\ '):
903 if l.startswith('\ '):
904 s = self.a[-1][:-1]
904 s = self.a[-1][:-1]
905 self.a[-1] = s
905 self.a[-1] = s
906 self.hunk[-1] = s
906 self.hunk[-1] = s
907 l = lr.readline()
907 l = lr.readline()
908 m = contextdesc.match(l)
908 m = contextdesc.match(l)
909 if not m:
909 if not m:
910 raise PatchError(_("bad hunk #%d") % self.number)
910 raise PatchError(_("bad hunk #%d") % self.number)
911 self.startb, bend = m.groups()
911 self.startb, bend = m.groups()
912 self.startb = int(self.startb)
912 self.startb = int(self.startb)
913 if bend is None:
913 if bend is None:
914 bend = self.startb
914 bend = self.startb
915 self.lenb = int(bend) - self.startb
915 self.lenb = int(bend) - self.startb
916 if self.startb:
916 if self.startb:
917 self.lenb += 1
917 self.lenb += 1
918 hunki = 1
918 hunki = 1
919 for x in xrange(self.lenb):
919 for x in xrange(self.lenb):
920 l = lr.readline()
920 l = lr.readline()
921 if l.startswith('\ '):
921 if l.startswith('\ '):
922 # XXX: the only way to hit this is with an invalid line range.
923 # The no-eol marker is not counted in the line range, but I
924 # guess there are diff(1) implementations out there which behave differently.
925 s = self.b[-1][:-1]
925 s = self.b[-1][:-1]
926 self.b[-1] = s
926 self.b[-1] = s
927 self.hunk[hunki - 1] = s
927 self.hunk[hunki - 1] = s
928 continue
928 continue
929 if not l:
929 if not l:
930 # line deletions, new block is empty and we hit EOF
930 # line deletions, new block is empty and we hit EOF
931 lr.push(l)
931 lr.push(l)
932 break
932 break
933 s = l[2:]
933 s = l[2:]
934 if l.startswith('+ ') or l.startswith('! '):
934 if l.startswith('+ ') or l.startswith('! '):
935 u = '+' + s
935 u = '+' + s
936 elif l.startswith(' '):
936 elif l.startswith(' '):
937 u = ' ' + s
937 u = ' ' + s
938 elif len(self.b) == 0:
938 elif len(self.b) == 0:
939 # line deletions, new block is empty
939 # line deletions, new block is empty
940 lr.push(l)
940 lr.push(l)
941 break
941 break
942 else:
942 else:
943 raise PatchError(_("bad hunk #%d new text line %d") %
944 (self.number, x))
945 self.b.append(s)
945 self.b.append(s)
946 while True:
946 while True:
947 if hunki >= len(self.hunk):
947 if hunki >= len(self.hunk):
948 h = ""
948 h = ""
949 else:
949 else:
950 h = self.hunk[hunki]
950 h = self.hunk[hunki]
951 hunki += 1
951 hunki += 1
952 if h == u:
952 if h == u:
953 break
953 break
954 elif h.startswith('-'):
954 elif h.startswith('-'):
955 continue
955 continue
956 else:
956 else:
957 self.hunk.insert(hunki - 1, u)
957 self.hunk.insert(hunki - 1, u)
958 break
958 break
959
959
960 if not self.a:
960 if not self.a:
961 # this happens when lines were only added to the hunk
961 # this happens when lines were only added to the hunk
962 for x in self.hunk:
962 for x in self.hunk:
963 if x.startswith('-') or x.startswith(' '):
963 if x.startswith('-') or x.startswith(' '):
964 self.a.append(x)
964 self.a.append(x)
965 if not self.b:
965 if not self.b:
966 # this happens when lines were only deleted from the hunk
966 # this happens when lines were only deleted from the hunk
967 for x in self.hunk:
967 for x in self.hunk:
968 if x.startswith('+') or x.startswith(' '):
968 if x.startswith('+') or x.startswith(' '):
969 self.b.append(x[1:])
969 self.b.append(x[1:])
970 # @@ -start,len +start,len @@
970 # @@ -start,len +start,len @@
971 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
971 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
972 self.startb, self.lenb)
972 self.startb, self.lenb)
973 self.hunk[0] = self.desc
973 self.hunk[0] = self.desc
974 self._fixnewline(lr)
974 self._fixnewline(lr)
975
975
976 def _fixnewline(self, lr):
976 def _fixnewline(self, lr):
977 l = lr.readline()
977 l = lr.readline()
978 if l.startswith('\ '):
978 if l.startswith('\ '):
979 diffhelpers.fix_newline(self.hunk, self.a, self.b)
979 diffhelpers.fix_newline(self.hunk, self.a, self.b)
980 else:
980 else:
981 lr.push(l)
981 lr.push(l)
982
982
983 def complete(self):
983 def complete(self):
984 return len(self.a) == self.lena and len(self.b) == self.lenb
984 return len(self.a) == self.lena and len(self.b) == self.lenb
985
985
986 def _fuzzit(self, old, new, fuzz, toponly):
986 def _fuzzit(self, old, new, fuzz, toponly):
987 # this removes context lines from the top and bottom of the hunk. It
988 # checks the hunk to make sure only context lines are removed, and then
989 # returns a new shortened list of lines.
990 fuzz = min(fuzz, len(old))
990 fuzz = min(fuzz, len(old))
991 if fuzz:
991 if fuzz:
992 top = 0
992 top = 0
993 bot = 0
993 bot = 0
994 hlen = len(self.hunk)
994 hlen = len(self.hunk)
995 for x in xrange(hlen - 1):
995 for x in xrange(hlen - 1):
996 # the hunk starts with the @@ line, so use x+1
996 # the hunk starts with the @@ line, so use x+1
997 if self.hunk[x + 1][0] == ' ':
997 if self.hunk[x + 1][0] == ' ':
998 top += 1
998 top += 1
999 else:
999 else:
1000 break
1000 break
1001 if not toponly:
1001 if not toponly:
1002 for x in xrange(hlen - 1):
1002 for x in xrange(hlen - 1):
1003 if self.hunk[hlen - bot - 1][0] == ' ':
1003 if self.hunk[hlen - bot - 1][0] == ' ':
1004 bot += 1
1004 bot += 1
1005 else:
1005 else:
1006 break
1006 break
1007
1007
1008 bot = min(fuzz, bot)
1008 bot = min(fuzz, bot)
1009 top = min(fuzz, top)
1009 top = min(fuzz, top)
1010 return old[top:len(old) - bot], new[top:len(new) - bot], top
1010 return old[top:len(old) - bot], new[top:len(new) - bot], top
1011 return old, new, 0
1011 return old, new, 0
1012
1012
1013 def fuzzit(self, fuzz, toponly):
1013 def fuzzit(self, fuzz, toponly):
1014 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1014 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1015 oldstart = self.starta + top
1015 oldstart = self.starta + top
1016 newstart = self.startb + top
1016 newstart = self.startb + top
1017 # zero length hunk ranges already have their start decremented
1017 # zero length hunk ranges already have their start decremented
1018 if self.lena and oldstart > 0:
1018 if self.lena and oldstart > 0:
1019 oldstart -= 1
1019 oldstart -= 1
1020 if self.lenb and newstart > 0:
1020 if self.lenb and newstart > 0:
1021 newstart -= 1
1021 newstart -= 1
1022 return old, oldstart, new, newstart
1022 return old, oldstart, new, newstart
1023
1023
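# Editor's illustrative sketch, not part of patch.py: a hunk built by hand
# (the same way getnormalized() rebuilds one) to show fuzzit() trimming one
# line of context from each end. The hunk text is made up.
h = hunk('@@ -1,3 +1,3 @@\n', 1, None, None)
h.starta = h.startb = 1
h.lena = h.lenb = 3
h.a = [' context 1\n', '-old line\n', ' context 2\n']
h.b = ['context 1\n', 'new line\n', 'context 2\n']
h.hunk = [h.desc, ' context 1\n', '-old line\n', '+new line\n', ' context 2\n']
old, oldstart, new, newstart = h.fuzzit(1, False)
# -> old == ['-old line\n'], new == ['new line\n'], oldstart == newstart == 1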
1024 class binhunk(object):
1024 class binhunk(object):
1025 'A binary patch file.'
1025 'A binary patch file.'
1026 def __init__(self, lr, fname):
1026 def __init__(self, lr, fname):
1027 self.text = None
1027 self.text = None
1028 self.delta = False
1028 self.delta = False
1029 self.hunk = ['GIT binary patch\n']
1029 self.hunk = ['GIT binary patch\n']
1030 self._fname = fname
1030 self._fname = fname
1031 self._read(lr)
1031 self._read(lr)
1032
1032
1033 def complete(self):
1033 def complete(self):
1034 return self.text is not None
1034 return self.text is not None
1035
1035
1036 def new(self, lines):
1036 def new(self, lines):
1037 if self.delta:
1037 if self.delta:
1038 return [applybindelta(self.text, ''.join(lines))]
1038 return [applybindelta(self.text, ''.join(lines))]
1039 return [self.text]
1039 return [self.text]
1040
1040
1041 def _read(self, lr):
1041 def _read(self, lr):
1042 def getline(lr, hunk):
1042 def getline(lr, hunk):
1043 l = lr.readline()
1043 l = lr.readline()
1044 hunk.append(l)
1044 hunk.append(l)
1045 return l.rstrip('\r\n')
1045 return l.rstrip('\r\n')
1046
1046
1047 size = 0
1047 size = 0
1048 while True:
1048 while True:
1049 line = getline(lr, self.hunk)
1049 line = getline(lr, self.hunk)
1050 if not line:
1050 if not line:
1051 raise PatchError(_('could not extract "%s" binary data')
1051 raise PatchError(_('could not extract "%s" binary data')
1052 % self._fname)
1052 % self._fname)
1053 if line.startswith('literal '):
1053 if line.startswith('literal '):
1054 size = int(line[8:].rstrip())
1054 size = int(line[8:].rstrip())
1055 break
1055 break
1056 if line.startswith('delta '):
1056 if line.startswith('delta '):
1057 size = int(line[6:].rstrip())
1057 size = int(line[6:].rstrip())
1058 self.delta = True
1058 self.delta = True
1059 break
1059 break
1060 dec = []
1060 dec = []
1061 line = getline(lr, self.hunk)
1061 line = getline(lr, self.hunk)
1062 while len(line) > 1:
1062 while len(line) > 1:
1063 l = line[0]
1063 l = line[0]
1064 if l <= 'Z' and l >= 'A':
1064 if l <= 'Z' and l >= 'A':
1065 l = ord(l) - ord('A') + 1
1065 l = ord(l) - ord('A') + 1
1066 else:
1066 else:
1067 l = ord(l) - ord('a') + 27
1067 l = ord(l) - ord('a') + 27
1068 try:
1068 try:
1069 dec.append(base85.b85decode(line[1:])[:l])
1069 dec.append(base85.b85decode(line[1:])[:l])
1070 except ValueError, e:
1070 except ValueError, e:
1071 raise PatchError(_('could not decode "%s" binary patch: %s')
1071 raise PatchError(_('could not decode "%s" binary patch: %s')
1072 % (self._fname, str(e)))
1072 % (self._fname, str(e)))
1073 line = getline(lr, self.hunk)
1073 line = getline(lr, self.hunk)
1074 text = zlib.decompress(''.join(dec))
1074 text = zlib.decompress(''.join(dec))
1075 if len(text) != size:
1075 if len(text) != size:
1076 raise PatchError(_('"%s" length is %d bytes, should be %d')
1076 raise PatchError(_('"%s" length is %d bytes, should be %d')
1077 % (self._fname, len(text), size))
1077 % (self._fname, len(text), size))
1078 self.text = text
1078 self.text = text
1079
1079
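# Editor's illustrative sketch, not part of patch.py: building a tiny
# 'literal' body with the same base85/zlib encoding binhunk._read expects,
# then parsing it back. Assumes mercurial.base85; 'data.bin' is made up.
import cStringIO, zlib
from mercurial import base85
payload = 'new binary contents\n'
raw = zlib.compress(payload)
if len(raw) <= 26:                        # git's per-line length marker
    lenchar = chr(ord('A') + len(raw) - 1)
else:
    lenchar = chr(ord('a') + len(raw) - 27)
body = 'literal %d\n%s%s\n\n' % (len(payload), lenchar,
                                 base85.b85encode(raw, True))
bh = binhunk(linereader(cStringIO.StringIO(body)), 'data.bin')
assert bh.new([]) == [payload]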
1080 def parsefilename(str):
1080 def parsefilename(str):
1081 # --- filename \t|space stuff
1081 # --- filename \t|space stuff
1082 s = str[4:].rstrip('\r\n')
1082 s = str[4:].rstrip('\r\n')
1083 i = s.find('\t')
1083 i = s.find('\t')
1084 if i < 0:
1084 if i < 0:
1085 i = s.find(' ')
1085 i = s.find(' ')
1086 if i < 0:
1086 if i < 0:
1087 return s
1087 return s
1088 return s[:i]
1088 return s[:i]
1089
1089
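# Editor's illustrative sketch, not part of patch.py: the file name in a
# '--- ' or '+++ ' header stops at the first tab or space.
assert parsefilename('--- a/foo/bar.c\tMon Jan 01 00:00:00 2007\n') == 'a/foo/bar.c'
assert parsefilename('+++ b/foo/bar.c\n') == 'b/foo/bar.c'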
1090 def pathtransform(path, strip, prefix):
1090 def pathtransform(path, strip, prefix):
1091 '''turn a path from a patch into a path suitable for the repository
1092
1093 prefix, if not empty, is expected to be normalized with a / at the end.
1094
1095 Returns (stripped components, path in repository).
1096
1097 >>> pathtransform('a/b/c', 0, '')
1098 ('', 'a/b/c')
1099 >>> pathtransform(' a/b/c ', 0, '')
1100 ('', ' a/b/c')
1101 >>> pathtransform(' a/b/c ', 2, '')
1102 ('a/b/', 'c')
1103 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1104 ('a//b/', 'd/e/c')
1105 >>> pathtransform('a/b/c', 3, '')
1106 Traceback (most recent call last):
1107 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1108 '''
1109 pathlen = len(path)
1109 pathlen = len(path)
1110 i = 0
1110 i = 0
1111 if strip == 0:
1111 if strip == 0:
1112 return '', path.rstrip()
1112 return '', path.rstrip()
1113 count = strip
1113 count = strip
1114 while count > 0:
1114 while count > 0:
1115 i = path.find('/', i)
1115 i = path.find('/', i)
1116 if i == -1:
1116 if i == -1:
1117 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1117 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1118 (count, strip, path))
1118 (count, strip, path))
1119 i += 1
1119 i += 1
1120 # consume '//' in the path
1120 # consume '//' in the path
1121 while i < pathlen - 1 and path[i] == '/':
1121 while i < pathlen - 1 and path[i] == '/':
1122 i += 1
1122 i += 1
1123 count -= 1
1123 count -= 1
1124 return path[:i].lstrip(), prefix + path[i:].rstrip()
1124 return path[:i].lstrip(), prefix + path[i:].rstrip()
1125
1125
1126 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1126 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1127 nulla = afile_orig == "/dev/null"
1127 nulla = afile_orig == "/dev/null"
1128 nullb = bfile_orig == "/dev/null"
1128 nullb = bfile_orig == "/dev/null"
1129 create = nulla and hunk.starta == 0 and hunk.lena == 0
1129 create = nulla and hunk.starta == 0 and hunk.lena == 0
1130 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1130 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1131 abase, afile = pathtransform(afile_orig, strip, prefix)
1131 abase, afile = pathtransform(afile_orig, strip, prefix)
1132 gooda = not nulla and backend.exists(afile)
1132 gooda = not nulla and backend.exists(afile)
1133 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1133 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1134 if afile == bfile:
1134 if afile == bfile:
1135 goodb = gooda
1135 goodb = gooda
1136 else:
1136 else:
1137 goodb = not nullb and backend.exists(bfile)
1137 goodb = not nullb and backend.exists(bfile)
1138 missing = not goodb and not gooda and not create
1138 missing = not goodb and not gooda and not create
1139
1139
1140 # some diff programs apparently produce patches where the afile is
1140 # some diff programs apparently produce patches where the afile is
1141 # not /dev/null, but afile starts with bfile
1141 # not /dev/null, but afile starts with bfile
1142 abasedir = afile[:afile.rfind('/') + 1]
1142 abasedir = afile[:afile.rfind('/') + 1]
1143 bbasedir = bfile[:bfile.rfind('/') + 1]
1143 bbasedir = bfile[:bfile.rfind('/') + 1]
1144 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1144 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1145 and hunk.starta == 0 and hunk.lena == 0):
1145 and hunk.starta == 0 and hunk.lena == 0):
1146 create = True
1146 create = True
1147 missing = False
1147 missing = False
1148
1148
1149 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1149 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1150 # diff is between a file and its backup. In this case, the original
1150 # diff is between a file and its backup. In this case, the original
1151 # file should be patched (see original mpatch code).
1151 # file should be patched (see original mpatch code).
1152 isbackup = (abase == bbase and bfile.startswith(afile))
1152 isbackup = (abase == bbase and bfile.startswith(afile))
1153 fname = None
1153 fname = None
1154 if not missing:
1154 if not missing:
1155 if gooda and goodb:
1155 if gooda and goodb:
1156 fname = isbackup and afile or bfile
1156 fname = isbackup and afile or bfile
1157 elif gooda:
1157 elif gooda:
1158 fname = afile
1158 fname = afile
1159
1159
1160 if not fname:
1160 if not fname:
1161 if not nullb:
1161 if not nullb:
1162 fname = isbackup and afile or bfile
1162 fname = isbackup and afile or bfile
1163 elif not nulla:
1163 elif not nulla:
1164 fname = afile
1164 fname = afile
1165 else:
1165 else:
1166 raise PatchError(_("undefined source and destination files"))
1166 raise PatchError(_("undefined source and destination files"))
1167
1167
1168 gp = patchmeta(fname)
1168 gp = patchmeta(fname)
1169 if create:
1169 if create:
1170 gp.op = 'ADD'
1170 gp.op = 'ADD'
1171 elif remove:
1171 elif remove:
1172 gp.op = 'DELETE'
1172 gp.op = 'DELETE'
1173 return gp
1173 return gp
1174
1174
1175 def scangitpatch(lr, firstline):
1175 def scangitpatch(lr, firstline):
1176 """
1176 """
1177 Git patches can emit:
1177 Git patches can emit:
1178 - rename a to b
1178 - rename a to b
1179 - change b
1179 - change b
1180 - copy a to c
1180 - copy a to c
1181 - change c
1181 - change c
1182
1182
1183 We cannot apply this sequence as-is: the renamed 'a' could not be
1184 found, since it would have been renamed already. And we cannot copy
1185 from 'b' instead because 'b' would already have been changed. So
1186 we scan the git patch for copy and rename commands so we can
1187 perform the copies ahead of time.
1188 """
1188 """
1189 pos = 0
1189 pos = 0
1190 try:
1190 try:
1191 pos = lr.fp.tell()
1191 pos = lr.fp.tell()
1192 fp = lr.fp
1192 fp = lr.fp
1193 except IOError:
1193 except IOError:
1194 fp = cStringIO.StringIO(lr.fp.read())
1194 fp = cStringIO.StringIO(lr.fp.read())
1195 gitlr = linereader(fp)
1195 gitlr = linereader(fp)
1196 gitlr.push(firstline)
1196 gitlr.push(firstline)
1197 gitpatches = readgitpatch(gitlr)
1197 gitpatches = readgitpatch(gitlr)
1198 fp.seek(pos)
1198 fp.seek(pos)
1199 return gitpatches
1199 return gitpatches
1200
1200
1201 def iterhunks(fp):
1201 def iterhunks(fp):
1202 """Read a patch and yield the following events:
1202 """Read a patch and yield the following events:
1203 - ("file", afile, bfile, firsthunk): select a new target file.
1203 - ("file", afile, bfile, firsthunk): select a new target file.
1204 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1204 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1205 "file" event.
1205 "file" event.
1206 - ("git", gitchanges): current diff is in git format, gitchanges
1206 - ("git", gitchanges): current diff is in git format, gitchanges
1207 maps filenames to gitpatch records. Unique event.
1207 maps filenames to gitpatch records. Unique event.
1208 """
1208 """
1209 afile = ""
1209 afile = ""
1210 bfile = ""
1210 bfile = ""
1211 state = None
1211 state = None
1212 hunknum = 0
1212 hunknum = 0
1213 emitfile = newfile = False
1213 emitfile = newfile = False
1214 gitpatches = None
1214 gitpatches = None
1215
1215
1216 # our states
1216 # our states
1217 BFILE = 1
1217 BFILE = 1
1218 context = None
1218 context = None
1219 lr = linereader(fp)
1219 lr = linereader(fp)
1220
1220
1221 while True:
1221 while True:
1222 x = lr.readline()
1222 x = lr.readline()
1223 if not x:
1223 if not x:
1224 break
1224 break
1225 if state == BFILE and (
1225 if state == BFILE and (
1226 (not context and x[0] == '@')
1226 (not context and x[0] == '@')
1227 or (context is not False and x.startswith('***************'))
1227 or (context is not False and x.startswith('***************'))
1228 or x.startswith('GIT binary patch')):
1228 or x.startswith('GIT binary patch')):
1229 gp = None
1229 gp = None
1230 if (gitpatches and
1230 if (gitpatches and
1231 gitpatches[-1].ispatching(afile, bfile)):
1231 gitpatches[-1].ispatching(afile, bfile)):
1232 gp = gitpatches.pop()
1232 gp = gitpatches.pop()
1233 if x.startswith('GIT binary patch'):
1233 if x.startswith('GIT binary patch'):
1234 h = binhunk(lr, gp.path)
1234 h = binhunk(lr, gp.path)
1235 else:
1235 else:
1236 if context is None and x.startswith('***************'):
1236 if context is None and x.startswith('***************'):
1237 context = True
1237 context = True
1238 h = hunk(x, hunknum + 1, lr, context)
1238 h = hunk(x, hunknum + 1, lr, context)
1239 hunknum += 1
1239 hunknum += 1
1240 if emitfile:
1240 if emitfile:
1241 emitfile = False
1241 emitfile = False
1242 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1242 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1243 yield 'hunk', h
1243 yield 'hunk', h
1244 elif x.startswith('diff --git a/'):
1244 elif x.startswith('diff --git a/'):
1245 m = gitre.match(x.rstrip(' \r\n'))
1245 m = gitre.match(x.rstrip(' \r\n'))
1246 if not m:
1246 if not m:
1247 continue
1247 continue
1248 if gitpatches is None:
1248 if gitpatches is None:
1249 # scan whole input for git metadata
1249 # scan whole input for git metadata
1250 gitpatches = scangitpatch(lr, x)
1250 gitpatches = scangitpatch(lr, x)
1251 yield 'git', [g.copy() for g in gitpatches
1251 yield 'git', [g.copy() for g in gitpatches
1252 if g.op in ('COPY', 'RENAME')]
1252 if g.op in ('COPY', 'RENAME')]
1253 gitpatches.reverse()
1253 gitpatches.reverse()
1254 afile = 'a/' + m.group(1)
1254 afile = 'a/' + m.group(1)
1255 bfile = 'b/' + m.group(2)
1255 bfile = 'b/' + m.group(2)
1256 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1256 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1257 gp = gitpatches.pop()
1257 gp = gitpatches.pop()
1258 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1258 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1259 if not gitpatches:
1259 if not gitpatches:
1260 raise PatchError(_('failed to synchronize metadata for "%s"')
1260 raise PatchError(_('failed to synchronize metadata for "%s"')
1261 % afile[2:])
1261 % afile[2:])
1262 gp = gitpatches[-1]
1262 gp = gitpatches[-1]
1263 newfile = True
1263 newfile = True
1264 elif x.startswith('---'):
1264 elif x.startswith('---'):
1265 # check for a unified diff
1265 # check for a unified diff
1266 l2 = lr.readline()
1266 l2 = lr.readline()
1267 if not l2.startswith('+++'):
1267 if not l2.startswith('+++'):
1268 lr.push(l2)
1268 lr.push(l2)
1269 continue
1269 continue
1270 newfile = True
1270 newfile = True
1271 context = False
1271 context = False
1272 afile = parsefilename(x)
1272 afile = parsefilename(x)
1273 bfile = parsefilename(l2)
1273 bfile = parsefilename(l2)
1274 elif x.startswith('***'):
1274 elif x.startswith('***'):
1275 # check for a context diff
1275 # check for a context diff
1276 l2 = lr.readline()
1276 l2 = lr.readline()
1277 if not l2.startswith('---'):
1277 if not l2.startswith('---'):
1278 lr.push(l2)
1278 lr.push(l2)
1279 continue
1279 continue
1280 l3 = lr.readline()
1280 l3 = lr.readline()
1281 lr.push(l3)
1281 lr.push(l3)
1282 if not l3.startswith("***************"):
1282 if not l3.startswith("***************"):
1283 lr.push(l2)
1283 lr.push(l2)
1284 continue
1284 continue
1285 newfile = True
1285 newfile = True
1286 context = True
1286 context = True
1287 afile = parsefilename(x)
1287 afile = parsefilename(x)
1288 bfile = parsefilename(l2)
1288 bfile = parsefilename(l2)
1289
1289
1290 if newfile:
1290 if newfile:
1291 newfile = False
1291 newfile = False
1292 emitfile = True
1292 emitfile = True
1293 state = BFILE
1293 state = BFILE
1294 hunknum = 0
1294 hunknum = 0
1295
1295
1296 while gitpatches:
1296 while gitpatches:
1297 gp = gitpatches.pop()
1297 gp = gitpatches.pop()
1298 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1298 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1299
1299
1300 def applybindelta(binchunk, data):
1300 def applybindelta(binchunk, data):
1301 """Apply a binary delta hunk
1301 """Apply a binary delta hunk
1302 The algorithm used is the algorithm from git's patch-delta.c
1302 The algorithm used is the algorithm from git's patch-delta.c
1303 """
1303 """
1304 def deltahead(binchunk):
1304 def deltahead(binchunk):
1305 i = 0
1305 i = 0
1306 for c in binchunk:
1306 for c in binchunk:
1307 i += 1
1307 i += 1
1308 if not (ord(c) & 0x80):
1308 if not (ord(c) & 0x80):
1309 return i
1309 return i
1310 return i
1310 return i
1311 out = ""
1311 out = ""
1312 s = deltahead(binchunk)
1312 s = deltahead(binchunk)
1313 binchunk = binchunk[s:]
1313 binchunk = binchunk[s:]
1314 s = deltahead(binchunk)
1314 s = deltahead(binchunk)
1315 binchunk = binchunk[s:]
1315 binchunk = binchunk[s:]
1316 i = 0
1316 i = 0
1317 while i < len(binchunk):
1317 while i < len(binchunk):
1318 cmd = ord(binchunk[i])
1318 cmd = ord(binchunk[i])
1319 i += 1
1319 i += 1
1320 if (cmd & 0x80):
1320 if (cmd & 0x80):
1321 offset = 0
1321 offset = 0
1322 size = 0
1322 size = 0
1323 if (cmd & 0x01):
1323 if (cmd & 0x01):
1324 offset = ord(binchunk[i])
1324 offset = ord(binchunk[i])
1325 i += 1
1325 i += 1
1326 if (cmd & 0x02):
1326 if (cmd & 0x02):
1327 offset |= ord(binchunk[i]) << 8
1327 offset |= ord(binchunk[i]) << 8
1328 i += 1
1328 i += 1
1329 if (cmd & 0x04):
1329 if (cmd & 0x04):
1330 offset |= ord(binchunk[i]) << 16
1330 offset |= ord(binchunk[i]) << 16
1331 i += 1
1331 i += 1
1332 if (cmd & 0x08):
1332 if (cmd & 0x08):
1333 offset |= ord(binchunk[i]) << 24
1333 offset |= ord(binchunk[i]) << 24
1334 i += 1
1334 i += 1
1335 if (cmd & 0x10):
1335 if (cmd & 0x10):
1336 size = ord(binchunk[i])
1336 size = ord(binchunk[i])
1337 i += 1
1337 i += 1
1338 if (cmd & 0x20):
1338 if (cmd & 0x20):
1339 size |= ord(binchunk[i]) << 8
1339 size |= ord(binchunk[i]) << 8
1340 i += 1
1340 i += 1
1341 if (cmd & 0x40):
1341 if (cmd & 0x40):
1342 size |= ord(binchunk[i]) << 16
1342 size |= ord(binchunk[i]) << 16
1343 i += 1
1343 i += 1
1344 if size == 0:
1344 if size == 0:
1345 size = 0x10000
1345 size = 0x10000
1346 offset_end = offset + size
1346 offset_end = offset + size
1347 out += data[offset:offset_end]
1347 out += data[offset:offset_end]
1348 elif cmd != 0:
1348 elif cmd != 0:
1349 offset_end = i + cmd
1349 offset_end = i + cmd
1350 out += binchunk[i:offset_end]
1350 out += binchunk[i:offset_end]
1351 i += cmd
1351 i += cmd
1352 else:
1352 else:
1353 raise PatchError(_('unexpected delta opcode 0'))
1353 raise PatchError(_('unexpected delta opcode 0'))
1354 return out
1354 return out
1355
1355
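Editor's note: a hand-built example of the delta format handled above may help. The chunk starts with two varint headers (source and target size, the high bit marking continuation bytes), followed by copy opcodes (0x80 bit set, with optional offset/size bytes selected by the low bits) and literal inserts (the opcode value is the byte count). The byte values below are chosen purely for illustration:

src = 'hello world'
# header: source size 11, target size 5, then one copy opcode:
# 0x90 = 0x80 (copy) | 0x10 (one size byte follows); offset defaults to 0
assert applybindelta('\x0b\x05\x90\x05', src) == 'hello'
# literal insert: opcode 3 means "take the next 3 bytes from the delta itself"
assert applybindelta('\x0b\x03' + '\x03abc', src) == 'abc'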
1356 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1356 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1357 """Reads a patch from fp and tries to apply it.
1357 """Reads a patch from fp and tries to apply it.
1358
1358
1359 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1359 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1360 there was any fuzz.
1360 there was any fuzz.
1361
1361
1362 If 'eolmode' is 'strict', the patch content and patched file are
1362 If 'eolmode' is 'strict', the patch content and patched file are
1363 read in binary mode. Otherwise, line endings are ignored when
1363 read in binary mode. Otherwise, line endings are ignored when
1364 patching then normalized according to 'eolmode'.
1364 patching then normalized according to 'eolmode'.
1365 """
1365 """
1366 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1366 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1367 prefix=prefix, eolmode=eolmode)
1367 prefix=prefix, eolmode=eolmode)
1368
1368
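Editor's sketch (not part of the changeset): a caller that only needs a pass/fail signal would interpret the return value documented above like this, assuming ui, fp, backend and store are already set up:

ret = applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict')
if ret == -1:
    ui.warn('some hunks were rejected\n')
elif ret == 1:
    ui.status('patch applied, but with fuzz\n')
else:
    ui.status('patch applied cleanly\n')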
1369 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1369 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1370 eolmode='strict'):
1370 eolmode='strict'):
1371
1371
1372 if prefix:
1372 if prefix:
1373 # clean up double slashes, lack of trailing slashes, etc
1373 # clean up double slashes, lack of trailing slashes, etc
1374 prefix = util.normpath(prefix) + '/'
1374 prefix = util.normpath(prefix) + '/'
1375 def pstrip(p):
1375 def pstrip(p):
1376 return pathtransform(p, strip - 1, prefix)[1]
1376 return pathtransform(p, strip - 1, prefix)[1]
1377
1377
1378 rejects = 0
1378 rejects = 0
1379 err = 0
1379 err = 0
1380 current_file = None
1380 current_file = None
1381
1381
1382 for state, values in iterhunks(fp):
1382 for state, values in iterhunks(fp):
1383 if state == 'hunk':
1383 if state == 'hunk':
1384 if not current_file:
1384 if not current_file:
1385 continue
1385 continue
1386 ret = current_file.apply(values)
1386 ret = current_file.apply(values)
1387 if ret > 0:
1387 if ret > 0:
1388 err = 1
1388 err = 1
1389 elif state == 'file':
1389 elif state == 'file':
1390 if current_file:
1390 if current_file:
1391 rejects += current_file.close()
1391 rejects += current_file.close()
1392 current_file = None
1392 current_file = None
1393 afile, bfile, first_hunk, gp = values
1393 afile, bfile, first_hunk, gp = values
1394 if gp:
1394 if gp:
1395 gp.path = pstrip(gp.path)
1395 gp.path = pstrip(gp.path)
1396 if gp.oldpath:
1396 if gp.oldpath:
1397 gp.oldpath = pstrip(gp.oldpath)
1397 gp.oldpath = pstrip(gp.oldpath)
1398 else:
1398 else:
1399 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1399 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1400 prefix)
1400 prefix)
1401 if gp.op == 'RENAME':
1401 if gp.op == 'RENAME':
1402 backend.unlink(gp.oldpath)
1402 backend.unlink(gp.oldpath)
1403 if not first_hunk:
1403 if not first_hunk:
1404 if gp.op == 'DELETE':
1404 if gp.op == 'DELETE':
1405 backend.unlink(gp.path)
1405 backend.unlink(gp.path)
1406 continue
1406 continue
1407 data, mode = None, None
1407 data, mode = None, None
1408 if gp.op in ('RENAME', 'COPY'):
1408 if gp.op in ('RENAME', 'COPY'):
1409 data, mode = store.getfile(gp.oldpath)[:2]
1409 data, mode = store.getfile(gp.oldpath)[:2]
1410 # FIXME: failing getfile has never been handled here
1410 # FIXME: failing getfile has never been handled here
1411 assert data is not None
1411 assert data is not None
1412 if gp.mode:
1412 if gp.mode:
1413 mode = gp.mode
1413 mode = gp.mode
1414 if gp.op == 'ADD':
1414 if gp.op == 'ADD':
1415 # Added files without content have no hunk and
1415 # Added files without content have no hunk and
1416 # must be created
1416 # must be created
1417 data = ''
1417 data = ''
1418 if data or mode:
1418 if data or mode:
1419 if (gp.op in ('ADD', 'RENAME', 'COPY')
1419 if (gp.op in ('ADD', 'RENAME', 'COPY')
1420 and backend.exists(gp.path)):
1420 and backend.exists(gp.path)):
1421 raise PatchError(_("cannot create %s: destination "
1421 raise PatchError(_("cannot create %s: destination "
1422 "already exists") % gp.path)
1422 "already exists") % gp.path)
1423 backend.setfile(gp.path, data, mode, gp.oldpath)
1423 backend.setfile(gp.path, data, mode, gp.oldpath)
1424 continue
1424 continue
1425 try:
1425 try:
1426 current_file = patcher(ui, gp, backend, store,
1426 current_file = patcher(ui, gp, backend, store,
1427 eolmode=eolmode)
1427 eolmode=eolmode)
1428 except PatchError, inst:
1428 except PatchError, inst:
1429 ui.warn(str(inst) + '\n')
1429 ui.warn(str(inst) + '\n')
1430 current_file = None
1430 current_file = None
1431 rejects += 1
1431 rejects += 1
1432 continue
1432 continue
1433 elif state == 'git':
1433 elif state == 'git':
1434 for gp in values:
1434 for gp in values:
1435 path = pstrip(gp.oldpath)
1435 path = pstrip(gp.oldpath)
1436 data, mode = backend.getfile(path)
1436 data, mode = backend.getfile(path)
1437 if data is None:
1437 if data is None:
1438 # The error ignored here will trigger a getfile()
1438 # The error ignored here will trigger a getfile()
1439 # error in a place more appropriate for error
1439 # error in a place more appropriate for error
1440 # handling, and will not interrupt the patching
1440 # handling, and will not interrupt the patching
1441 # process.
1441 # process.
1442 pass
1442 pass
1443 else:
1443 else:
1444 store.setfile(path, data, mode)
1444 store.setfile(path, data, mode)
1445 else:
1445 else:
1446 raise util.Abort(_('unsupported parser state: %s') % state)
1446 raise util.Abort(_('unsupported parser state: %s') % state)
1447
1447
1448 if current_file:
1448 if current_file:
1449 rejects += current_file.close()
1449 rejects += current_file.close()
1450
1450
1451 if rejects:
1451 if rejects:
1452 return -1
1452 return -1
1453 return err
1453 return err
1454
1454
1455 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1455 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1456 similarity):
1456 similarity):
1457 """use <patcher> to apply <patchname> to the working directory.
1457 """use <patcher> to apply <patchname> to the working directory.
1458 returns whether patch was applied with fuzz factor."""
1458 returns whether patch was applied with fuzz factor."""
1459
1459
1460 fuzz = False
1460 fuzz = False
1461 args = []
1461 args = []
1462 cwd = repo.root
1462 cwd = repo.root
1463 if cwd:
1463 if cwd:
1464 args.append('-d %s' % util.shellquote(cwd))
1464 args.append('-d %s' % util.shellquote(cwd))
1465 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1465 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1466 util.shellquote(patchname)))
1466 util.shellquote(patchname)))
1467 try:
1467 try:
1468 for line in fp:
1468 for line in fp:
1469 line = line.rstrip()
1469 line = line.rstrip()
1470 ui.note(line + '\n')
1470 ui.note(line + '\n')
1471 if line.startswith('patching file '):
1471 if line.startswith('patching file '):
1472 pf = util.parsepatchoutput(line)
1472 pf = util.parsepatchoutput(line)
1473 printed_file = False
1473 printed_file = False
1474 files.add(pf)
1474 files.add(pf)
1475 elif line.find('with fuzz') >= 0:
1475 elif line.find('with fuzz') >= 0:
1476 fuzz = True
1476 fuzz = True
1477 if not printed_file:
1477 if not printed_file:
1478 ui.warn(pf + '\n')
1478 ui.warn(pf + '\n')
1479 printed_file = True
1479 printed_file = True
1480 ui.warn(line + '\n')
1480 ui.warn(line + '\n')
1481 elif line.find('saving rejects to file') >= 0:
1481 elif line.find('saving rejects to file') >= 0:
1482 ui.warn(line + '\n')
1482 ui.warn(line + '\n')
1483 elif line.find('FAILED') >= 0:
1483 elif line.find('FAILED') >= 0:
1484 if not printed_file:
1484 if not printed_file:
1485 ui.warn(pf + '\n')
1485 ui.warn(pf + '\n')
1486 printed_file = True
1486 printed_file = True
1487 ui.warn(line + '\n')
1487 ui.warn(line + '\n')
1488 finally:
1488 finally:
1489 if files:
1489 if files:
1490 scmutil.marktouched(repo, files, similarity)
1490 scmutil.marktouched(repo, files, similarity)
1491 code = fp.close()
1491 code = fp.close()
1492 if code:
1492 if code:
1493 raise PatchError(_("patch command failed: %s") %
1493 raise PatchError(_("patch command failed: %s") %
1494 util.explainexit(code)[0])
1494 util.explainexit(code)[0])
1495 return fuzz
1495 return fuzz
1496
1496
1497 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1497 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1498 eolmode='strict'):
1498 eolmode='strict'):
1499 if files is None:
1499 if files is None:
1500 files = set()
1500 files = set()
1501 if eolmode is None:
1501 if eolmode is None:
1502 eolmode = ui.config('patch', 'eol', 'strict')
1502 eolmode = ui.config('patch', 'eol', 'strict')
1503 if eolmode.lower() not in eolmodes:
1503 if eolmode.lower() not in eolmodes:
1504 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1504 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1505 eolmode = eolmode.lower()
1505 eolmode = eolmode.lower()
1506
1506
1507 store = filestore()
1507 store = filestore()
1508 try:
1508 try:
1509 fp = open(patchobj, 'rb')
1509 fp = open(patchobj, 'rb')
1510 except TypeError:
1510 except TypeError:
1511 fp = patchobj
1511 fp = patchobj
1512 try:
1512 try:
1513 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1513 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1514 eolmode=eolmode)
1514 eolmode=eolmode)
1515 finally:
1515 finally:
1516 if fp != patchobj:
1516 if fp != patchobj:
1517 fp.close()
1517 fp.close()
1518 files.update(backend.close())
1518 files.update(backend.close())
1519 store.close()
1519 store.close()
1520 if ret < 0:
1520 if ret < 0:
1521 raise PatchError(_('patch failed to apply'))
1521 raise PatchError(_('patch failed to apply'))
1522 return ret > 0
1522 return ret > 0
1523
1523
1524 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1524 def internalpatch(ui, repo, patchobj, strip, prefix, files=None,
1525 similarity=0):
1525 eolmode='strict', similarity=0):
1526 """use builtin patch to apply <patchobj> to the working directory.
1526 """use builtin patch to apply <patchobj> to the working directory.
1527 returns whether patch was applied with fuzz factor."""
1527 returns whether patch was applied with fuzz factor."""
1528 backend = workingbackend(ui, repo, similarity)
1528 backend = workingbackend(ui, repo, similarity)
1529 return patchbackend(ui, backend, patchobj, strip, '', files, eolmode)
1529 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1530
1530
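This is the point of the changeset: internalpatch now accepts prefix positionally and forwards it to patchbackend instead of hard-coding ''. From a caller's point of view (an editor's sketch; the patch file name and prefix are hypothetical, and ui/repo come from the enclosing command):

from mercurial import patch
patch.internalpatch(ui, repo, 'fix.patch', strip=1, prefix='sub/dir',
                    eolmode='strict', similarity=0)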
1531 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1531 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1532 eolmode='strict'):
1532 eolmode='strict'):
1533 backend = repobackend(ui, repo, ctx, store)
1533 backend = repobackend(ui, repo, ctx, store)
1534 return patchbackend(ui, backend, patchobj, strip, '', files, eolmode)
1534 return patchbackend(ui, backend, patchobj, strip, '', files, eolmode)
1535
1535
1536 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1536 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1537 similarity=0):
1537 similarity=0):
1538 """Apply <patchname> to the working directory.
1538 """Apply <patchname> to the working directory.
1539
1539
1540 'eolmode' specifies how end of lines should be handled. It can be:
1540 'eolmode' specifies how end of lines should be handled. It can be:
1541 - 'strict': inputs are read in binary mode, EOLs are preserved
1541 - 'strict': inputs are read in binary mode, EOLs are preserved
1542 - 'crlf': EOLs are ignored when patching and reset to CRLF
1542 - 'crlf': EOLs are ignored when patching and reset to CRLF
1543 - 'lf': EOLs are ignored when patching and reset to LF
1543 - 'lf': EOLs are ignored when patching and reset to LF
1544 - None: get it from user settings, default to 'strict'
1544 - None: get it from user settings, default to 'strict'
1545 'eolmode' is ignored when using an external patcher program.
1545 'eolmode' is ignored when using an external patcher program.
1546
1546
1547 Returns whether patch was applied with fuzz factor.
1547 Returns whether patch was applied with fuzz factor.
1548 """
1548 """
1549 patcher = ui.config('ui', 'patch')
1549 patcher = ui.config('ui', 'patch')
1550 if files is None:
1550 if files is None:
1551 files = set()
1551 files = set()
1552 if patcher:
1552 if patcher:
1553 return _externalpatch(ui, repo, patcher, patchname, strip,
1553 return _externalpatch(ui, repo, patcher, patchname, strip,
1554 files, similarity)
1554 files, similarity)
1555 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1555 return internalpatch(ui, repo, patchname, strip, '', files, eolmode,
1556 similarity)
1556 similarity)
1557
1557
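An illustrative use of the top-level entry point above: with ui.patch unset the builtin patcher is used, and passing eolmode=None defers to the [patch] eol configuration (ui, repo and the file name are assumed to be supplied by the caller):

fuzz = patch(ui, repo, 'fix.patch', strip=1, eolmode=None)
if fuzz:
    ui.warn('patch applied with fuzz\n')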
1558 def changedfiles(ui, repo, patchpath, strip=1):
1558 def changedfiles(ui, repo, patchpath, strip=1):
1559 backend = fsbackend(ui, repo.root)
1559 backend = fsbackend(ui, repo.root)
1560 fp = open(patchpath, 'rb')
1560 fp = open(patchpath, 'rb')
1561 try:
1561 try:
1562 changed = set()
1562 changed = set()
1563 for state, values in iterhunks(fp):
1563 for state, values in iterhunks(fp):
1564 if state == 'file':
1564 if state == 'file':
1565 afile, bfile, first_hunk, gp = values
1565 afile, bfile, first_hunk, gp = values
1566 if gp:
1566 if gp:
1567 gp.path = pathtransform(gp.path, strip - 1, '')[1]
1567 gp.path = pathtransform(gp.path, strip - 1, '')[1]
1568 if gp.oldpath:
1568 if gp.oldpath:
1569 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
1569 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
1570 else:
1570 else:
1571 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1571 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1572 '')
1572 '')
1573 changed.add(gp.path)
1573 changed.add(gp.path)
1574 if gp.op == 'RENAME':
1574 if gp.op == 'RENAME':
1575 changed.add(gp.oldpath)
1575 changed.add(gp.oldpath)
1576 elif state not in ('hunk', 'git'):
1576 elif state not in ('hunk', 'git'):
1577 raise util.Abort(_('unsupported parser state: %s') % state)
1577 raise util.Abort(_('unsupported parser state: %s') % state)
1578 return changed
1578 return changed
1579 finally:
1579 finally:
1580 fp.close()
1580 fp.close()
1581
1581
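A quick sketch of the helper above (hypothetical path): it reports the set of repository paths a patch file would touch, including rename sources, without applying anything:

touched = changedfiles(ui, repo, '/tmp/fix.patch', strip=1)
for f in sorted(touched):
    ui.status('%s\n' % f)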
1582 class GitDiffRequired(Exception):
1582 class GitDiffRequired(Exception):
1583 pass
1583 pass
1584
1584
1585 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
1585 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
1586 '''return diffopts with all features supported and parsed'''
1586 '''return diffopts with all features supported and parsed'''
1587 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
1587 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
1588 git=True, whitespace=True, formatchanging=True)
1588 git=True, whitespace=True, formatchanging=True)
1589
1589
1590 diffopts = diffallopts
1590 diffopts = diffallopts
1591
1591
1592 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
1592 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
1593 whitespace=False, formatchanging=False):
1593 whitespace=False, formatchanging=False):
1594 '''return diffopts with only opted-in features parsed
1594 '''return diffopts with only opted-in features parsed
1595
1595
1596 Features:
1596 Features:
1597 - git: git-style diffs
1597 - git: git-style diffs
1598 - whitespace: whitespace options like ignoreblanklines and ignorews
1598 - whitespace: whitespace options like ignoreblanklines and ignorews
1599 - formatchanging: options that will likely break or cause correctness issues
1599 - formatchanging: options that will likely break or cause correctness issues
1600 with most diff parsers
1600 with most diff parsers
1601 '''
1601 '''
1602 def get(key, name=None, getter=ui.configbool, forceplain=None):
1602 def get(key, name=None, getter=ui.configbool, forceplain=None):
1603 if opts:
1603 if opts:
1604 v = opts.get(key)
1604 v = opts.get(key)
1605 if v:
1605 if v:
1606 return v
1606 return v
1607 if forceplain is not None and ui.plain():
1607 if forceplain is not None and ui.plain():
1608 return forceplain
1608 return forceplain
1609 return getter(section, name or key, None, untrusted=untrusted)
1609 return getter(section, name or key, None, untrusted=untrusted)
1610
1610
1611 # core options, expected to be understood by every diff parser
1611 # core options, expected to be understood by every diff parser
1612 buildopts = {
1612 buildopts = {
1613 'nodates': get('nodates'),
1613 'nodates': get('nodates'),
1614 'showfunc': get('show_function', 'showfunc'),
1614 'showfunc': get('show_function', 'showfunc'),
1615 'context': get('unified', getter=ui.config),
1615 'context': get('unified', getter=ui.config),
1616 }
1616 }
1617
1617
1618 if git:
1618 if git:
1619 buildopts['git'] = get('git')
1619 buildopts['git'] = get('git')
1620 if whitespace:
1620 if whitespace:
1621 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
1621 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
1622 buildopts['ignorewsamount'] = get('ignore_space_change',
1622 buildopts['ignorewsamount'] = get('ignore_space_change',
1623 'ignorewsamount')
1623 'ignorewsamount')
1624 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
1624 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
1625 'ignoreblanklines')
1625 'ignoreblanklines')
1626 if formatchanging:
1626 if formatchanging:
1627 buildopts['text'] = opts and opts.get('text')
1627 buildopts['text'] = opts and opts.get('text')
1628 buildopts['nobinary'] = get('nobinary')
1628 buildopts['nobinary'] = get('nobinary')
1629 buildopts['noprefix'] = get('noprefix', forceplain=False)
1629 buildopts['noprefix'] = get('noprefix', forceplain=False)
1630
1630
1631 return mdiff.diffopts(**buildopts)
1631 return mdiff.diffopts(**buildopts)
1632
1632
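As a sketch of the opt-in split described in the docstring above, a consumer that wants git-style diffs plus the whitespace knobs, but none of the format-changing options, would build its diffopts like this (command-line opts use the external names listed above); the nobinary/noprefix settings are simply never read:

opts = difffeatureopts(ui, opts={'git': True, 'ignore_all_space': True},
                       git=True, whitespace=True)
# opts.git and opts.ignorews are set; [diff] nobinary/noprefix were not consulted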
1633 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1633 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1634 losedatafn=None, prefix=''):
1634 losedatafn=None, prefix=''):
1635 '''yields diff of changes to files between two nodes, or node and
1635 '''yields diff of changes to files between two nodes, or node and
1636 working directory.
1636 working directory.
1637
1637
1638 if node1 is None, use first dirstate parent instead.
1638 if node1 is None, use first dirstate parent instead.
1639 if node2 is None, compare node1 with working directory.
1639 if node2 is None, compare node1 with working directory.
1640
1640
1641 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1641 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1642 every time some change cannot be represented with the current
1642 every time some change cannot be represented with the current
1643 patch format. Return False to upgrade to git patch format, True to
1643 patch format. Return False to upgrade to git patch format, True to
1644 accept the loss or raise an exception to abort the diff. It is
1644 accept the loss or raise an exception to abort the diff. It is
1645 called with the name of current file being diffed as 'fn'. If set
1645 called with the name of current file being diffed as 'fn'. If set
1646 to None, patches will always be upgraded to git format when
1646 to None, patches will always be upgraded to git format when
1647 necessary.
1647 necessary.
1648
1648
1649 prefix is a filename prefix that is prepended to all filenames on
1649 prefix is a filename prefix that is prepended to all filenames on
1650 display (used for subrepos).
1650 display (used for subrepos).
1651 '''
1651 '''
1652
1652
1653 if opts is None:
1653 if opts is None:
1654 opts = mdiff.defaultopts
1654 opts = mdiff.defaultopts
1655
1655
1656 if not node1 and not node2:
1656 if not node1 and not node2:
1657 node1 = repo.dirstate.p1()
1657 node1 = repo.dirstate.p1()
1658
1658
1659 def lrugetfilectx():
1659 def lrugetfilectx():
1660 cache = {}
1660 cache = {}
1661 order = util.deque()
1661 order = util.deque()
1662 def getfilectx(f, ctx):
1662 def getfilectx(f, ctx):
1663 fctx = ctx.filectx(f, filelog=cache.get(f))
1663 fctx = ctx.filectx(f, filelog=cache.get(f))
1664 if f not in cache:
1664 if f not in cache:
1665 if len(cache) > 20:
1665 if len(cache) > 20:
1666 del cache[order.popleft()]
1666 del cache[order.popleft()]
1667 cache[f] = fctx.filelog()
1667 cache[f] = fctx.filelog()
1668 else:
1668 else:
1669 order.remove(f)
1669 order.remove(f)
1670 order.append(f)
1670 order.append(f)
1671 return fctx
1671 return fctx
1672 return getfilectx
1672 return getfilectx
1673 getfilectx = lrugetfilectx()
1673 getfilectx = lrugetfilectx()
1674
1674
1675 ctx1 = repo[node1]
1675 ctx1 = repo[node1]
1676 ctx2 = repo[node2]
1676 ctx2 = repo[node2]
1677
1677
1678 if not changes:
1678 if not changes:
1679 changes = repo.status(ctx1, ctx2, match=match)
1679 changes = repo.status(ctx1, ctx2, match=match)
1680 modified, added, removed = changes[:3]
1680 modified, added, removed = changes[:3]
1681
1681
1682 if not modified and not added and not removed:
1682 if not modified and not added and not removed:
1683 return []
1683 return []
1684
1684
1685 hexfunc = repo.ui.debugflag and hex or short
1685 hexfunc = repo.ui.debugflag and hex or short
1686 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
1686 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
1687
1687
1688 copy = {}
1688 copy = {}
1689 if opts.git or opts.upgrade:
1689 if opts.git or opts.upgrade:
1690 copy = copies.pathcopies(ctx1, ctx2)
1690 copy = copies.pathcopies(ctx1, ctx2)
1691
1691
1692 def difffn(opts, losedata):
1692 def difffn(opts, losedata):
1693 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1693 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1694 copy, getfilectx, opts, losedata, prefix)
1694 copy, getfilectx, opts, losedata, prefix)
1695 if opts.upgrade and not opts.git:
1695 if opts.upgrade and not opts.git:
1696 try:
1696 try:
1697 def losedata(fn):
1697 def losedata(fn):
1698 if not losedatafn or not losedatafn(fn=fn):
1698 if not losedatafn or not losedatafn(fn=fn):
1699 raise GitDiffRequired
1699 raise GitDiffRequired
1700 # Buffer the whole output until we are sure it can be generated
1700 # Buffer the whole output until we are sure it can be generated
1701 return list(difffn(opts.copy(git=False), losedata))
1701 return list(difffn(opts.copy(git=False), losedata))
1702 except GitDiffRequired:
1702 except GitDiffRequired:
1703 return difffn(opts.copy(git=True), None)
1703 return difffn(opts.copy(git=True), None)
1704 else:
1704 else:
1705 return difffn(opts, None)
1705 return difffn(opts, None)
1706
1706
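A minimal consumer of the generator above (editor's sketch, with repo and ui assumed to be in scope): diff() yields one plain-text block of headers and hunks per changed file.

for chunk in diff(repo, opts=diffallopts(ui, {'git': True})):
    ui.write(chunk)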
1707 def difflabel(func, *args, **kw):
1707 def difflabel(func, *args, **kw):
1708 '''yields 2-tuples of (output, label) based on the output of func()'''
1708 '''yields 2-tuples of (output, label) based on the output of func()'''
1709 headprefixes = [('diff', 'diff.diffline'),
1709 headprefixes = [('diff', 'diff.diffline'),
1710 ('copy', 'diff.extended'),
1710 ('copy', 'diff.extended'),
1711 ('rename', 'diff.extended'),
1711 ('rename', 'diff.extended'),
1712 ('old', 'diff.extended'),
1712 ('old', 'diff.extended'),
1713 ('new', 'diff.extended'),
1713 ('new', 'diff.extended'),
1714 ('deleted', 'diff.extended'),
1714 ('deleted', 'diff.extended'),
1715 ('---', 'diff.file_a'),
1715 ('---', 'diff.file_a'),
1716 ('+++', 'diff.file_b')]
1716 ('+++', 'diff.file_b')]
1717 textprefixes = [('@', 'diff.hunk'),
1717 textprefixes = [('@', 'diff.hunk'),
1718 ('-', 'diff.deleted'),
1718 ('-', 'diff.deleted'),
1719 ('+', 'diff.inserted')]
1719 ('+', 'diff.inserted')]
1720 head = False
1720 head = False
1721 for chunk in func(*args, **kw):
1721 for chunk in func(*args, **kw):
1722 lines = chunk.split('\n')
1722 lines = chunk.split('\n')
1723 for i, line in enumerate(lines):
1723 for i, line in enumerate(lines):
1724 if i != 0:
1724 if i != 0:
1725 yield ('\n', '')
1725 yield ('\n', '')
1726 if head:
1726 if head:
1727 if line.startswith('@'):
1727 if line.startswith('@'):
1728 head = False
1728 head = False
1729 else:
1729 else:
1730 if line and line[0] not in ' +-@\\':
1730 if line and line[0] not in ' +-@\\':
1731 head = True
1731 head = True
1732 stripline = line
1732 stripline = line
1733 diffline = False
1733 diffline = False
1734 if not head and line and line[0] in '+-':
1734 if not head and line and line[0] in '+-':
1735 # highlight tabs and trailing whitespace, but only in
1735 # highlight tabs and trailing whitespace, but only in
1736 # changed lines
1736 # changed lines
1737 stripline = line.rstrip()
1737 stripline = line.rstrip()
1738 diffline = True
1738 diffline = True
1739
1739
1740 prefixes = textprefixes
1740 prefixes = textprefixes
1741 if head:
1741 if head:
1742 prefixes = headprefixes
1742 prefixes = headprefixes
1743 for prefix, label in prefixes:
1743 for prefix, label in prefixes:
1744 if stripline.startswith(prefix):
1744 if stripline.startswith(prefix):
1745 if diffline:
1745 if diffline:
1746 for token in tabsplitter.findall(stripline):
1746 for token in tabsplitter.findall(stripline):
1747 if '\t' == token[0]:
1747 if '\t' == token[0]:
1748 yield (token, 'diff.tab')
1748 yield (token, 'diff.tab')
1749 else:
1749 else:
1750 yield (token, label)
1750 yield (token, label)
1751 else:
1751 else:
1752 yield (stripline, label)
1752 yield (stripline, label)
1753 break
1753 break
1754 else:
1754 else:
1755 yield (line, '')
1755 yield (line, '')
1756 if line != stripline:
1756 if line != stripline:
1757 yield (line[len(stripline):], 'diff.trailingwhitespace')
1757 yield (line[len(stripline):], 'diff.trailingwhitespace')
1758
1758
1759 def diffui(*args, **kw):
1759 def diffui(*args, **kw):
1760 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1760 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1761 return difflabel(diff, *args, **kw)
1761 return difflabel(diff, *args, **kw)
1762
1762
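The labelled variant pairs every output fragment with a colour label meant for ui.write(), e.g. (again assuming repo and ui):

for chunk, label in diffui(repo, opts=diffallopts(ui, {'git': True})):
    ui.write(chunk, label=label)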
1763 def _filepairs(ctx1, modified, added, removed, copy, opts):
1763 def _filepairs(ctx1, modified, added, removed, copy, opts):
1764 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
1764 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
1765 before and f2 is the name after. For added files, f1 will be None,
1765 before and f2 is the name after. For added files, f1 will be None,
1766 and for removed files, f2 will be None. copyop may be set to None, 'copy'
1766 and for removed files, f2 will be None. copyop may be set to None, 'copy'
1767 or 'rename' (the latter two only if opts.git is set).'''
1767 or 'rename' (the latter two only if opts.git is set).'''
1768 gone = set()
1768 gone = set()
1769
1769
1770 copyto = dict([(v, k) for k, v in copy.items()])
1770 copyto = dict([(v, k) for k, v in copy.items()])
1771
1771
1772 addedset, removedset = set(added), set(removed)
1772 addedset, removedset = set(added), set(removed)
1773 # Fix up added, since merged-in additions appear as
1773 # Fix up added, since merged-in additions appear as
1774 # modifications during merges
1774 # modifications during merges
1775 for f in modified:
1775 for f in modified:
1776 if f not in ctx1:
1776 if f not in ctx1:
1777 addedset.add(f)
1777 addedset.add(f)
1778
1778
1779 for f in sorted(modified + added + removed):
1779 for f in sorted(modified + added + removed):
1780 copyop = None
1780 copyop = None
1781 f1, f2 = f, f
1781 f1, f2 = f, f
1782 if f in addedset:
1782 if f in addedset:
1783 f1 = None
1783 f1 = None
1784 if f in copy:
1784 if f in copy:
1785 if opts.git:
1785 if opts.git:
1786 f1 = copy[f]
1786 f1 = copy[f]
1787 if f1 in removedset and f1 not in gone:
1787 if f1 in removedset and f1 not in gone:
1788 copyop = 'rename'
1788 copyop = 'rename'
1789 gone.add(f1)
1789 gone.add(f1)
1790 else:
1790 else:
1791 copyop = 'copy'
1791 copyop = 'copy'
1792 elif f in removedset:
1792 elif f in removedset:
1793 f2 = None
1793 f2 = None
1794 if opts.git:
1794 if opts.git:
1795 # have we already reported a copy above?
1795 # have we already reported a copy above?
1796 if (f in copyto and copyto[f] in addedset
1796 if (f in copyto and copyto[f] in addedset
1797 and copy[copyto[f]] == f):
1797 and copy[copyto[f]] == f):
1798 continue
1798 continue
1799 yield f1, f2, copyop
1799 yield f1, f2, copyop
1800
1800
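A worked illustration of the pairing rules in the docstring (hypothetical names): with added=['b'], removed=['a'] and copy={'b': 'a'}, a git-style diff reports one rename instead of an unrelated add and remove, because 'a' is both the copy source and in the removed set:

# ctx1 may be any changectx here since modified is empty; opts.git is assumed true
pairs = list(_filepairs(ctx1, [], ['b'], ['a'], {'b': 'a'}, opts))
assert pairs == [('a', 'b', 'rename')]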
1801 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1801 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1802 copy, getfilectx, opts, losedatafn, prefix):
1802 copy, getfilectx, opts, losedatafn, prefix):
1803
1803
1804 def gitindex(text):
1804 def gitindex(text):
1805 if not text:
1805 if not text:
1806 text = ""
1806 text = ""
1807 l = len(text)
1807 l = len(text)
1808 s = util.sha1('blob %d\0' % l)
1808 s = util.sha1('blob %d\0' % l)
1809 s.update(text)
1809 s.update(text)
1810 return s.hexdigest()
1810 return s.hexdigest()
1811
1811
1812 if opts.noprefix:
1812 if opts.noprefix:
1813 aprefix = bprefix = ''
1813 aprefix = bprefix = ''
1814 else:
1814 else:
1815 aprefix = 'a/'
1815 aprefix = 'a/'
1816 bprefix = 'b/'
1816 bprefix = 'b/'
1817
1817
1818 def diffline(f, revs):
1818 def diffline(f, revs):
1819 revinfo = ' '.join(["-r %s" % rev for rev in revs])
1819 revinfo = ' '.join(["-r %s" % rev for rev in revs])
1820 return 'diff %s %s' % (revinfo, f)
1820 return 'diff %s %s' % (revinfo, f)
1821
1821
1822 date1 = util.datestr(ctx1.date())
1822 date1 = util.datestr(ctx1.date())
1823 date2 = util.datestr(ctx2.date())
1823 date2 = util.datestr(ctx2.date())
1824
1824
1825 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1825 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1826
1826
1827 for f1, f2, copyop in _filepairs(
1827 for f1, f2, copyop in _filepairs(
1828 ctx1, modified, added, removed, copy, opts):
1828 ctx1, modified, added, removed, copy, opts):
1829 content1 = None
1829 content1 = None
1830 content2 = None
1830 content2 = None
1831 flag1 = None
1831 flag1 = None
1832 flag2 = None
1832 flag2 = None
1833 if f1:
1833 if f1:
1834 content1 = getfilectx(f1, ctx1).data()
1834 content1 = getfilectx(f1, ctx1).data()
1835 if opts.git or losedatafn:
1835 if opts.git or losedatafn:
1836 flag1 = ctx1.flags(f1)
1836 flag1 = ctx1.flags(f1)
1837 if f2:
1837 if f2:
1838 content2 = getfilectx(f2, ctx2).data()
1838 content2 = getfilectx(f2, ctx2).data()
1839 if opts.git or losedatafn:
1839 if opts.git or losedatafn:
1840 flag2 = ctx2.flags(f2)
1840 flag2 = ctx2.flags(f2)
1841 binary = False
1841 binary = False
1842 if opts.git or losedatafn:
1842 if opts.git or losedatafn:
1843 binary = util.binary(content1) or util.binary(content2)
1843 binary = util.binary(content1) or util.binary(content2)
1844
1844
1845 if losedatafn and not opts.git:
1845 if losedatafn and not opts.git:
1846 if (binary or
1846 if (binary or
1847 # copy/rename
1847 # copy/rename
1848 f2 in copy or
1848 f2 in copy or
1849 # empty file creation
1849 # empty file creation
1850 (not f1 and not content2) or
1850 (not f1 and not content2) or
1851 # empty file deletion
1851 # empty file deletion
1852 (not content1 and not f2) or
1852 (not content1 and not f2) or
1853 # create with flags
1853 # create with flags
1854 (not f1 and flag2) or
1854 (not f1 and flag2) or
1855 # change flags
1855 # change flags
1856 (f1 and f2 and flag1 != flag2)):
1856 (f1 and f2 and flag1 != flag2)):
1857 losedatafn(f2 or f1)
1857 losedatafn(f2 or f1)
1858
1858
1859 path1 = posixpath.join(prefix, f1 or f2)
1859 path1 = posixpath.join(prefix, f1 or f2)
1860 path2 = posixpath.join(prefix, f2 or f1)
1860 path2 = posixpath.join(prefix, f2 or f1)
1861 header = []
1861 header = []
1862 if opts.git:
1862 if opts.git:
1863 header.append('diff --git %s%s %s%s' %
1863 header.append('diff --git %s%s %s%s' %
1864 (aprefix, path1, bprefix, path2))
1864 (aprefix, path1, bprefix, path2))
1865 if not f1: # added
1865 if not f1: # added
1866 header.append('new file mode %s' % gitmode[flag2])
1866 header.append('new file mode %s' % gitmode[flag2])
1867 elif not f2: # removed
1867 elif not f2: # removed
1868 header.append('deleted file mode %s' % gitmode[flag1])
1868 header.append('deleted file mode %s' % gitmode[flag1])
1869 else: # modified/copied/renamed
1869 else: # modified/copied/renamed
1870 mode1, mode2 = gitmode[flag1], gitmode[flag2]
1870 mode1, mode2 = gitmode[flag1], gitmode[flag2]
1871 if mode1 != mode2:
1871 if mode1 != mode2:
1872 header.append('old mode %s' % mode1)
1872 header.append('old mode %s' % mode1)
1873 header.append('new mode %s' % mode2)
1873 header.append('new mode %s' % mode2)
1874 if copyop is not None:
1874 if copyop is not None:
1875 header.append('%s from %s' % (copyop, path1))
1875 header.append('%s from %s' % (copyop, path1))
1876 header.append('%s to %s' % (copyop, path2))
1876 header.append('%s to %s' % (copyop, path2))
1877 elif revs and not repo.ui.quiet:
1877 elif revs and not repo.ui.quiet:
1878 header.append(diffline(path1, revs))
1878 header.append(diffline(path1, revs))
1879
1879
1880 if binary and opts.git and not opts.nobinary:
1880 if binary and opts.git and not opts.nobinary:
1881 text = mdiff.b85diff(content1, content2)
1881 text = mdiff.b85diff(content1, content2)
1882 if text:
1882 if text:
1883 header.append('index %s..%s' %
1883 header.append('index %s..%s' %
1884 (gitindex(content1), gitindex(content2)))
1884 (gitindex(content1), gitindex(content2)))
1885 else:
1885 else:
1886 text = mdiff.unidiff(content1, date1,
1886 text = mdiff.unidiff(content1, date1,
1887 content2, date2,
1887 content2, date2,
1888 path1, path2, opts=opts)
1888 path1, path2, opts=opts)
1889 if header and (text or len(header) > 1):
1889 if header and (text or len(header) > 1):
1890 yield '\n'.join(header) + '\n'
1890 yield '\n'.join(header) + '\n'
1891 if text:
1891 if text:
1892 yield text
1892 yield text
1893
1893
1894 def diffstatsum(stats):
1894 def diffstatsum(stats):
1895 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1895 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1896 for f, a, r, b in stats:
1896 for f, a, r, b in stats:
1897 maxfile = max(maxfile, encoding.colwidth(f))
1897 maxfile = max(maxfile, encoding.colwidth(f))
1898 maxtotal = max(maxtotal, a + r)
1898 maxtotal = max(maxtotal, a + r)
1899 addtotal += a
1899 addtotal += a
1900 removetotal += r
1900 removetotal += r
1901 binary = binary or b
1901 binary = binary or b
1902
1902
1903 return maxfile, maxtotal, addtotal, removetotal, binary
1903 return maxfile, maxtotal, addtotal, removetotal, binary
1904
1904
1905 def diffstatdata(lines):
1905 def diffstatdata(lines):
1906 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1906 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1907
1907
1908 results = []
1908 results = []
1909 filename, adds, removes, isbinary = None, 0, 0, False
1909 filename, adds, removes, isbinary = None, 0, 0, False
1910
1910
1911 def addresult():
1911 def addresult():
1912 if filename:
1912 if filename:
1913 results.append((filename, adds, removes, isbinary))
1913 results.append((filename, adds, removes, isbinary))
1914
1914
1915 for line in lines:
1915 for line in lines:
1916 if line.startswith('diff'):
1916 if line.startswith('diff'):
1917 addresult()
1917 addresult()
1918 # set numbers to 0 anyway when starting new file
1918 # set numbers to 0 anyway when starting new file
1919 adds, removes, isbinary = 0, 0, False
1919 adds, removes, isbinary = 0, 0, False
1920 if line.startswith('diff --git a/'):
1920 if line.startswith('diff --git a/'):
1921 filename = gitre.search(line).group(2)
1921 filename = gitre.search(line).group(2)
1922 elif line.startswith('diff -r'):
1922 elif line.startswith('diff -r'):
1923 # format: "diff -r ... -r ... filename"
1923 # format: "diff -r ... -r ... filename"
1924 filename = diffre.search(line).group(1)
1924 filename = diffre.search(line).group(1)
1925 elif line.startswith('+') and not line.startswith('+++ '):
1925 elif line.startswith('+') and not line.startswith('+++ '):
1926 adds += 1
1926 adds += 1
1927 elif line.startswith('-') and not line.startswith('--- '):
1927 elif line.startswith('-') and not line.startswith('--- '):
1928 removes += 1
1928 removes += 1
1929 elif (line.startswith('GIT binary patch') or
1929 elif (line.startswith('GIT binary patch') or
1930 line.startswith('Binary file')):
1930 line.startswith('Binary file')):
1931 isbinary = True
1931 isbinary = True
1932 addresult()
1932 addresult()
1933 return results
1933 return results
1934
1934
1935 def diffstat(lines, width=80, git=False):
1935 def diffstat(lines, width=80, git=False):
1936 output = []
1936 output = []
1937 stats = diffstatdata(lines)
1937 stats = diffstatdata(lines)
1938 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1938 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1939
1939
1940 countwidth = len(str(maxtotal))
1940 countwidth = len(str(maxtotal))
1941 if hasbinary and countwidth < 3:
1941 if hasbinary and countwidth < 3:
1942 countwidth = 3
1942 countwidth = 3
1943 graphwidth = width - countwidth - maxname - 6
1943 graphwidth = width - countwidth - maxname - 6
1944 if graphwidth < 10:
1944 if graphwidth < 10:
1945 graphwidth = 10
1945 graphwidth = 10
1946
1946
1947 def scale(i):
1947 def scale(i):
1948 if maxtotal <= graphwidth:
1948 if maxtotal <= graphwidth:
1949 return i
1949 return i
1950 # If diffstat runs out of room it doesn't print anything,
1950 # If diffstat runs out of room it doesn't print anything,
1951 # which isn't very useful, so always print at least one + or -
1951 # which isn't very useful, so always print at least one + or -
1952 # if there were at least some changes.
1952 # if there were at least some changes.
1953 return max(i * graphwidth // maxtotal, int(bool(i)))
1953 return max(i * graphwidth // maxtotal, int(bool(i)))
1954
1954
1955 for filename, adds, removes, isbinary in stats:
1955 for filename, adds, removes, isbinary in stats:
1956 if isbinary:
1956 if isbinary:
1957 count = 'Bin'
1957 count = 'Bin'
1958 else:
1958 else:
1959 count = adds + removes
1959 count = adds + removes
1960 pluses = '+' * scale(adds)
1960 pluses = '+' * scale(adds)
1961 minuses = '-' * scale(removes)
1961 minuses = '-' * scale(removes)
1962 output.append(' %s%s | %*s %s%s\n' %
1962 output.append(' %s%s | %*s %s%s\n' %
1963 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1963 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1964 countwidth, count, pluses, minuses))
1964 countwidth, count, pluses, minuses))
1965
1965
1966 if stats:
1966 if stats:
1967 output.append(_(' %d files changed, %d insertions(+), '
1967 output.append(_(' %d files changed, %d insertions(+), '
1968 '%d deletions(-)\n')
1968 '%d deletions(-)\n')
1969 % (len(stats), totaladds, totalremoves))
1969 % (len(stats), totaladds, totalremoves))
1970
1970
1971 return ''.join(output)
1971 return ''.join(output)
1972
1972
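Tying the pieces together (same assumed names as above): diffstatdata() parses the +/- lines, diffstatsum() totals them, and diffstat() renders the familiar summary straight from diff text:

lines = ''.join(diff(repo, opts=diffallopts(ui, {'git': True}))).splitlines(True)
ui.write(diffstat(lines, width=ui.termwidth()))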
1973 def diffstatui(*args, **kw):
1973 def diffstatui(*args, **kw):
1974 '''like diffstat(), but yields 2-tuples of (output, label) for
1974 '''like diffstat(), but yields 2-tuples of (output, label) for
1975 ui.write()
1975 ui.write()
1976 '''
1976 '''
1977
1977
1978 for line in diffstat(*args, **kw).splitlines():
1978 for line in diffstat(*args, **kw).splitlines():
1979 if line and line[-1] in '+-':
1979 if line and line[-1] in '+-':
1980 name, graph = line.rsplit(' ', 1)
1980 name, graph = line.rsplit(' ', 1)
1981 yield (name + ' ', '')
1981 yield (name + ' ', '')
1982 m = re.search(r'\++', graph)
1982 m = re.search(r'\++', graph)
1983 if m:
1983 if m:
1984 yield (m.group(0), 'diffstat.inserted')
1984 yield (m.group(0), 'diffstat.inserted')
1985 m = re.search(r'-+', graph)
1985 m = re.search(r'-+', graph)
1986 if m:
1986 if m:
1987 yield (m.group(0), 'diffstat.deleted')
1987 yield (m.group(0), 'diffstat.deleted')
1988 else:
1988 else:
1989 yield (line, '')
1989 yield (line, '')
1990 yield ('\n', '')
1990 yield ('\n', '')