##// END OF EJS Templates
record: move hunk class from record to patch...
Laurent Charignon -
r24263:a45d1c51 default
parent child Browse files
Show More
@@ -1,612 +1,567 b''
1 # record.py
1 # record.py
2 #
2 #
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''commands to interactively select changes for commit/qrefresh'''
8 '''commands to interactively select changes for commit/qrefresh'''
9
9
10 from mercurial.i18n import _
10 from mercurial.i18n import _
11 from mercurial import cmdutil, commands, extensions, hg, patch
11 from mercurial import cmdutil, commands, extensions, hg, patch
12 from mercurial import util
12 from mercurial import util
13 import copy, cStringIO, errno, os, re, shutil, tempfile
13 import copy, cStringIO, errno, os, re, shutil, tempfile
14
14
15 cmdtable = {}
15 cmdtable = {}
16 command = cmdutil.command(cmdtable)
16 command = cmdutil.command(cmdtable)
17 testedwith = 'internal'
17 testedwith = 'internal'
18
18
19 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
19 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
20
20
21 def scanpatch(fp):
21 def scanpatch(fp):
22 """like patch.iterhunks, but yield different events
22 """like patch.iterhunks, but yield different events
23
23
24 - ('file', [header_lines + fromfile + tofile])
24 - ('file', [header_lines + fromfile + tofile])
25 - ('context', [context_lines])
25 - ('context', [context_lines])
26 - ('hunk', [hunk_lines])
26 - ('hunk', [hunk_lines])
27 - ('range', (-start,len, +start,len, proc))
27 - ('range', (-start,len, +start,len, proc))
28 """
28 """
29 lr = patch.linereader(fp)
29 lr = patch.linereader(fp)
30
30
31 def scanwhile(first, p):
31 def scanwhile(first, p):
32 """scan lr while predicate holds"""
32 """scan lr while predicate holds"""
33 lines = [first]
33 lines = [first]
34 while True:
34 while True:
35 line = lr.readline()
35 line = lr.readline()
36 if not line:
36 if not line:
37 break
37 break
38 if p(line):
38 if p(line):
39 lines.append(line)
39 lines.append(line)
40 else:
40 else:
41 lr.push(line)
41 lr.push(line)
42 break
42 break
43 return lines
43 return lines
44
44
45 while True:
45 while True:
46 line = lr.readline()
46 line = lr.readline()
47 if not line:
47 if not line:
48 break
48 break
49 if line.startswith('diff --git a/') or line.startswith('diff -r '):
49 if line.startswith('diff --git a/') or line.startswith('diff -r '):
50 def notheader(line):
50 def notheader(line):
51 s = line.split(None, 1)
51 s = line.split(None, 1)
52 return not s or s[0] not in ('---', 'diff')
52 return not s or s[0] not in ('---', 'diff')
53 header = scanwhile(line, notheader)
53 header = scanwhile(line, notheader)
54 fromfile = lr.readline()
54 fromfile = lr.readline()
55 if fromfile.startswith('---'):
55 if fromfile.startswith('---'):
56 tofile = lr.readline()
56 tofile = lr.readline()
57 header += [fromfile, tofile]
57 header += [fromfile, tofile]
58 else:
58 else:
59 lr.push(fromfile)
59 lr.push(fromfile)
60 yield 'file', header
60 yield 'file', header
61 elif line[0] == ' ':
61 elif line[0] == ' ':
62 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
62 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
63 elif line[0] in '-+':
63 elif line[0] in '-+':
64 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
64 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
65 else:
65 else:
66 m = lines_re.match(line)
66 m = lines_re.match(line)
67 if m:
67 if m:
68 yield 'range', m.groups()
68 yield 'range', m.groups()
69 else:
69 else:
70 yield 'other', line
70 yield 'other', line
71
71
72 class hunk(object):
73 """patch hunk
74
75 XXX shouldn't we merge this with patch.hunk ?
76 """
77 maxcontext = 3
78
79 def __init__(self, header, fromline, toline, proc, before, hunk, after):
80 def trimcontext(number, lines):
81 delta = len(lines) - self.maxcontext
82 if False and delta > 0:
83 return number + delta, lines[:self.maxcontext]
84 return number, lines
85
86 self.header = header
87 self.fromline, self.before = trimcontext(fromline, before)
88 self.toline, self.after = trimcontext(toline, after)
89 self.proc = proc
90 self.hunk = hunk
91 self.added, self.removed = self.countchanges(self.hunk)
92
93 def countchanges(self, hunk):
94 """hunk -> (n+,n-)"""
95 add = len([h for h in hunk if h[0] == '+'])
96 rem = len([h for h in hunk if h[0] == '-'])
97 return add, rem
98
99 def write(self, fp):
100 delta = len(self.before) + len(self.after)
101 if self.after and self.after[-1] == '\\ No newline at end of file\n':
102 delta -= 1
103 fromlen = delta + self.removed
104 tolen = delta + self.added
105 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
106 (self.fromline, fromlen, self.toline, tolen,
107 self.proc and (' ' + self.proc)))
108 fp.write(''.join(self.before + self.hunk + self.after))
109
110 pretty = write
111
112 def filename(self):
113 return self.header.filename()
114
115 def __repr__(self):
116 return '<hunk %r@%d>' % (self.filename(), self.fromline)
117
72
118 def parsepatch(fp):
73 def parsepatch(fp):
119 """patch -> [] of headers -> [] of hunks """
74 """patch -> [] of headers -> [] of hunks """
120 class parser(object):
75 class parser(object):
121 """patch parsing state machine"""
76 """patch parsing state machine"""
122 def __init__(self):
77 def __init__(self):
123 self.fromline = 0
78 self.fromline = 0
124 self.toline = 0
79 self.toline = 0
125 self.proc = ''
80 self.proc = ''
126 self.header = None
81 self.header = None
127 self.context = []
82 self.context = []
128 self.before = []
83 self.before = []
129 self.hunk = []
84 self.hunk = []
130 self.headers = []
85 self.headers = []
131
86
132 def addrange(self, limits):
87 def addrange(self, limits):
133 fromstart, fromend, tostart, toend, proc = limits
88 fromstart, fromend, tostart, toend, proc = limits
134 self.fromline = int(fromstart)
89 self.fromline = int(fromstart)
135 self.toline = int(tostart)
90 self.toline = int(tostart)
136 self.proc = proc
91 self.proc = proc
137
92
138 def addcontext(self, context):
93 def addcontext(self, context):
139 if self.hunk:
94 if self.hunk:
140 h = hunk(self.header, self.fromline, self.toline, self.proc,
95 h = patch.recordhunk(self.header, self.fromline, self.toline,
141 self.before, self.hunk, context)
96 self.proc, self.before, self.hunk, context)
142 self.header.hunks.append(h)
97 self.header.hunks.append(h)
143 self.fromline += len(self.before) + h.removed
98 self.fromline += len(self.before) + h.removed
144 self.toline += len(self.before) + h.added
99 self.toline += len(self.before) + h.added
145 self.before = []
100 self.before = []
146 self.hunk = []
101 self.hunk = []
147 self.proc = ''
102 self.proc = ''
148 self.context = context
103 self.context = context
149
104
150 def addhunk(self, hunk):
105 def addhunk(self, hunk):
151 if self.context:
106 if self.context:
152 self.before = self.context
107 self.before = self.context
153 self.context = []
108 self.context = []
154 self.hunk = hunk
109 self.hunk = hunk
155
110
156 def newfile(self, hdr):
111 def newfile(self, hdr):
157 self.addcontext([])
112 self.addcontext([])
158 h = patch.header(hdr)
113 h = patch.header(hdr)
159 self.headers.append(h)
114 self.headers.append(h)
160 self.header = h
115 self.header = h
161
116
162 def addother(self, line):
117 def addother(self, line):
163 pass # 'other' lines are ignored
118 pass # 'other' lines are ignored
164
119
165 def finished(self):
120 def finished(self):
166 self.addcontext([])
121 self.addcontext([])
167 return self.headers
122 return self.headers
168
123
169 transitions = {
124 transitions = {
170 'file': {'context': addcontext,
125 'file': {'context': addcontext,
171 'file': newfile,
126 'file': newfile,
172 'hunk': addhunk,
127 'hunk': addhunk,
173 'range': addrange},
128 'range': addrange},
174 'context': {'file': newfile,
129 'context': {'file': newfile,
175 'hunk': addhunk,
130 'hunk': addhunk,
176 'range': addrange,
131 'range': addrange,
177 'other': addother},
132 'other': addother},
178 'hunk': {'context': addcontext,
133 'hunk': {'context': addcontext,
179 'file': newfile,
134 'file': newfile,
180 'range': addrange},
135 'range': addrange},
181 'range': {'context': addcontext,
136 'range': {'context': addcontext,
182 'hunk': addhunk},
137 'hunk': addhunk},
183 'other': {'other': addother},
138 'other': {'other': addother},
184 }
139 }
185
140
186 p = parser()
141 p = parser()
187
142
188 state = 'context'
143 state = 'context'
189 for newstate, data in scanpatch(fp):
144 for newstate, data in scanpatch(fp):
190 try:
145 try:
191 p.transitions[state][newstate](p, data)
146 p.transitions[state][newstate](p, data)
192 except KeyError:
147 except KeyError:
193 raise patch.PatchError('unhandled transition: %s -> %s' %
148 raise patch.PatchError('unhandled transition: %s -> %s' %
194 (state, newstate))
149 (state, newstate))
195 state = newstate
150 state = newstate
196 return p.finished()
151 return p.finished()
197
152
198 def filterpatch(ui, headers):
153 def filterpatch(ui, headers):
199 """Interactively filter patch chunks into applied-only chunks"""
154 """Interactively filter patch chunks into applied-only chunks"""
200
155
201 def prompt(skipfile, skipall, query, chunk):
156 def prompt(skipfile, skipall, query, chunk):
202 """prompt query, and process base inputs
157 """prompt query, and process base inputs
203
158
204 - y/n for the rest of file
159 - y/n for the rest of file
205 - y/n for the rest
160 - y/n for the rest
206 - ? (help)
161 - ? (help)
207 - q (quit)
162 - q (quit)
208
163
209 Return True/False and possibly updated skipfile and skipall.
164 Return True/False and possibly updated skipfile and skipall.
210 """
165 """
211 newpatches = None
166 newpatches = None
212 if skipall is not None:
167 if skipall is not None:
213 return skipall, skipfile, skipall, newpatches
168 return skipall, skipfile, skipall, newpatches
214 if skipfile is not None:
169 if skipfile is not None:
215 return skipfile, skipfile, skipall, newpatches
170 return skipfile, skipfile, skipall, newpatches
216 while True:
171 while True:
217 resps = _('[Ynesfdaq?]'
172 resps = _('[Ynesfdaq?]'
218 '$$ &Yes, record this change'
173 '$$ &Yes, record this change'
219 '$$ &No, skip this change'
174 '$$ &No, skip this change'
220 '$$ &Edit this change manually'
175 '$$ &Edit this change manually'
221 '$$ &Skip remaining changes to this file'
176 '$$ &Skip remaining changes to this file'
222 '$$ Record remaining changes to this &file'
177 '$$ Record remaining changes to this &file'
223 '$$ &Done, skip remaining changes and files'
178 '$$ &Done, skip remaining changes and files'
224 '$$ Record &all changes to all remaining files'
179 '$$ Record &all changes to all remaining files'
225 '$$ &Quit, recording no changes'
180 '$$ &Quit, recording no changes'
226 '$$ &? (display help)')
181 '$$ &? (display help)')
227 r = ui.promptchoice("%s %s" % (query, resps))
182 r = ui.promptchoice("%s %s" % (query, resps))
228 ui.write("\n")
183 ui.write("\n")
229 if r == 8: # ?
184 if r == 8: # ?
230 for c, t in ui.extractchoices(resps)[1]:
185 for c, t in ui.extractchoices(resps)[1]:
231 ui.write('%s - %s\n' % (c, t.lower()))
186 ui.write('%s - %s\n' % (c, t.lower()))
232 continue
187 continue
233 elif r == 0: # yes
188 elif r == 0: # yes
234 ret = True
189 ret = True
235 elif r == 1: # no
190 elif r == 1: # no
236 ret = False
191 ret = False
237 elif r == 2: # Edit patch
192 elif r == 2: # Edit patch
238 if chunk is None:
193 if chunk is None:
239 ui.write(_('cannot edit patch for whole file'))
194 ui.write(_('cannot edit patch for whole file'))
240 ui.write("\n")
195 ui.write("\n")
241 continue
196 continue
242 if chunk.header.binary():
197 if chunk.header.binary():
243 ui.write(_('cannot edit patch for binary file'))
198 ui.write(_('cannot edit patch for binary file'))
244 ui.write("\n")
199 ui.write("\n")
245 continue
200 continue
246 # Patch comment based on the Git one (based on comment at end of
201 # Patch comment based on the Git one (based on comment at end of
247 # http://mercurial.selenic.com/wiki/RecordExtension)
202 # http://mercurial.selenic.com/wiki/RecordExtension)
248 phelp = '---' + _("""
203 phelp = '---' + _("""
249 To remove '-' lines, make them ' ' lines (context).
204 To remove '-' lines, make them ' ' lines (context).
250 To remove '+' lines, delete them.
205 To remove '+' lines, delete them.
251 Lines starting with # will be removed from the patch.
206 Lines starting with # will be removed from the patch.
252
207
253 If the patch applies cleanly, the edited hunk will immediately be
208 If the patch applies cleanly, the edited hunk will immediately be
254 added to the record list. If it does not apply cleanly, a rejects
209 added to the record list. If it does not apply cleanly, a rejects
255 file will be generated: you can use that when you try again. If
210 file will be generated: you can use that when you try again. If
256 all lines of the hunk are removed, then the edit is aborted and
211 all lines of the hunk are removed, then the edit is aborted and
257 the hunk is left unchanged.
212 the hunk is left unchanged.
258 """)
213 """)
259 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
214 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
260 suffix=".diff", text=True)
215 suffix=".diff", text=True)
261 ncpatchfp = None
216 ncpatchfp = None
262 try:
217 try:
263 # Write the initial patch
218 # Write the initial patch
264 f = os.fdopen(patchfd, "w")
219 f = os.fdopen(patchfd, "w")
265 chunk.header.write(f)
220 chunk.header.write(f)
266 chunk.write(f)
221 chunk.write(f)
267 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
222 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
268 f.close()
223 f.close()
269 # Start the editor and wait for it to complete
224 # Start the editor and wait for it to complete
270 editor = ui.geteditor()
225 editor = ui.geteditor()
271 ui.system("%s \"%s\"" % (editor, patchfn),
226 ui.system("%s \"%s\"" % (editor, patchfn),
272 environ={'HGUSER': ui.username()},
227 environ={'HGUSER': ui.username()},
273 onerr=util.Abort, errprefix=_("edit failed"))
228 onerr=util.Abort, errprefix=_("edit failed"))
274 # Remove comment lines
229 # Remove comment lines
275 patchfp = open(patchfn)
230 patchfp = open(patchfn)
276 ncpatchfp = cStringIO.StringIO()
231 ncpatchfp = cStringIO.StringIO()
277 for line in patchfp:
232 for line in patchfp:
278 if not line.startswith('#'):
233 if not line.startswith('#'):
279 ncpatchfp.write(line)
234 ncpatchfp.write(line)
280 patchfp.close()
235 patchfp.close()
281 ncpatchfp.seek(0)
236 ncpatchfp.seek(0)
282 newpatches = parsepatch(ncpatchfp)
237 newpatches = parsepatch(ncpatchfp)
283 finally:
238 finally:
284 os.unlink(patchfn)
239 os.unlink(patchfn)
285 del ncpatchfp
240 del ncpatchfp
286 # Signal that the chunk shouldn't be applied as-is, but
241 # Signal that the chunk shouldn't be applied as-is, but
287 # provide the new patch to be used instead.
242 # provide the new patch to be used instead.
288 ret = False
243 ret = False
289 elif r == 3: # Skip
244 elif r == 3: # Skip
290 ret = skipfile = False
245 ret = skipfile = False
291 elif r == 4: # file (Record remaining)
246 elif r == 4: # file (Record remaining)
292 ret = skipfile = True
247 ret = skipfile = True
293 elif r == 5: # done, skip remaining
248 elif r == 5: # done, skip remaining
294 ret = skipall = False
249 ret = skipall = False
295 elif r == 6: # all
250 elif r == 6: # all
296 ret = skipall = True
251 ret = skipall = True
297 elif r == 7: # quit
252 elif r == 7: # quit
298 raise util.Abort(_('user quit'))
253 raise util.Abort(_('user quit'))
299 return ret, skipfile, skipall, newpatches
254 return ret, skipfile, skipall, newpatches
300
255
301 seen = set()
256 seen = set()
302 applied = {} # 'filename' -> [] of chunks
257 applied = {} # 'filename' -> [] of chunks
303 skipfile, skipall = None, None
258 skipfile, skipall = None, None
304 pos, total = 1, sum(len(h.hunks) for h in headers)
259 pos, total = 1, sum(len(h.hunks) for h in headers)
305 for h in headers:
260 for h in headers:
306 pos += len(h.hunks)
261 pos += len(h.hunks)
307 skipfile = None
262 skipfile = None
308 fixoffset = 0
263 fixoffset = 0
309 hdr = ''.join(h.header)
264 hdr = ''.join(h.header)
310 if hdr in seen:
265 if hdr in seen:
311 continue
266 continue
312 seen.add(hdr)
267 seen.add(hdr)
313 if skipall is None:
268 if skipall is None:
314 h.pretty(ui)
269 h.pretty(ui)
315 msg = (_('examine changes to %s?') %
270 msg = (_('examine changes to %s?') %
316 _(' and ').join("'%s'" % f for f in h.files()))
271 _(' and ').join("'%s'" % f for f in h.files()))
317 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
272 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
318 if not r:
273 if not r:
319 continue
274 continue
320 applied[h.filename()] = [h]
275 applied[h.filename()] = [h]
321 if h.allhunks():
276 if h.allhunks():
322 applied[h.filename()] += h.hunks
277 applied[h.filename()] += h.hunks
323 continue
278 continue
324 for i, chunk in enumerate(h.hunks):
279 for i, chunk in enumerate(h.hunks):
325 if skipfile is None and skipall is None:
280 if skipfile is None and skipall is None:
326 chunk.pretty(ui)
281 chunk.pretty(ui)
327 if total == 1:
282 if total == 1:
328 msg = _("record this change to '%s'?") % chunk.filename()
283 msg = _("record this change to '%s'?") % chunk.filename()
329 else:
284 else:
330 idx = pos - len(h.hunks) + i
285 idx = pos - len(h.hunks) + i
331 msg = _("record change %d/%d to '%s'?") % (idx, total,
286 msg = _("record change %d/%d to '%s'?") % (idx, total,
332 chunk.filename())
287 chunk.filename())
333 r, skipfile, skipall, newpatches = prompt(skipfile,
288 r, skipfile, skipall, newpatches = prompt(skipfile,
334 skipall, msg, chunk)
289 skipall, msg, chunk)
335 if r:
290 if r:
336 if fixoffset:
291 if fixoffset:
337 chunk = copy.copy(chunk)
292 chunk = copy.copy(chunk)
338 chunk.toline += fixoffset
293 chunk.toline += fixoffset
339 applied[chunk.filename()].append(chunk)
294 applied[chunk.filename()].append(chunk)
340 elif newpatches is not None:
295 elif newpatches is not None:
341 for newpatch in newpatches:
296 for newpatch in newpatches:
342 for newhunk in newpatch.hunks:
297 for newhunk in newpatch.hunks:
343 if fixoffset:
298 if fixoffset:
344 newhunk.toline += fixoffset
299 newhunk.toline += fixoffset
345 applied[newhunk.filename()].append(newhunk)
300 applied[newhunk.filename()].append(newhunk)
346 else:
301 else:
347 fixoffset += chunk.removed - chunk.added
302 fixoffset += chunk.removed - chunk.added
348 return sum([h for h in applied.itervalues()
303 return sum([h for h in applied.itervalues()
349 if h[0].special() or len(h) > 1], [])
304 if h[0].special() or len(h) > 1], [])
350
305
351 @command("record",
306 @command("record",
352 # same options as commit + white space diff options
307 # same options as commit + white space diff options
353 commands.table['^commit|ci'][1][:] + commands.diffwsopts,
308 commands.table['^commit|ci'][1][:] + commands.diffwsopts,
354 _('hg record [OPTION]... [FILE]...'))
309 _('hg record [OPTION]... [FILE]...'))
355 def record(ui, repo, *pats, **opts):
310 def record(ui, repo, *pats, **opts):
356 '''interactively select changes to commit
311 '''interactively select changes to commit
357
312
358 If a list of files is omitted, all changes reported by :hg:`status`
313 If a list of files is omitted, all changes reported by :hg:`status`
359 will be candidates for recording.
314 will be candidates for recording.
360
315
361 See :hg:`help dates` for a list of formats valid for -d/--date.
316 See :hg:`help dates` for a list of formats valid for -d/--date.
362
317
363 You will be prompted for whether to record changes to each
318 You will be prompted for whether to record changes to each
364 modified file, and for files with multiple changes, for each
319 modified file, and for files with multiple changes, for each
365 change to use. For each query, the following responses are
320 change to use. For each query, the following responses are
366 possible::
321 possible::
367
322
368 y - record this change
323 y - record this change
369 n - skip this change
324 n - skip this change
370 e - edit this change manually
325 e - edit this change manually
371
326
372 s - skip remaining changes to this file
327 s - skip remaining changes to this file
373 f - record remaining changes to this file
328 f - record remaining changes to this file
374
329
375 d - done, skip remaining changes and files
330 d - done, skip remaining changes and files
376 a - record all changes to all remaining files
331 a - record all changes to all remaining files
377 q - quit, recording no changes
332 q - quit, recording no changes
378
333
379 ? - display help
334 ? - display help
380
335
381 This command is not available when committing a merge.'''
336 This command is not available when committing a merge.'''
382
337
383 dorecord(ui, repo, commands.commit, 'commit', False, *pats, **opts)
338 dorecord(ui, repo, commands.commit, 'commit', False, *pats, **opts)
384
339
385 def qrefresh(origfn, ui, repo, *pats, **opts):
340 def qrefresh(origfn, ui, repo, *pats, **opts):
386 if not opts['interactive']:
341 if not opts['interactive']:
387 return origfn(ui, repo, *pats, **opts)
342 return origfn(ui, repo, *pats, **opts)
388
343
389 mq = extensions.find('mq')
344 mq = extensions.find('mq')
390
345
391 def committomq(ui, repo, *pats, **opts):
346 def committomq(ui, repo, *pats, **opts):
392 # At this point the working copy contains only changes that
347 # At this point the working copy contains only changes that
393 # were accepted. All other changes were reverted.
348 # were accepted. All other changes were reverted.
394 # We can't pass *pats here since qrefresh will undo all other
349 # We can't pass *pats here since qrefresh will undo all other
395 # changed files in the patch that aren't in pats.
350 # changed files in the patch that aren't in pats.
396 mq.refresh(ui, repo, **opts)
351 mq.refresh(ui, repo, **opts)
397
352
398 # backup all changed files
353 # backup all changed files
399 dorecord(ui, repo, committomq, 'qrefresh', True, *pats, **opts)
354 dorecord(ui, repo, committomq, 'qrefresh', True, *pats, **opts)
400
355
401 # This command registration is replaced during uisetup().
356 # This command registration is replaced during uisetup().
402 @command('qrecord',
357 @command('qrecord',
403 [],
358 [],
404 _('hg qrecord [OPTION]... PATCH [FILE]...'),
359 _('hg qrecord [OPTION]... PATCH [FILE]...'),
405 inferrepo=True)
360 inferrepo=True)
406 def qrecord(ui, repo, patch, *pats, **opts):
361 def qrecord(ui, repo, patch, *pats, **opts):
407 '''interactively record a new patch
362 '''interactively record a new patch
408
363
409 See :hg:`help qnew` & :hg:`help record` for more information and
364 See :hg:`help qnew` & :hg:`help record` for more information and
410 usage.
365 usage.
411 '''
366 '''
412
367
413 try:
368 try:
414 mq = extensions.find('mq')
369 mq = extensions.find('mq')
415 except KeyError:
370 except KeyError:
416 raise util.Abort(_("'mq' extension not loaded"))
371 raise util.Abort(_("'mq' extension not loaded"))
417
372
418 repo.mq.checkpatchname(patch)
373 repo.mq.checkpatchname(patch)
419
374
420 def committomq(ui, repo, *pats, **opts):
375 def committomq(ui, repo, *pats, **opts):
421 opts['checkname'] = False
376 opts['checkname'] = False
422 mq.new(ui, repo, patch, *pats, **opts)
377 mq.new(ui, repo, patch, *pats, **opts)
423
378
424 dorecord(ui, repo, committomq, 'qnew', False, *pats, **opts)
379 dorecord(ui, repo, committomq, 'qnew', False, *pats, **opts)
425
380
426 def qnew(origfn, ui, repo, patch, *args, **opts):
381 def qnew(origfn, ui, repo, patch, *args, **opts):
427 if opts['interactive']:
382 if opts['interactive']:
428 return qrecord(ui, repo, patch, *args, **opts)
383 return qrecord(ui, repo, patch, *args, **opts)
429 return origfn(ui, repo, patch, *args, **opts)
384 return origfn(ui, repo, patch, *args, **opts)
430
385
431 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts):
386 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts):
432 if not ui.interactive():
387 if not ui.interactive():
433 raise util.Abort(_('running non-interactively, use %s instead') %
388 raise util.Abort(_('running non-interactively, use %s instead') %
434 cmdsuggest)
389 cmdsuggest)
435
390
436 # make sure username is set before going interactive
391 # make sure username is set before going interactive
437 if not opts.get('user'):
392 if not opts.get('user'):
438 ui.username() # raise exception, username not provided
393 ui.username() # raise exception, username not provided
439
394
440 def recordfunc(ui, repo, message, match, opts):
395 def recordfunc(ui, repo, message, match, opts):
441 """This is generic record driver.
396 """This is generic record driver.
442
397
443 Its job is to interactively filter local changes, and
398 Its job is to interactively filter local changes, and
444 accordingly prepare working directory into a state in which the
399 accordingly prepare working directory into a state in which the
445 job can be delegated to a non-interactive commit command such as
400 job can be delegated to a non-interactive commit command such as
446 'commit' or 'qrefresh'.
401 'commit' or 'qrefresh'.
447
402
448 After the actual job is done by non-interactive command, the
403 After the actual job is done by non-interactive command, the
449 working directory is restored to its original state.
404 working directory is restored to its original state.
450
405
451 In the end we'll record interesting changes, and everything else
406 In the end we'll record interesting changes, and everything else
452 will be left in place, so the user can continue working.
407 will be left in place, so the user can continue working.
453 """
408 """
454
409
455 cmdutil.checkunfinished(repo, commit=True)
410 cmdutil.checkunfinished(repo, commit=True)
456 merge = len(repo[None].parents()) > 1
411 merge = len(repo[None].parents()) > 1
457 if merge:
412 if merge:
458 raise util.Abort(_('cannot partially commit a merge '
413 raise util.Abort(_('cannot partially commit a merge '
459 '(use "hg commit" instead)'))
414 '(use "hg commit" instead)'))
460
415
461 status = repo.status(match=match)
416 status = repo.status(match=match)
462 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
417 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
463 diffopts.nodates = True
418 diffopts.nodates = True
464 diffopts.git = True
419 diffopts.git = True
465 originalchunks = patch.diff(repo, changes=status, opts=diffopts)
420 originalchunks = patch.diff(repo, changes=status, opts=diffopts)
466 fp = cStringIO.StringIO()
421 fp = cStringIO.StringIO()
467 fp.write(''.join(originalchunks))
422 fp.write(''.join(originalchunks))
468 fp.seek(0)
423 fp.seek(0)
469
424
470 # 1. filter patch, so we have intending-to apply subset of it
425 # 1. filter patch, so we have intending-to apply subset of it
471 try:
426 try:
472 chunks = filterpatch(ui, parsepatch(fp))
427 chunks = filterpatch(ui, parsepatch(fp))
473 except patch.PatchError, err:
428 except patch.PatchError, err:
474 raise util.Abort(_('error parsing patch: %s') % err)
429 raise util.Abort(_('error parsing patch: %s') % err)
475
430
476 del fp
431 del fp
477
432
478 contenders = set()
433 contenders = set()
479 for h in chunks:
434 for h in chunks:
480 try:
435 try:
481 contenders.update(set(h.files()))
436 contenders.update(set(h.files()))
482 except AttributeError:
437 except AttributeError:
483 pass
438 pass
484
439
485 changed = status.modified + status.added + status.removed
440 changed = status.modified + status.added + status.removed
486 newfiles = [f for f in changed if f in contenders]
441 newfiles = [f for f in changed if f in contenders]
487 if not newfiles:
442 if not newfiles:
488 ui.status(_('no changes to record\n'))
443 ui.status(_('no changes to record\n'))
489 return 0
444 return 0
490
445
491 newandmodifiedfiles = set()
446 newandmodifiedfiles = set()
492 for h in chunks:
447 for h in chunks:
493 ishunk = isinstance(h, hunk)
448 ishunk = isinstance(h, patch.recordhunk)
494 isnew = h.filename() in status.added
449 isnew = h.filename() in status.added
495 if ishunk and isnew and not h in originalchunks:
450 if ishunk and isnew and not h in originalchunks:
496 newandmodifiedfiles.add(h.filename())
451 newandmodifiedfiles.add(h.filename())
497
452
498 modified = set(status.modified)
453 modified = set(status.modified)
499
454
500 # 2. backup changed files, so we can restore them in the end
455 # 2. backup changed files, so we can restore them in the end
501
456
502 if backupall:
457 if backupall:
503 tobackup = changed
458 tobackup = changed
504 else:
459 else:
505 tobackup = [f for f in newfiles
460 tobackup = [f for f in newfiles
506 if f in modified or f in newandmodifiedfiles]
461 if f in modified or f in newandmodifiedfiles]
507
462
508 backups = {}
463 backups = {}
509 if tobackup:
464 if tobackup:
510 backupdir = repo.join('record-backups')
465 backupdir = repo.join('record-backups')
511 try:
466 try:
512 os.mkdir(backupdir)
467 os.mkdir(backupdir)
513 except OSError, err:
468 except OSError, err:
514 if err.errno != errno.EEXIST:
469 if err.errno != errno.EEXIST:
515 raise
470 raise
516 try:
471 try:
517 # backup continues
472 # backup continues
518 for f in tobackup:
473 for f in tobackup:
519 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
474 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
520 dir=backupdir)
475 dir=backupdir)
521 os.close(fd)
476 os.close(fd)
522 ui.debug('backup %r as %r\n' % (f, tmpname))
477 ui.debug('backup %r as %r\n' % (f, tmpname))
523 util.copyfile(repo.wjoin(f), tmpname)
478 util.copyfile(repo.wjoin(f), tmpname)
524 shutil.copystat(repo.wjoin(f), tmpname)
479 shutil.copystat(repo.wjoin(f), tmpname)
525 backups[f] = tmpname
480 backups[f] = tmpname
526
481
527 fp = cStringIO.StringIO()
482 fp = cStringIO.StringIO()
528 for c in chunks:
483 for c in chunks:
529 fname = c.filename()
484 fname = c.filename()
530 if fname in backups or fname in newandmodifiedfiles:
485 if fname in backups or fname in newandmodifiedfiles:
531 c.write(fp)
486 c.write(fp)
532 dopatch = fp.tell()
487 dopatch = fp.tell()
533 fp.seek(0)
488 fp.seek(0)
534
489
535 [os.unlink(c) for c in newandmodifiedfiles]
490 [os.unlink(c) for c in newandmodifiedfiles]
536
491
537 # 3a. apply filtered patch to clean repo (clean)
492 # 3a. apply filtered patch to clean repo (clean)
538 if backups:
493 if backups:
539 hg.revert(repo, repo.dirstate.p1(),
494 hg.revert(repo, repo.dirstate.p1(),
540 lambda key: key in backups)
495 lambda key: key in backups)
541
496
542 # 3b. (apply)
497 # 3b. (apply)
543 if dopatch:
498 if dopatch:
544 try:
499 try:
545 ui.debug('applying patch\n')
500 ui.debug('applying patch\n')
546 ui.debug(fp.getvalue())
501 ui.debug(fp.getvalue())
547 patch.internalpatch(ui, repo, fp, 1, '', eolmode=None)
502 patch.internalpatch(ui, repo, fp, 1, '', eolmode=None)
548 except patch.PatchError, err:
503 except patch.PatchError, err:
549 raise util.Abort(str(err))
504 raise util.Abort(str(err))
550 del fp
505 del fp
551
506
552 # 4. We prepared working directory according to filtered
507 # 4. We prepared working directory according to filtered
553 # patch. Now is the time to delegate the job to
508 # patch. Now is the time to delegate the job to
554 # commit/qrefresh or the like!
509 # commit/qrefresh or the like!
555
510
556 # Make all of the pathnames absolute.
511 # Make all of the pathnames absolute.
557 newfiles = [repo.wjoin(nf) for nf in newfiles]
512 newfiles = [repo.wjoin(nf) for nf in newfiles]
558 commitfunc(ui, repo, *newfiles, **opts)
513 commitfunc(ui, repo, *newfiles, **opts)
559
514
560 return 0
515 return 0
561 finally:
516 finally:
562 # 5. finally restore backed-up files
517 # 5. finally restore backed-up files
563 try:
518 try:
564 for realname, tmpname in backups.iteritems():
519 for realname, tmpname in backups.iteritems():
565 ui.debug('restoring %r to %r\n' % (tmpname, realname))
520 ui.debug('restoring %r to %r\n' % (tmpname, realname))
566 util.copyfile(tmpname, repo.wjoin(realname))
521 util.copyfile(tmpname, repo.wjoin(realname))
567 # Our calls to copystat() here and above are a
522 # Our calls to copystat() here and above are a
568 # hack to trick any editors that have f open that
523 # hack to trick any editors that have f open that
569 # we haven't modified them.
524 # we haven't modified them.
570 #
525 #
571 # Also note that this racy as an editor could
526 # Also note that this racy as an editor could
572 # notice the file's mtime before we've finished
527 # notice the file's mtime before we've finished
573 # writing it.
528 # writing it.
574 shutil.copystat(tmpname, repo.wjoin(realname))
529 shutil.copystat(tmpname, repo.wjoin(realname))
575 os.unlink(tmpname)
530 os.unlink(tmpname)
576 if tobackup:
531 if tobackup:
577 os.rmdir(backupdir)
532 os.rmdir(backupdir)
578 except OSError:
533 except OSError:
579 pass
534 pass
580
535
581 # wrap ui.write so diff output can be labeled/colorized
536 # wrap ui.write so diff output can be labeled/colorized
582 def wrapwrite(orig, *args, **kw):
537 def wrapwrite(orig, *args, **kw):
583 label = kw.pop('label', '')
538 label = kw.pop('label', '')
584 for chunk, l in patch.difflabel(lambda: args):
539 for chunk, l in patch.difflabel(lambda: args):
585 orig(chunk, label=label + l)
540 orig(chunk, label=label + l)
586 oldwrite = ui.write
541 oldwrite = ui.write
587 extensions.wrapfunction(ui, 'write', wrapwrite)
542 extensions.wrapfunction(ui, 'write', wrapwrite)
588 try:
543 try:
589 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
544 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
590 finally:
545 finally:
591 ui.write = oldwrite
546 ui.write = oldwrite
592
547
593 def uisetup(ui):
548 def uisetup(ui):
594 try:
549 try:
595 mq = extensions.find('mq')
550 mq = extensions.find('mq')
596 except KeyError:
551 except KeyError:
597 return
552 return
598
553
599 cmdtable["qrecord"] = \
554 cmdtable["qrecord"] = \
600 (qrecord,
555 (qrecord,
601 # same options as qnew, but copy them so we don't get
556 # same options as qnew, but copy them so we don't get
602 # -i/--interactive for qrecord and add white space diff options
557 # -i/--interactive for qrecord and add white space diff options
603 mq.cmdtable['^qnew'][1][:] + commands.diffwsopts,
558 mq.cmdtable['^qnew'][1][:] + commands.diffwsopts,
604 _('hg qrecord [OPTION]... PATCH [FILE]...'))
559 _('hg qrecord [OPTION]... PATCH [FILE]...'))
605
560
606 _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
561 _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
607 _wrapcmd('qrefresh', mq.cmdtable, qrefresh,
562 _wrapcmd('qrefresh', mq.cmdtable, qrefresh,
608 _("interactively select changes to refresh"))
563 _("interactively select changes to refresh"))
609
564
610 def _wrapcmd(cmd, table, wrapfn, msg):
565 def _wrapcmd(cmd, table, wrapfn, msg):
611 entry = extensions.wrapcommand(table, cmd, wrapfn)
566 entry = extensions.wrapcommand(table, cmd, wrapfn)
612 entry[1].append(('i', 'interactive', None, msg))
567 entry[1].append(('i', 'interactive', None, msg))
@@ -1,2048 +1,2094 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email, os, errno, re, posixpath
9 import cStringIO, email, os, errno, re, posixpath
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11 # On python2.4 you have to import these by name or they fail to
11 # On python2.4 you have to import these by name or they fail to
12 # load. This was not a problem on Python 2.7.
12 # load. This was not a problem on Python 2.7.
13 import email.Generator
13 import email.Generator
14 import email.Parser
14 import email.Parser
15
15
16 from i18n import _
16 from i18n import _
17 from node import hex, short
17 from node import hex, short
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19
19
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
22
22
class PatchError(Exception):
    """Raised when a patch cannot be parsed or applied."""
25
25
26
26
27 # public functions
27 # public functions
28
28
def split(stream):
    '''return an iterator of individual patches from a stream'''

    def isheader(line, inheader):
        # Heuristic test for an RFC-822-ish header line.
        if inheader and line[0] in (' ', '\t'):
            # folded continuation of the previous header line
            return True
        if line[0] in (' ', '-', '+'):
            # diff content line - never treat it as a header
            return False
        parts = line.split(': ', 1)
        return len(parts) == 2 and ' ' not in parts[0]

    def chunk(lines):
        # Join accumulated lines back into a file-like object.
        return cStringIO.StringIO(''.join(lines))

    def hgsplit(stream, cur):
        # Split a stream of concatenated "# HG changeset patch" exports.
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # Split an mbox: each message starts with a "From " separator;
        # recurse into split() for the body of each message.
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # Hand the whole stream to the email parser and yield the
        # patch-bearing MIME parts.
        def msgfp(m):
            fp = cStringIO.StringIO()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # Split on bare header blocks once we know no MIME/mbox markers
        # are present.
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # Fallback: the whole input is a single plain patch.
        yield chunk(cur)

    class fiter(object):
        # Adapt a readline-only object (e.g. an http response) to the
        # Python 2 iterator protocol.
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # Read until we can tell which container format we are looking at,
    # then delegate to the matching splitter.
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
155
155
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return tuple (filename, message, user, date, branch, node, p1, p2).
    Any item in the returned tuple can be None. If filename is None,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    # The extracted diff is accumulated into a temp file the caller owns.
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        user = msg['From']
        if not subject and not user:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        date = None
        nodeid = None
        branch = None
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip a leading "[PATCH ...]" tag from the subject
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if user:
            ui.debug('From: %s\n' % user)
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                # Everything before the diff start is commit message,
                # possibly interleaved with "# HG changeset patch" headers.
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            user = line[7:]
                            ui.debug('From: %s\n' % user)
                        elif line.startswith("# Date "):
                            date = line[7:]
                        elif line.startswith("# Branch "):
                            branch = line[9:]
                        elif line.startswith("# Node ID "):
                            nodeid = line[10:]
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif not line.startswith("# "):
                            hgpatchheader = False
                    elif line == '---':
                        # "---" ends the commit message in patchbomb mails
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                # plain-text part before any diff extends the message
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    tmpfp.close()
    if not diffs_seen:
        # no patch found: caller gets message metadata only
        os.unlink(tmpname)
        return None, message, user, date, branch, None, None, None
    p1 = parents and parents.pop(0) or None
    p2 = parents and parents.pop(0) or None
    return tmpname, message, user, date, branch, nodeid, p1, p2
265
265
class patchmeta(object):
    """Metadata describing one patched file.

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None       # copy/rename source, if any
        self.mode = None          # (islink, isexec) or None
        self.op = 'MODIFY'        # default operation
        self.binary = False       # True for GIT binary patches

    def setmode(self, mode):
        """Record an st_mode-style integer as an (islink, isexec) pair."""
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        """Return an independent duplicate of this metadata object."""
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        # /dev/null as the "a" side means the file is being created
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # /dev/null as the "b" side means the file is being removed
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        """True when this metadata matches the given a/ and b/ paths."""
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
311
311
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Scan the line reader, building one patchmeta per "diff --git"
    # section; 'gp' holds the section currently being filled in.
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # hunk header: metadata for this file is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
355
355
class linereader(object):
    """File-like wrapper that allows pushing lines back onto the input.

    Pushed-back lines are returned (oldest first) before anything is
    read from the underlying file object.
    """
    def __init__(self, fp):
        self.fp = fp
        self.buf = []       # queue of pushed-back lines

    def push(self, line):
        """Queue *line* to be returned by the next readline() call."""
        if line is not None:
            self.buf.append(line)

    def readline(self):
        """Return the next line, preferring pushed-back lines."""
        if self.buf:
            head = self.buf[0]
            del self.buf[0]
            return head
        return self.fp.readline()

    def __iter__(self):
        # Yield lines until EOF (an empty string from readline).
        while True:
            line = self.readline()
            if not line:
                break
            yield line
379
379
class abstractbackend(object):
    """Interface for the destination a patch is applied to."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. total is the number of hunks
        which failed to apply and total the total number of hunks for this
        files.
        """
        # Optional hook: backends without reject support ignore this.
        pass

    def exists(self, fname):
        raise NotImplementedError
412 class fsbackend(abstractbackend):
412 class fsbackend(abstractbackend):
413 def __init__(self, ui, basedir):
413 def __init__(self, ui, basedir):
414 super(fsbackend, self).__init__(ui)
414 super(fsbackend, self).__init__(ui)
415 self.opener = scmutil.opener(basedir)
415 self.opener = scmutil.opener(basedir)
416
416
417 def _join(self, f):
417 def _join(self, f):
418 return os.path.join(self.opener.base, f)
418 return os.path.join(self.opener.base, f)
419
419
420 def getfile(self, fname):
420 def getfile(self, fname):
421 if self.opener.islink(fname):
421 if self.opener.islink(fname):
422 return (self.opener.readlink(fname), (True, False))
422 return (self.opener.readlink(fname), (True, False))
423
423
424 isexec = False
424 isexec = False
425 try:
425 try:
426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
427 except OSError, e:
427 except OSError, e:
428 if e.errno != errno.ENOENT:
428 if e.errno != errno.ENOENT:
429 raise
429 raise
430 try:
430 try:
431 return (self.opener.read(fname), (False, isexec))
431 return (self.opener.read(fname), (False, isexec))
432 except IOError, e:
432 except IOError, e:
433 if e.errno != errno.ENOENT:
433 if e.errno != errno.ENOENT:
434 raise
434 raise
435 return None, None
435 return None, None
436
436
437 def setfile(self, fname, data, mode, copysource):
437 def setfile(self, fname, data, mode, copysource):
438 islink, isexec = mode
438 islink, isexec = mode
439 if data is None:
439 if data is None:
440 self.opener.setflags(fname, islink, isexec)
440 self.opener.setflags(fname, islink, isexec)
441 return
441 return
442 if islink:
442 if islink:
443 self.opener.symlink(data, fname)
443 self.opener.symlink(data, fname)
444 else:
444 else:
445 self.opener.write(fname, data)
445 self.opener.write(fname, data)
446 if isexec:
446 if isexec:
447 self.opener.setflags(fname, False, True)
447 self.opener.setflags(fname, False, True)
448
448
449 def unlink(self, fname):
449 def unlink(self, fname):
450 self.opener.unlinkpath(fname, ignoremissing=True)
450 self.opener.unlinkpath(fname, ignoremissing=True)
451
451
452 def writerej(self, fname, failed, total, lines):
452 def writerej(self, fname, failed, total, lines):
453 fname = fname + ".rej"
453 fname = fname + ".rej"
454 self.ui.warn(
454 self.ui.warn(
455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
456 (failed, total, fname))
456 (failed, total, fname))
457 fp = self.opener(fname, 'w')
457 fp = self.opener(fname, 'w')
458 fp.writelines(lines)
458 fp.writelines(lines)
459 fp.close()
459 fp.close()
460
460
461 def exists(self, fname):
461 def exists(self, fname):
462 return self.opener.lexists(fname)
462 return self.opener.lexists(fname)
463
463
class workingbackend(fsbackend):
    """Filesystem backend that also keeps the repository dirstate in
    sync with the files the patch touched."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()        # files deleted by the patch
        self.changed = set()        # every file touched by the patch
        self.copied = []            # (source, dest) pairs to record

    def _checkknown(self, fname):
        # refuse to patch a file that exists but is untracked
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        """Flush recorded changes into the dirstate; return the sorted
        list of all touched files."""
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
507
507
class filestore(object):
    """Store file revisions in memory, spilling to a temp directory
    once *maxsize* bytes (default 4 MiB) are held in memory."""

    def __init__(self, maxsize=None):
        self.opener = None          # created lazily on first spill
        self.files = {}             # fname -> (tmpname, mode, copied)
        self.created = 0            # counter for unique temp names
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0               # bytes currently held in memory
        self.data = {}              # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            # still fits in memory
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = scmutil.opener(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        """Return (data, mode, copied) for *fname*, or (None, None, None)
        when it was never stored."""
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        # remove the spill directory, if one was ever created
        if self.opener:
            shutil.rmtree(self.opener.base)
544
544
class repobackend(abstractbackend):
    """Backend that patches against a changectx, buffering results in a
    filestore instead of touching the working directory."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx              # base context files are read from
        self.store = store          # filestore collecting new contents
        self.changed = set()
        self.removed = set()
        self.copied = {}            # dest -> source

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # file absent in the base context
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # flags-only change: keep existing content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of every file added, modified or removed."""
        return self.changed | self.removed
586
586
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw string literals keep the regex escapes (\d, \+, \*) out of the
# hands of the Python string parser, avoiding invalid-escape warnings.
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# Supported line-ending handling modes for patched files.
eolmodes = ['strict', 'crlf', 'lf', 'auto']
591
591
class patchfile(object):
    """State for patching a single file.

    Current contents are read through `backend` (or from `store` when
    the file is a copy/rename of another), hunks are applied one by one
    with apply(), and the result is written back by close().  Hunks
    that do not apply are collected in self.rej and can be dumped in
    patch(1)-compatible form with write_rej().
    """

    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        # gp describes one file of the patch (path, mode, op, oldpath)
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None          # line ending detected in the original file
        self.backend = backend
        self.ui = ui
        self.lines = []          # current content, one entry per line
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        # copies/renames read their base content from the store, not
        # from the target file itself
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                # (eol is sniffed from the first line only)
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}           # line content -> [indexes], for fuzzing
        self.dirty = 0
        self.offset = 0          # cumulative line delta from applied hunks
        self.skew = 0            # where the previous hunk landed vs expected
        self.rej = []            # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write `lines` to the backend, restoring the requested EOLs."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        # hunks are normalized to LF internally; convert back on write
        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Print 'patching file X' once, on warning or in verbose mode."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk to self.lines.

        Returns 0 on a clean apply, the fuzz amount when fuzzing was
        needed, and -1 when the hunk was rejected (it is then appended
        to self.rej).
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        # binary hunks replace (or delete) the whole file at once
        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        # try increasing fuzz, trimming context from the top only first,
        # then from both ends
        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush changes and rejects; return the number of rejects."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
806
806
class header(object):
    """patch header

    Wraps the raw header lines of one file's diff and answers questions
    about them (file names, binary-ness, git-specific operations).
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|new|deleted|copy|rename) ')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        # git emits an 'index' line for binary diffs
        return util.any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        """Write a condensed, human-oriented rendering of the header."""
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                changed = sum([max(h.added, h.removed) for h in self.hunks])
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks), changed))
                break
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        # binary/deletion diffs must be taken or refused as a whole
        return util.any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        """Return the file name(s) named by the first header line."""
        m = self.diffgit_re.match(self.header[0])
        if not m:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def special(self):
        return util.any(self.special_re.match(h) for h in self.header)
864
864
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(start, contextlines):
            # NOTE: trimming is currently disabled (the guard below can
            # never fire), so hunks keep their full context.
            surplus = len(contextlines) - self.maxcontext
            if False and surplus > 0:
                return start + surplus, contextlines[:self.maxcontext]
            return start, contextlines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = sum(1 for line in hunk if line[0] == '+')
        rem = sum(1 for line in hunk if line[0] == '-')
        return add, rem

    def write(self, fp):
        """Emit this hunk as a unified-diff fragment to fp."""
        ncontext = len(self.before) + len(self.after)
        # a trailing no-eol marker is not counted in the line ranges
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            ncontext -= 1
        fromlen = ncontext + self.removed
        tolen = ncontext + self.added
        procsuffix = self.proc and (' ' + self.proc)
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen, procsuffix))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
910
class hunk(object):
    """One hunk parsed from a unified or context diff.

    self.a holds the old-side lines and self.b the new-side lines;
    self.hunk keeps the raw hunk text (descriptor line first) for
    reject output.
    """
    def __init__(self, desc, num, lr, context):
        # desc is the '@@ ...' (or context '***') descriptor line;
        # lr is a linereader positioned just after it, or None when the
        # caller (getnormalized) rebuilds the object by hand
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified hunk body; ranges come from the descriptor."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in '@@ -start +start @@' means 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk, converting it to unified form."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        # old-side range: '*** start,end ****'
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old block: '- ' removals, '! ' changes, context lines
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith(' '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': drop the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        # new-side range: '--- start,end ----'
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # hunki walks self.hunk so '+' lines are woven in at the right
        # position relative to the old-side lines already stored
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith(' '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # advance through self.hunk until we find where this new-side
            # line belongs; insert additions that the old side lacked
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        """Consume a trailing '\\ No newline...' marker, if present."""
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # a fully-read hunk has exactly the advertised number of lines
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with `fuzz` context
        lines trimmed; start values are converted to 0-based indexes."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1081
1127
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # self.text is the decoded payload; None until _read succeeds,
        # which is what complete() keys on
        self.text = None
        self.delta = False        # True for 'delta' (vs 'literal') hunks
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        """Return the new file content; `lines` is the current content,
        used as the delta base for 'delta' hunks."""
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Decode a git binary patch body (base85 + zlib) from lr."""
        def getline(lr, hunk):
            # keep the raw line in hunk (for rejects), return it stripped
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the 'literal <size>' or 'delta <size>' header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # the first character encodes the decoded length of the line:
            # 'A'-'Z' -> 1-26, 'a'-'z' -> 27-52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError, e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        # the advertised size guards against truncated/corrupt hunks
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1137
1183
def parsefilename(str):
    """Extract the file name from a '--- ' or '+++ ' patch header line.

    The four-character prefix is skipped; the name ends at the first
    tab (or, failing that, space), since diff tools append timestamps
    and other metadata after the name.
    """
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
        if i < 0:
            # no separator at all: the whole remainder is the name
            return s
    return s[:i]
1147
1193
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform(' a/b/c ', 0, '')
    ('', ' a/b/c')
    >>> pathtransform(' a/b/c ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform(' a//b/c ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    pathlen = len(path)
    i = 0
    if strip == 0:
        # nothing to strip: only trailing whitespace is removed
        return '', path.rstrip()
    count = strip
    while count > 0:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (count, strip, path))
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i] == '/':
            i += 1
        count -= 1
    return path[:i].lstrip(), prefix + path[i:].rstrip()
1183
1229
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a plain (non-git) patch hunk.

    Decides which file the hunk targets and whether it is a creation
    ('ADD') or deletion ('DELETE'), based on /dev/null markers, the
    hunk ranges and which side exists in the backend.
    Raises PatchError when neither source nor destination can be
    determined.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1232
1278
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # underlying stream is not seekable: buffer it all in memory so
        # we can rewind after the metadata scan
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the main parser re-reads the patch body from the start
    fp.seek(pos)
    return gitpatches
1258
1304
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context is None until we know the diff flavor: False for unified
    # diffs, True for context diffs
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk for this file: announce the file itself first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries (no hunks) preceding this file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining git metadata entries that had no hunks
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1357
1403
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(binchunk):
        # length, in bytes, of the variable-length size header at the
        # start of binchunk (each byte carries a continuation flag in bit 7)
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    s = deltahead(binchunk)
    binchunk = binchunk[s:]  # skip the source-size header
    s = deltahead(binchunk)
    binchunk = binchunk[s:]  # skip the target-size header
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy command: bits 0-3 select offset bytes, 4-6 size bytes
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # zero encodes the maximum copy size (as in patch-delta.c)
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # insert command: cmd is the count of literal bytes that follow
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out
1413
1459
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to _applydiff with the default patchfile factory
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
1426
1472
1427 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1473 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1428 eolmode='strict'):
1474 eolmode='strict'):
1429
1475
1430 if prefix:
1476 if prefix:
1431 # clean up double slashes, lack of trailing slashes, etc
1477 # clean up double slashes, lack of trailing slashes, etc
1432 prefix = util.normpath(prefix) + '/'
1478 prefix = util.normpath(prefix) + '/'
1433 def pstrip(p):
1479 def pstrip(p):
1434 return pathtransform(p, strip - 1, prefix)[1]
1480 return pathtransform(p, strip - 1, prefix)[1]
1435
1481
1436 rejects = 0
1482 rejects = 0
1437 err = 0
1483 err = 0
1438 current_file = None
1484 current_file = None
1439
1485
1440 for state, values in iterhunks(fp):
1486 for state, values in iterhunks(fp):
1441 if state == 'hunk':
1487 if state == 'hunk':
1442 if not current_file:
1488 if not current_file:
1443 continue
1489 continue
1444 ret = current_file.apply(values)
1490 ret = current_file.apply(values)
1445 if ret > 0:
1491 if ret > 0:
1446 err = 1
1492 err = 1
1447 elif state == 'file':
1493 elif state == 'file':
1448 if current_file:
1494 if current_file:
1449 rejects += current_file.close()
1495 rejects += current_file.close()
1450 current_file = None
1496 current_file = None
1451 afile, bfile, first_hunk, gp = values
1497 afile, bfile, first_hunk, gp = values
1452 if gp:
1498 if gp:
1453 gp.path = pstrip(gp.path)
1499 gp.path = pstrip(gp.path)
1454 if gp.oldpath:
1500 if gp.oldpath:
1455 gp.oldpath = pstrip(gp.oldpath)
1501 gp.oldpath = pstrip(gp.oldpath)
1456 else:
1502 else:
1457 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1503 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1458 prefix)
1504 prefix)
1459 if gp.op == 'RENAME':
1505 if gp.op == 'RENAME':
1460 backend.unlink(gp.oldpath)
1506 backend.unlink(gp.oldpath)
1461 if not first_hunk:
1507 if not first_hunk:
1462 if gp.op == 'DELETE':
1508 if gp.op == 'DELETE':
1463 backend.unlink(gp.path)
1509 backend.unlink(gp.path)
1464 continue
1510 continue
1465 data, mode = None, None
1511 data, mode = None, None
1466 if gp.op in ('RENAME', 'COPY'):
1512 if gp.op in ('RENAME', 'COPY'):
1467 data, mode = store.getfile(gp.oldpath)[:2]
1513 data, mode = store.getfile(gp.oldpath)[:2]
1468 # FIXME: failing getfile has never been handled here
1514 # FIXME: failing getfile has never been handled here
1469 assert data is not None
1515 assert data is not None
1470 if gp.mode:
1516 if gp.mode:
1471 mode = gp.mode
1517 mode = gp.mode
1472 if gp.op == 'ADD':
1518 if gp.op == 'ADD':
1473 # Added files without content have no hunk and
1519 # Added files without content have no hunk and
1474 # must be created
1520 # must be created
1475 data = ''
1521 data = ''
1476 if data or mode:
1522 if data or mode:
1477 if (gp.op in ('ADD', 'RENAME', 'COPY')
1523 if (gp.op in ('ADD', 'RENAME', 'COPY')
1478 and backend.exists(gp.path)):
1524 and backend.exists(gp.path)):
1479 raise PatchError(_("cannot create %s: destination "
1525 raise PatchError(_("cannot create %s: destination "
1480 "already exists") % gp.path)
1526 "already exists") % gp.path)
1481 backend.setfile(gp.path, data, mode, gp.oldpath)
1527 backend.setfile(gp.path, data, mode, gp.oldpath)
1482 continue
1528 continue
1483 try:
1529 try:
1484 current_file = patcher(ui, gp, backend, store,
1530 current_file = patcher(ui, gp, backend, store,
1485 eolmode=eolmode)
1531 eolmode=eolmode)
1486 except PatchError, inst:
1532 except PatchError, inst:
1487 ui.warn(str(inst) + '\n')
1533 ui.warn(str(inst) + '\n')
1488 current_file = None
1534 current_file = None
1489 rejects += 1
1535 rejects += 1
1490 continue
1536 continue
1491 elif state == 'git':
1537 elif state == 'git':
1492 for gp in values:
1538 for gp in values:
1493 path = pstrip(gp.oldpath)
1539 path = pstrip(gp.oldpath)
1494 data, mode = backend.getfile(path)
1540 data, mode = backend.getfile(path)
1495 if data is None:
1541 if data is None:
1496 # The error ignored here will trigger a getfile()
1542 # The error ignored here will trigger a getfile()
1497 # error in a place more appropriate for error
1543 # error in a place more appropriate for error
1498 # handling, and will not interrupt the patching
1544 # handling, and will not interrupt the patching
1499 # process.
1545 # process.
1500 pass
1546 pass
1501 else:
1547 else:
1502 store.setfile(path, data, mode)
1548 store.setfile(path, data, mode)
1503 else:
1549 else:
1504 raise util.Abort(_('unsupported parser state: %s') % state)
1550 raise util.Abort(_('unsupported parser state: %s') % state)
1505
1551
1506 if current_file:
1552 if current_file:
1507 rejects += current_file.close()
1553 rejects += current_file.close()
1508
1554
1509 if rejects:
1555 if rejects:
1510 return -1
1556 return -1
1511 return err
1557 return err
1512
1558
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        # scrape the external tool's output for touched files, fuzz and
        # failure reports
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1554
1600
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply 'patchobj' (a path or file-like object) through 'backend'.

    'files' (a set, created if None) collects the touched file names.
    Returns True when the patch applied with fuzz, raises PatchError
    when it failed to apply.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a path: assume patchobj is already a file-like object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1581
1627
def internalpatch(ui, repo, patchobj, strip, prefix, files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1588
1634
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> against changeset ctx using a repository-backed
    store instead of the working directory."""
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1593
1639
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        # user configured an external patch tool; delegate to it
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
1615
1661
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of repository paths touched by the patch at
    'patchpath' (including the old path of renames), without applying it.
    """
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk,
                                       strip, '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()
1639
1685
class GitDiffRequired(Exception):
    """Raised when an operation needs a diff in git extended format."""
    pass
1642
1688
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

# backwards-compatibility alias for callers using the old name
diffopts = diffallopts
1649
1695
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness
      issues with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # command-line opts win over config; forceplain overrides config
        # when HGPLAIN is in effect
        if opts:
            v = opts.get(key)
            if v:
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')
    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary')
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)
1690
1736
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache of filelogs, keyed by file name, capped at 20
        cache = {}
        order = util.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    hexfunc = repo.ui.debugflag and hex or short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2)

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1764
1810
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # labels for lines inside a file header (between 'diff' and first '@@')
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # labels for lines in the hunk body
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                # any line that is not context/change/hunk starts a header
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')
1816
1862
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)
1820
1866
1821 def _filepairs(ctx1, modified, added, removed, copy, opts):
1867 def _filepairs(ctx1, modified, added, removed, copy, opts):
1822 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
1868 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
1823 before and f2 is the the name after. For added files, f1 will be None,
1869 before and f2 is the the name after. For added files, f1 will be None,
1824 and for removed files, f2 will be None. copyop may be set to None, 'copy'
1870 and for removed files, f2 will be None. copyop may be set to None, 'copy'
1825 or 'rename' (the latter two only if opts.git is set).'''
1871 or 'rename' (the latter two only if opts.git is set).'''
1826 gone = set()
1872 gone = set()
1827
1873
1828 copyto = dict([(v, k) for k, v in copy.items()])
1874 copyto = dict([(v, k) for k, v in copy.items()])
1829
1875
1830 addedset, removedset = set(added), set(removed)
1876 addedset, removedset = set(added), set(removed)
1831 # Fix up added, since merged-in additions appear as
1877 # Fix up added, since merged-in additions appear as
1832 # modifications during merges
1878 # modifications during merges
1833 for f in modified:
1879 for f in modified:
1834 if f not in ctx1:
1880 if f not in ctx1:
1835 addedset.add(f)
1881 addedset.add(f)
1836
1882
1837 for f in sorted(modified + added + removed):
1883 for f in sorted(modified + added + removed):
1838 copyop = None
1884 copyop = None
1839 f1, f2 = f, f
1885 f1, f2 = f, f
1840 if f in addedset:
1886 if f in addedset:
1841 f1 = None
1887 f1 = None
1842 if f in copy:
1888 if f in copy:
1843 if opts.git:
1889 if opts.git:
1844 f1 = copy[f]
1890 f1 = copy[f]
1845 if f1 in removedset and f1 not in gone:
1891 if f1 in removedset and f1 not in gone:
1846 copyop = 'rename'
1892 copyop = 'rename'
1847 gone.add(f1)
1893 gone.add(f1)
1848 else:
1894 else:
1849 copyop = 'copy'
1895 copyop = 'copy'
1850 elif f in removedset:
1896 elif f in removedset:
1851 f2 = None
1897 f2 = None
1852 if opts.git:
1898 if opts.git:
1853 # have we already reported a copy above?
1899 # have we already reported a copy above?
1854 if (f in copyto and copyto[f] in addedset
1900 if (f in copyto and copyto[f] in addedset
1855 and copy[copyto[f]] == f):
1901 and copy[copyto[f]] == f):
1856 continue
1902 continue
1857 yield f1, f2, copyop
1903 yield f1, f2, copyop
1858
1904
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    '''Yield diff text chunks (header string, then body text) for every
    changed file between ctx1 and ctx2.

    Calls losedatafn (when set and opts.git is not) for each change that
    cannot be represented losslessly in plain diff format.
    '''

    def gitindex(text):
        # git blob id: sha1 over "blob <len>\0<content>"
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    for f1, f2, copyop in _filepairs(
            ctx1, modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = posixpath.join(prefix, f1 or f2)
        path2 = posixpath.join(prefix, f2 or f1)
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text
1951
1997
def diffstatsum(stats):
    '''Reduce diffstatdata() tuples to
    (maxfilewidth, maxtotal, addtotal, removetotal, hasbinary).'''
    maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
    for f, a, r, b in stats:
        # colwidth accounts for wide (east-asian) characters in names
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary
1962
2008
def diffstatdata(lines):
    '''Parse diff output lines into a list of
    (filename, adds, removes, isbinary) tuples, one per file.'''
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            # flush the previous file before starting a new one
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results
1992
2038
def diffstat(lines, width=80, git=False):
    '''Render diff output lines as a diffstat summary string.

    Produces one " name | count ++--" row per file plus a totals line,
    scaled so the +/- graph fits within ``width`` columns.
    '''
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    # 'Bin' takes three columns
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2030
2076
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # split the row into the "name | count" part and the +/- graph
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(r'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(r'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now