##// END OF EJS Templates
record: move scanpatch from record to patch...
Laurent Charignon -
r24264:c4205452 default
parent child Browse files
Show More
@@ -1,567 +1,514
1 # record.py
1 # record.py
2 #
2 #
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''commands to interactively select changes for commit/qrefresh'''
8 '''commands to interactively select changes for commit/qrefresh'''
9
9
10 from mercurial.i18n import _
10 from mercurial.i18n import _
11 from mercurial import cmdutil, commands, extensions, hg, patch
11 from mercurial import cmdutil, commands, extensions, hg, patch
12 from mercurial import util
12 from mercurial import util
13 import copy, cStringIO, errno, os, re, shutil, tempfile
13 import copy, cStringIO, errno, os, shutil, tempfile
14
14
# Command table for this extension, populated by the @command decorator.
cmdtable = {}
command = cmdutil.command(cmdtable)
# Marks this extension as shipped with (and tested against) core Mercurial.
testedwith = 'internal'
# Matches a unified-diff hunk range line: "@@ -start,len +start,len @@ section"
lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')

def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lr = patch.linereader(fp)

    def scanwhile(first, pred):
        """collect lines from lr for as long as pred accepts them"""
        collected = [first]
        while True:
            cur = lr.readline()
            if not cur:
                break
            if pred(cur):
                collected.append(cur)
            else:
                # not ours: hand the line back for the outer loop
                lr.push(cur)
                break
        return collected

    while True:
        line = lr.readline()
        if not line:
            break
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            # a new file section: gather header lines up to the ---/+++ pair
            def notheader(ln):
                parts = ln.split(None, 1)
                return not parts or parts[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
72
19
def parsepatch(fp):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0    # next hunk's start line on the - side
            self.toline = 0      # next hunk's start line on the + side
            self.proc = ''       # function/section name from the @@ line
            self.header = None   # current patch.header being filled
            self.context = []    # context lines seen since the last hunk
            self.before = []     # context lines preceding the pending hunk
            self.hunk = []       # accumulated +/- lines of the pending hunk
            self.headers = []    # completed per-file headers

        def addrange(self, limits):
            # "@@ -a,b +c,d @@ proc": remember where the next hunk starts
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # context lines terminate any pending hunk
            if self.hunk:
                h = patch.recordhunk(self.header, self.fromline, self.toline,
                                     self.proc, self.before, self.hunk,
                                     context)
                self.header.hunks.append(h)
                # advance line counters past the hunk we just closed
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
                self.proc = ''
            self.context = context

        def addhunk(self, hunk):
            # pending context becomes the "before" lines of this hunk
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # flush the previous file's trailing hunk, then open a new header
            self.addcontext([])
            h = patch.header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the final hunk and hand back everything collected
            self.addcontext([])
            return self.headers

        # legal (state -> event) transitions and their handlers; anything
        # absent here is a malformed patch
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    sm = parser()

    state = 'context'
    for newstate, data in patch.scanpatch(fp):
        try:
            # handlers are plain functions in the class dict, so the
            # instance is passed explicitly
            sm.transitions[state][newstate](sm, data)
        except KeyError:
            raise patch.PatchError('unhandled transition: %s -> %s' %
                                   (state, newstate))
        state = newstate
    return sm.finished()
152
99
def filterpatch(ui, headers):
    """Interactively filter patch chunks into applied-only chunks

    headers is the list produced by parsepatch().  Returns a flat list of
    the headers and hunks the user chose to apply.
    """

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # an earlier 'f'/'s'/'a'/'d' answer decides for us without prompting
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            # '$$'-separated prompt: first word of each choice after '&'
            # is the key; indices below must match this ordering
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, t.lower()))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # http://mercurial.selenic.com/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff",
                                                      text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ui.system("%s \"%s\"" % (editor, patchfn),
                              environ={'HGUSER': ui.username()},
                              onerr=util.Abort, errprefix=_("edit failed"))
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = cStringIO.StringIO()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    # re-parse the edited hunk into fresh header/hunk objects
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise util.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        # skipfile resets per file; skipall persists across files
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            # duplicate header (same file appearing twice): skip it
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # e.g. binary files or new files: all-or-nothing, no per-hunk
            # prompting possible
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = _("record this change to '%s'?") % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = _("record change %d/%d to '%s'?") % (idx, total,
                                                           chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                                                      skipall, msg, chunk)
            if r:
                if fixoffset:
                    # earlier skipped hunks shifted the target line numbers;
                    # copy so the original chunk object stays untouched
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # user edited the hunk: apply the edited version(s) instead
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                # hunk skipped: subsequent hunks' +side line numbers shift
                fixoffset += chunk.removed - chunk.added
    return sum([h for h in applied.itervalues()
                if h[0].special() or len(h) > 1], [])
305
252
@command("record",
         # same options as commit + white space diff options
         commands.table['^commit|ci'][1][:] + commands.diffwsopts,
         _('hg record [OPTION]... [FILE]...'))
def record(ui, repo, *pats, **opts):
    '''interactively select changes to commit

    If a list of files is omitted, all changes reported by :hg:`status`
    will be candidates for recording.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    You will be prompted for whether to record changes to each
    modified file, and for files with multiple changes, for each
    change to use. For each query, the following responses are
    possible::

      y - record this change
      n - skip this change
      e - edit this change manually

      s - skip remaining changes to this file
      f - record remaining changes to this file

      d - done, skip remaining changes and files
      a - record all changes to all remaining files
      q - quit, recording no changes

      ? - display help

    This command is not available when committing a merge.'''

    # Delegate to the shared driver; commands.commit performs the actual
    # commit once the working directory holds only the accepted changes.
    dorecord(ui, repo, commands.commit, 'commit', False, *pats, **opts)
339
286
def qrefresh(origfn, ui, repo, *pats, **opts):
    """Wrapper around mq's qrefresh adding -i/--interactive support.

    Without --interactive the wrapped command runs unchanged; with it,
    the user interactively picks the changes folded into the top patch.
    """
    if not opts['interactive']:
        # plain qrefresh requested: defer entirely to the wrapped command
        return origfn(ui, repo, *pats, **opts)

    mq = extensions.find('mq')

    def committomq(ui, repo, *pats, **opts):
        # At this point the working copy contains only changes that
        # were accepted. All other changes were reverted.
        # We can't pass *pats here since qrefresh will undo all other
        # changed files in the patch that aren't in pats.
        mq.refresh(ui, repo, **opts)

    # backup all changed files
    dorecord(ui, repo, committomq, 'qrefresh', True, *pats, **opts)
355
302
# This command registration is replaced during uisetup().
@command('qrecord',
         [],
         _('hg qrecord [OPTION]... PATCH [FILE]...'),
         inferrepo=True)
def qrecord(ui, repo, patch, *pats, **opts):
    '''interactively record a new patch

    See :hg:`help qnew` & :hg:`help record` for more information and
    usage.
    '''

    try:
        mq = extensions.find('mq')
    except KeyError:
        raise util.Abort(_("'mq' extension not loaded"))

    # Validate the patch name up front, before any interactive prompting.
    repo.mq.checkpatchname(patch)

    def committomq(ui, repo, *pats, **opts):
        # name already validated above, so skip mq.new's own check
        opts['checkname'] = False
        mq.new(ui, repo, patch, *pats, **opts)

    dorecord(ui, repo, committomq, 'qnew', False, *pats, **opts)
380
327
def qnew(origfn, ui, repo, patch, *args, **opts):
    """Wrapper around mq's qnew: divert to qrecord when --interactive is set."""
    # note: the conditional expression only evaluates the branch taken,
    # so qrecord is not looked up for the non-interactive path
    handler = qrecord if opts['interactive'] else origfn
    return handler(ui, repo, patch, *args, **opts)
385
332
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts):
    # Shared driver for record/qrecord/qrefresh -i.  commitfunc performs
    # the final non-interactive commit; cmdsuggest names the command to
    # suggest when run non-interactively; backupall forces backing up
    # every changed file (needed by qrefresh).
    if not ui.interactive():
        raise util.Abort(_('running non-interactively, use %s instead') %
                         cmdsuggest)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        cmdutil.checkunfinished(repo, commit=True)
        merge = len(repo[None].parents()) > 1
        if merge:
            raise util.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        # produce a git-style diff of the working directory as text
        status = repo.status(match=match)
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        originalchunks = patch.diff(repo, changes=status, opts=diffopts)
        fp = cStringIO.StringIO()
        fp.write(''.join(originalchunks))
        fp.seek(0)

        # 1. filter patch, so we have intending-to apply subset of it
        try:
            chunks = filterpatch(ui, parsepatch(fp))
        except patch.PatchError, err:
            raise util.Abort(_('error parsing patch: %s') % err)

        del fp

        # files touched by at least one accepted chunk
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # hunks (as opposed to headers) have no files(); headers
                # already cover them
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        # added files whose hunks were edited (not in the original diff):
        # they must be removed and re-created by applying the edited patch
        newandmodifiedfiles = set()
        for h in chunks:
            ishunk = isinstance(h, patch.recordhunk)
            isnew = h.filename() in status.added
            if ishunk and isnew and not h in originalchunks:
                newandmodifiedfiles.add(h.filename())

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles
                        if f in modified or f in newandmodifiedfiles]

        backups = {}
        if tobackup:
            backupdir = repo.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError, err:
                # directory may survive from a previous run
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname)
                shutil.copystat(repo.wjoin(f), tmpname)
                backups[f] = tmpname

            # serialize only the chunks for files we will actually touch
            fp = cStringIO.StringIO()
            for c in chunks:
                fname = c.filename()
                if fname in backups or fname in newandmodifiedfiles:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # edited new files must be deleted so the patch can re-add them
            [os.unlink(c) for c in newandmodifiedfiles]

            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                hg.revert(repo, repo.dirstate.p1(),
                          lambda key: key in backups)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, '', eolmode=None)
                except patch.PatchError, err:
                    raise util.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            commitfunc(ui, repo, *newfiles, **opts)

            return 0
        finally:
            # 5. finally restore backed-up files
            try:
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))
                    util.copyfile(tmpname, repo.wjoin(realname))
                    # Our calls to copystat() here and above are a
                    # hack to trick any editors that have f open that
                    # we haven't modified them.
                    #
                    # Also note that this racy as an editor could
                    # notice the file's mtime before we've finished
                    # writing it.
                    shutil.copystat(tmpname, repo.wjoin(realname))
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup; never mask the real outcome
                pass

    # wrap ui.write so diff output can be labeled/colorized
    def wrapwrite(orig, *args, **kw):
        label = kw.pop('label', '')
        for chunk, l in patch.difflabel(lambda: args):
            orig(chunk, label=label + l)
    oldwrite = ui.write
    extensions.wrapfunction(ui, 'write', wrapwrite)
    try:
        return cmdutil.commit(ui, repo, recordfunc, pats, opts)
    finally:
        ui.write = oldwrite
547
494
def uisetup(ui):
    """Re-register qrecord with mq-derived options and wrap qnew/qrefresh.

    Does nothing when the mq extension is not enabled, leaving the
    placeholder qrecord registration in place.
    """
    try:
        mq = extensions.find('mq')
    except KeyError:
        return

    cmdtable["qrecord"] = (
        qrecord,
        # same options as qnew, but copied so qrecord does not inherit
        # -i/--interactive; white-space diff options are added on top
        mq.cmdtable['^qnew'][1][:] + commands.diffwsopts,
        _('hg qrecord [OPTION]... PATCH [FILE]...'))

    _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
    _wrapcmd('qrefresh', mq.cmdtable, qrefresh,
             _("interactively select changes to refresh"))
564
511
def _wrapcmd(cmd, table, wrapfn, msg):
    """Wrap command `cmd` in `table` with `wrapfn` and add an
    -i/--interactive flag whose help text is `msg`."""
    wrapped = extensions.wrapcommand(table, cmd, wrapfn)
    # wrapped[1] is the command's option list
    wrapped[1].append(('i', 'interactive', None, msg))
@@ -1,2094 +1,2146
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email, os, errno, re, posixpath
9 import cStringIO, email, os, errno, re, posixpath
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11 # On python2.4 you have to import these by name or they fail to
11 # On python2.4 you have to import these by name or they fail to
12 # load. This was not a problem on Python 2.7.
12 # load. This was not a problem on Python 2.7.
13 import email.Generator
13 import email.Generator
14 import email.Parser
14 import email.Parser
15
15
16 from i18n import _
16 from i18n import _
17 from node import hex, short
17 from node import hex, short
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19
19
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
22
22
23 class PatchError(Exception):
23 class PatchError(Exception):
24 pass
24 pass
25
25
26
26
27 # public functions
27 # public functions
28
28
29 def split(stream):
29 def split(stream):
30 '''return an iterator of individual patches from a stream'''
30 '''return an iterator of individual patches from a stream'''
31 def isheader(line, inheader):
31 def isheader(line, inheader):
32 if inheader and line[0] in (' ', '\t'):
32 if inheader and line[0] in (' ', '\t'):
33 # continuation
33 # continuation
34 return True
34 return True
35 if line[0] in (' ', '-', '+'):
35 if line[0] in (' ', '-', '+'):
36 # diff line - don't check for header pattern in there
36 # diff line - don't check for header pattern in there
37 return False
37 return False
38 l = line.split(': ', 1)
38 l = line.split(': ', 1)
39 return len(l) == 2 and ' ' not in l[0]
39 return len(l) == 2 and ' ' not in l[0]
40
40
41 def chunk(lines):
41 def chunk(lines):
42 return cStringIO.StringIO(''.join(lines))
42 return cStringIO.StringIO(''.join(lines))
43
43
44 def hgsplit(stream, cur):
44 def hgsplit(stream, cur):
45 inheader = True
45 inheader = True
46
46
47 for line in stream:
47 for line in stream:
48 if not line.strip():
48 if not line.strip():
49 inheader = False
49 inheader = False
50 if not inheader and line.startswith('# HG changeset patch'):
50 if not inheader and line.startswith('# HG changeset patch'):
51 yield chunk(cur)
51 yield chunk(cur)
52 cur = []
52 cur = []
53 inheader = True
53 inheader = True
54
54
55 cur.append(line)
55 cur.append(line)
56
56
57 if cur:
57 if cur:
58 yield chunk(cur)
58 yield chunk(cur)
59
59
60 def mboxsplit(stream, cur):
60 def mboxsplit(stream, cur):
61 for line in stream:
61 for line in stream:
62 if line.startswith('From '):
62 if line.startswith('From '):
63 for c in split(chunk(cur[1:])):
63 for c in split(chunk(cur[1:])):
64 yield c
64 yield c
65 cur = []
65 cur = []
66
66
67 cur.append(line)
67 cur.append(line)
68
68
69 if cur:
69 if cur:
70 for c in split(chunk(cur[1:])):
70 for c in split(chunk(cur[1:])):
71 yield c
71 yield c
72
72
73 def mimesplit(stream, cur):
73 def mimesplit(stream, cur):
74 def msgfp(m):
74 def msgfp(m):
75 fp = cStringIO.StringIO()
75 fp = cStringIO.StringIO()
76 g = email.Generator.Generator(fp, mangle_from_=False)
76 g = email.Generator.Generator(fp, mangle_from_=False)
77 g.flatten(m)
77 g.flatten(m)
78 fp.seek(0)
78 fp.seek(0)
79 return fp
79 return fp
80
80
81 for line in stream:
81 for line in stream:
82 cur.append(line)
82 cur.append(line)
83 c = chunk(cur)
83 c = chunk(cur)
84
84
85 m = email.Parser.Parser().parse(c)
85 m = email.Parser.Parser().parse(c)
86 if not m.is_multipart():
86 if not m.is_multipart():
87 yield msgfp(m)
87 yield msgfp(m)
88 else:
88 else:
89 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
89 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
90 for part in m.walk():
90 for part in m.walk():
91 ct = part.get_content_type()
91 ct = part.get_content_type()
92 if ct not in ok_types:
92 if ct not in ok_types:
93 continue
93 continue
94 yield msgfp(part)
94 yield msgfp(part)
95
95
96 def headersplit(stream, cur):
96 def headersplit(stream, cur):
97 inheader = False
97 inheader = False
98
98
99 for line in stream:
99 for line in stream:
100 if not inheader and isheader(line, inheader):
100 if not inheader and isheader(line, inheader):
101 yield chunk(cur)
101 yield chunk(cur)
102 cur = []
102 cur = []
103 inheader = True
103 inheader = True
104 if inheader and not isheader(line, inheader):
104 if inheader and not isheader(line, inheader):
105 inheader = False
105 inheader = False
106
106
107 cur.append(line)
107 cur.append(line)
108
108
109 if cur:
109 if cur:
110 yield chunk(cur)
110 yield chunk(cur)
111
111
112 def remainder(cur):
112 def remainder(cur):
113 yield chunk(cur)
113 yield chunk(cur)
114
114
115 class fiter(object):
115 class fiter(object):
116 def __init__(self, fp):
116 def __init__(self, fp):
117 self.fp = fp
117 self.fp = fp
118
118
119 def __iter__(self):
119 def __iter__(self):
120 return self
120 return self
121
121
122 def next(self):
122 def next(self):
123 l = self.fp.readline()
123 l = self.fp.readline()
124 if not l:
124 if not l:
125 raise StopIteration
125 raise StopIteration
126 return l
126 return l
127
127
128 inheader = False
128 inheader = False
129 cur = []
129 cur = []
130
130
131 mimeheaders = ['content-type']
131 mimeheaders = ['content-type']
132
132
133 if not util.safehasattr(stream, 'next'):
133 if not util.safehasattr(stream, 'next'):
134 # http responses, for example, have readline but not next
134 # http responses, for example, have readline but not next
135 stream = fiter(stream)
135 stream = fiter(stream)
136
136
137 for line in stream:
137 for line in stream:
138 cur.append(line)
138 cur.append(line)
139 if line.startswith('# HG changeset patch'):
139 if line.startswith('# HG changeset patch'):
140 return hgsplit(stream, cur)
140 return hgsplit(stream, cur)
141 elif line.startswith('From '):
141 elif line.startswith('From '):
142 return mboxsplit(stream, cur)
142 return mboxsplit(stream, cur)
143 elif isheader(line, inheader):
143 elif isheader(line, inheader):
144 inheader = True
144 inheader = True
145 if line.split(':', 1)[0].lower() in mimeheaders:
145 if line.split(':', 1)[0].lower() in mimeheaders:
146 # let email parser handle this
146 # let email parser handle this
147 return mimesplit(stream, cur)
147 return mimesplit(stream, cur)
148 elif line.startswith('--- ') and inheader:
148 elif line.startswith('--- ') and inheader:
149 # No evil headers seen by diff start, split by hand
149 # No evil headers seen by diff start, split by hand
150 return headersplit(stream, cur)
150 return headersplit(stream, cur)
151 # Not enough info, keep reading
151 # Not enough info, keep reading
152
152
153 # if we are here, we have a very plain patch
153 # if we are here, we have a very plain patch
154 return remainder(cur)
154 return remainder(cur)
155
155
156 def extract(ui, fileobj):
156 def extract(ui, fileobj):
157 '''extract patch from data read from fileobj.
157 '''extract patch from data read from fileobj.
158
158
159 patch can be a normal patch or contained in an email message.
159 patch can be a normal patch or contained in an email message.
160
160
161 return tuple (filename, message, user, date, branch, node, p1, p2).
161 return tuple (filename, message, user, date, branch, node, p1, p2).
162 Any item in the returned tuple can be None. If filename is None,
162 Any item in the returned tuple can be None. If filename is None,
163 fileobj did not contain a patch. Caller must unlink filename when done.'''
163 fileobj did not contain a patch. Caller must unlink filename when done.'''
164
164
165 # attempt to detect the start of a patch
165 # attempt to detect the start of a patch
166 # (this heuristic is borrowed from quilt)
166 # (this heuristic is borrowed from quilt)
167 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
167 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
168 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
168 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
169 r'---[ \t].*?^\+\+\+[ \t]|'
169 r'---[ \t].*?^\+\+\+[ \t]|'
170 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
170 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
171
171
172 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
172 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
173 tmpfp = os.fdopen(fd, 'w')
173 tmpfp = os.fdopen(fd, 'w')
174 try:
174 try:
175 msg = email.Parser.Parser().parse(fileobj)
175 msg = email.Parser.Parser().parse(fileobj)
176
176
177 subject = msg['Subject']
177 subject = msg['Subject']
178 user = msg['From']
178 user = msg['From']
179 if not subject and not user:
179 if not subject and not user:
180 # Not an email, restore parsed headers if any
180 # Not an email, restore parsed headers if any
181 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
181 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
182
182
183 # should try to parse msg['Date']
183 # should try to parse msg['Date']
184 date = None
184 date = None
185 nodeid = None
185 nodeid = None
186 branch = None
186 branch = None
187 parents = []
187 parents = []
188
188
189 if subject:
189 if subject:
190 if subject.startswith('[PATCH'):
190 if subject.startswith('[PATCH'):
191 pend = subject.find(']')
191 pend = subject.find(']')
192 if pend >= 0:
192 if pend >= 0:
193 subject = subject[pend + 1:].lstrip()
193 subject = subject[pend + 1:].lstrip()
194 subject = re.sub(r'\n[ \t]+', ' ', subject)
194 subject = re.sub(r'\n[ \t]+', ' ', subject)
195 ui.debug('Subject: %s\n' % subject)
195 ui.debug('Subject: %s\n' % subject)
196 if user:
196 if user:
197 ui.debug('From: %s\n' % user)
197 ui.debug('From: %s\n' % user)
198 diffs_seen = 0
198 diffs_seen = 0
199 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
199 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
200 message = ''
200 message = ''
201 for part in msg.walk():
201 for part in msg.walk():
202 content_type = part.get_content_type()
202 content_type = part.get_content_type()
203 ui.debug('Content-Type: %s\n' % content_type)
203 ui.debug('Content-Type: %s\n' % content_type)
204 if content_type not in ok_types:
204 if content_type not in ok_types:
205 continue
205 continue
206 payload = part.get_payload(decode=True)
206 payload = part.get_payload(decode=True)
207 m = diffre.search(payload)
207 m = diffre.search(payload)
208 if m:
208 if m:
209 hgpatch = False
209 hgpatch = False
210 hgpatchheader = False
210 hgpatchheader = False
211 ignoretext = False
211 ignoretext = False
212
212
213 ui.debug('found patch at byte %d\n' % m.start(0))
213 ui.debug('found patch at byte %d\n' % m.start(0))
214 diffs_seen += 1
214 diffs_seen += 1
215 cfp = cStringIO.StringIO()
215 cfp = cStringIO.StringIO()
216 for line in payload[:m.start(0)].splitlines():
216 for line in payload[:m.start(0)].splitlines():
217 if line.startswith('# HG changeset patch') and not hgpatch:
217 if line.startswith('# HG changeset patch') and not hgpatch:
218 ui.debug('patch generated by hg export\n')
218 ui.debug('patch generated by hg export\n')
219 hgpatch = True
219 hgpatch = True
220 hgpatchheader = True
220 hgpatchheader = True
221 # drop earlier commit message content
221 # drop earlier commit message content
222 cfp.seek(0)
222 cfp.seek(0)
223 cfp.truncate()
223 cfp.truncate()
224 subject = None
224 subject = None
225 elif hgpatchheader:
225 elif hgpatchheader:
226 if line.startswith('# User '):
226 if line.startswith('# User '):
227 user = line[7:]
227 user = line[7:]
228 ui.debug('From: %s\n' % user)
228 ui.debug('From: %s\n' % user)
229 elif line.startswith("# Date "):
229 elif line.startswith("# Date "):
230 date = line[7:]
230 date = line[7:]
231 elif line.startswith("# Branch "):
231 elif line.startswith("# Branch "):
232 branch = line[9:]
232 branch = line[9:]
233 elif line.startswith("# Node ID "):
233 elif line.startswith("# Node ID "):
234 nodeid = line[10:]
234 nodeid = line[10:]
235 elif line.startswith("# Parent "):
235 elif line.startswith("# Parent "):
236 parents.append(line[9:].lstrip())
236 parents.append(line[9:].lstrip())
237 elif not line.startswith("# "):
237 elif not line.startswith("# "):
238 hgpatchheader = False
238 hgpatchheader = False
239 elif line == '---':
239 elif line == '---':
240 ignoretext = True
240 ignoretext = True
241 if not hgpatchheader and not ignoretext:
241 if not hgpatchheader and not ignoretext:
242 cfp.write(line)
242 cfp.write(line)
243 cfp.write('\n')
243 cfp.write('\n')
244 message = cfp.getvalue()
244 message = cfp.getvalue()
245 if tmpfp:
245 if tmpfp:
246 tmpfp.write(payload)
246 tmpfp.write(payload)
247 if not payload.endswith('\n'):
247 if not payload.endswith('\n'):
248 tmpfp.write('\n')
248 tmpfp.write('\n')
249 elif not diffs_seen and message and content_type == 'text/plain':
249 elif not diffs_seen and message and content_type == 'text/plain':
250 message += '\n' + payload
250 message += '\n' + payload
251 except: # re-raises
251 except: # re-raises
252 tmpfp.close()
252 tmpfp.close()
253 os.unlink(tmpname)
253 os.unlink(tmpname)
254 raise
254 raise
255
255
256 if subject and not message.startswith(subject):
256 if subject and not message.startswith(subject):
257 message = '%s\n%s' % (subject, message)
257 message = '%s\n%s' % (subject, message)
258 tmpfp.close()
258 tmpfp.close()
259 if not diffs_seen:
259 if not diffs_seen:
260 os.unlink(tmpname)
260 os.unlink(tmpname)
261 return None, message, user, date, branch, None, None, None
261 return None, message, user, date, branch, None, None, None
262 p1 = parents and parents.pop(0) or None
262 p1 = parents and parents.pop(0) or None
263 p2 = parents and parents.pop(0) or None
263 p2 = parents and parents.pop(0) or None
264 return tmpname, message, user, date, branch, nodeid, p1, p2
264 return tmpname, message, user, date, branch, nodeid, p1, p2
265
265
266 class patchmeta(object):
266 class patchmeta(object):
267 """Patched file metadata
267 """Patched file metadata
268
268
269 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
269 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
270 or COPY. 'path' is patched file path. 'oldpath' is set to the
270 or COPY. 'path' is patched file path. 'oldpath' is set to the
271 origin file when 'op' is either COPY or RENAME, None otherwise. If
271 origin file when 'op' is either COPY or RENAME, None otherwise. If
272 file mode is changed, 'mode' is a tuple (islink, isexec) where
272 file mode is changed, 'mode' is a tuple (islink, isexec) where
273 'islink' is True if the file is a symlink and 'isexec' is True if
273 'islink' is True if the file is a symlink and 'isexec' is True if
274 the file is executable. Otherwise, 'mode' is None.
274 the file is executable. Otherwise, 'mode' is None.
275 """
275 """
276 def __init__(self, path):
276 def __init__(self, path):
277 self.path = path
277 self.path = path
278 self.oldpath = None
278 self.oldpath = None
279 self.mode = None
279 self.mode = None
280 self.op = 'MODIFY'
280 self.op = 'MODIFY'
281 self.binary = False
281 self.binary = False
282
282
283 def setmode(self, mode):
283 def setmode(self, mode):
284 islink = mode & 020000
284 islink = mode & 020000
285 isexec = mode & 0100
285 isexec = mode & 0100
286 self.mode = (islink, isexec)
286 self.mode = (islink, isexec)
287
287
288 def copy(self):
288 def copy(self):
289 other = patchmeta(self.path)
289 other = patchmeta(self.path)
290 other.oldpath = self.oldpath
290 other.oldpath = self.oldpath
291 other.mode = self.mode
291 other.mode = self.mode
292 other.op = self.op
292 other.op = self.op
293 other.binary = self.binary
293 other.binary = self.binary
294 return other
294 return other
295
295
296 def _ispatchinga(self, afile):
296 def _ispatchinga(self, afile):
297 if afile == '/dev/null':
297 if afile == '/dev/null':
298 return self.op == 'ADD'
298 return self.op == 'ADD'
299 return afile == 'a/' + (self.oldpath or self.path)
299 return afile == 'a/' + (self.oldpath or self.path)
300
300
301 def _ispatchingb(self, bfile):
301 def _ispatchingb(self, bfile):
302 if bfile == '/dev/null':
302 if bfile == '/dev/null':
303 return self.op == 'DELETE'
303 return self.op == 'DELETE'
304 return bfile == 'b/' + self.path
304 return bfile == 'b/' + self.path
305
305
306 def ispatching(self, afile, bfile):
306 def ispatching(self, afile, bfile):
307 return self._ispatchinga(afile) and self._ispatchingb(bfile)
307 return self._ispatchinga(afile) and self._ispatchingb(bfile)
308
308
309 def __repr__(self):
309 def __repr__(self):
310 return "<patchmeta %s %r>" % (self.op, self.path)
310 return "<patchmeta %s %r>" % (self.op, self.path)
311
311
312 def readgitpatch(lr):
312 def readgitpatch(lr):
313 """extract git-style metadata about patches from <patchname>"""
313 """extract git-style metadata about patches from <patchname>"""
314
314
315 # Filter patch for git information
315 # Filter patch for git information
316 gp = None
316 gp = None
317 gitpatches = []
317 gitpatches = []
318 for line in lr:
318 for line in lr:
319 line = line.rstrip(' \r\n')
319 line = line.rstrip(' \r\n')
320 if line.startswith('diff --git a/'):
320 if line.startswith('diff --git a/'):
321 m = gitre.match(line)
321 m = gitre.match(line)
322 if m:
322 if m:
323 if gp:
323 if gp:
324 gitpatches.append(gp)
324 gitpatches.append(gp)
325 dst = m.group(2)
325 dst = m.group(2)
326 gp = patchmeta(dst)
326 gp = patchmeta(dst)
327 elif gp:
327 elif gp:
328 if line.startswith('--- '):
328 if line.startswith('--- '):
329 gitpatches.append(gp)
329 gitpatches.append(gp)
330 gp = None
330 gp = None
331 continue
331 continue
332 if line.startswith('rename from '):
332 if line.startswith('rename from '):
333 gp.op = 'RENAME'
333 gp.op = 'RENAME'
334 gp.oldpath = line[12:]
334 gp.oldpath = line[12:]
335 elif line.startswith('rename to '):
335 elif line.startswith('rename to '):
336 gp.path = line[10:]
336 gp.path = line[10:]
337 elif line.startswith('copy from '):
337 elif line.startswith('copy from '):
338 gp.op = 'COPY'
338 gp.op = 'COPY'
339 gp.oldpath = line[10:]
339 gp.oldpath = line[10:]
340 elif line.startswith('copy to '):
340 elif line.startswith('copy to '):
341 gp.path = line[8:]
341 gp.path = line[8:]
342 elif line.startswith('deleted file'):
342 elif line.startswith('deleted file'):
343 gp.op = 'DELETE'
343 gp.op = 'DELETE'
344 elif line.startswith('new file mode '):
344 elif line.startswith('new file mode '):
345 gp.op = 'ADD'
345 gp.op = 'ADD'
346 gp.setmode(int(line[-6:], 8))
346 gp.setmode(int(line[-6:], 8))
347 elif line.startswith('new mode '):
347 elif line.startswith('new mode '):
348 gp.setmode(int(line[-6:], 8))
348 gp.setmode(int(line[-6:], 8))
349 elif line.startswith('GIT binary patch'):
349 elif line.startswith('GIT binary patch'):
350 gp.binary = True
350 gp.binary = True
351 if gp:
351 if gp:
352 gitpatches.append(gp)
352 gitpatches.append(gp)
353
353
354 return gitpatches
354 return gitpatches
355
355
356 class linereader(object):
356 class linereader(object):
357 # simple class to allow pushing lines back into the input stream
357 # simple class to allow pushing lines back into the input stream
358 def __init__(self, fp):
358 def __init__(self, fp):
359 self.fp = fp
359 self.fp = fp
360 self.buf = []
360 self.buf = []
361
361
362 def push(self, line):
362 def push(self, line):
363 if line is not None:
363 if line is not None:
364 self.buf.append(line)
364 self.buf.append(line)
365
365
366 def readline(self):
366 def readline(self):
367 if self.buf:
367 if self.buf:
368 l = self.buf[0]
368 l = self.buf[0]
369 del self.buf[0]
369 del self.buf[0]
370 return l
370 return l
371 return self.fp.readline()
371 return self.fp.readline()
372
372
373 def __iter__(self):
373 def __iter__(self):
374 while True:
374 while True:
375 l = self.readline()
375 l = self.readline()
376 if not l:
376 if not l:
377 break
377 break
378 yield l
378 yield l
379
379
380 class abstractbackend(object):
380 class abstractbackend(object):
381 def __init__(self, ui):
381 def __init__(self, ui):
382 self.ui = ui
382 self.ui = ui
383
383
384 def getfile(self, fname):
384 def getfile(self, fname):
385 """Return target file data and flags as a (data, (islink,
385 """Return target file data and flags as a (data, (islink,
386 isexec)) tuple. Data is None if file is missing/deleted.
386 isexec)) tuple. Data is None if file is missing/deleted.
387 """
387 """
388 raise NotImplementedError
388 raise NotImplementedError
389
389
390 def setfile(self, fname, data, mode, copysource):
390 def setfile(self, fname, data, mode, copysource):
391 """Write data to target file fname and set its mode. mode is a
391 """Write data to target file fname and set its mode. mode is a
392 (islink, isexec) tuple. If data is None, the file content should
392 (islink, isexec) tuple. If data is None, the file content should
393 be left unchanged. If the file is modified after being copied,
393 be left unchanged. If the file is modified after being copied,
394 copysource is set to the original file name.
394 copysource is set to the original file name.
395 """
395 """
396 raise NotImplementedError
396 raise NotImplementedError
397
397
398 def unlink(self, fname):
398 def unlink(self, fname):
399 """Unlink target file."""
399 """Unlink target file."""
400 raise NotImplementedError
400 raise NotImplementedError
401
401
402 def writerej(self, fname, failed, total, lines):
402 def writerej(self, fname, failed, total, lines):
403 """Write rejected lines for fname. total is the number of hunks
403 """Write rejected lines for fname. total is the number of hunks
404 which failed to apply and total the total number of hunks for this
404 which failed to apply and total the total number of hunks for this
405 files.
405 files.
406 """
406 """
407 pass
407 pass
408
408
409 def exists(self, fname):
409 def exists(self, fname):
410 raise NotImplementedError
410 raise NotImplementedError
411
411
412 class fsbackend(abstractbackend):
412 class fsbackend(abstractbackend):
413 def __init__(self, ui, basedir):
413 def __init__(self, ui, basedir):
414 super(fsbackend, self).__init__(ui)
414 super(fsbackend, self).__init__(ui)
415 self.opener = scmutil.opener(basedir)
415 self.opener = scmutil.opener(basedir)
416
416
417 def _join(self, f):
417 def _join(self, f):
418 return os.path.join(self.opener.base, f)
418 return os.path.join(self.opener.base, f)
419
419
420 def getfile(self, fname):
420 def getfile(self, fname):
421 if self.opener.islink(fname):
421 if self.opener.islink(fname):
422 return (self.opener.readlink(fname), (True, False))
422 return (self.opener.readlink(fname), (True, False))
423
423
424 isexec = False
424 isexec = False
425 try:
425 try:
426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
427 except OSError, e:
427 except OSError, e:
428 if e.errno != errno.ENOENT:
428 if e.errno != errno.ENOENT:
429 raise
429 raise
430 try:
430 try:
431 return (self.opener.read(fname), (False, isexec))
431 return (self.opener.read(fname), (False, isexec))
432 except IOError, e:
432 except IOError, e:
433 if e.errno != errno.ENOENT:
433 if e.errno != errno.ENOENT:
434 raise
434 raise
435 return None, None
435 return None, None
436
436
437 def setfile(self, fname, data, mode, copysource):
437 def setfile(self, fname, data, mode, copysource):
438 islink, isexec = mode
438 islink, isexec = mode
439 if data is None:
439 if data is None:
440 self.opener.setflags(fname, islink, isexec)
440 self.opener.setflags(fname, islink, isexec)
441 return
441 return
442 if islink:
442 if islink:
443 self.opener.symlink(data, fname)
443 self.opener.symlink(data, fname)
444 else:
444 else:
445 self.opener.write(fname, data)
445 self.opener.write(fname, data)
446 if isexec:
446 if isexec:
447 self.opener.setflags(fname, False, True)
447 self.opener.setflags(fname, False, True)
448
448
449 def unlink(self, fname):
449 def unlink(self, fname):
450 self.opener.unlinkpath(fname, ignoremissing=True)
450 self.opener.unlinkpath(fname, ignoremissing=True)
451
451
452 def writerej(self, fname, failed, total, lines):
452 def writerej(self, fname, failed, total, lines):
453 fname = fname + ".rej"
453 fname = fname + ".rej"
454 self.ui.warn(
454 self.ui.warn(
455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
456 (failed, total, fname))
456 (failed, total, fname))
457 fp = self.opener(fname, 'w')
457 fp = self.opener(fname, 'w')
458 fp.writelines(lines)
458 fp.writelines(lines)
459 fp.close()
459 fp.close()
460
460
461 def exists(self, fname):
461 def exists(self, fname):
462 return self.opener.lexists(fname)
462 return self.opener.lexists(fname)
463
463
464 class workingbackend(fsbackend):
464 class workingbackend(fsbackend):
465 def __init__(self, ui, repo, similarity):
465 def __init__(self, ui, repo, similarity):
466 super(workingbackend, self).__init__(ui, repo.root)
466 super(workingbackend, self).__init__(ui, repo.root)
467 self.repo = repo
467 self.repo = repo
468 self.similarity = similarity
468 self.similarity = similarity
469 self.removed = set()
469 self.removed = set()
470 self.changed = set()
470 self.changed = set()
471 self.copied = []
471 self.copied = []
472
472
473 def _checkknown(self, fname):
473 def _checkknown(self, fname):
474 if self.repo.dirstate[fname] == '?' and self.exists(fname):
474 if self.repo.dirstate[fname] == '?' and self.exists(fname):
475 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
475 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
476
476
477 def setfile(self, fname, data, mode, copysource):
477 def setfile(self, fname, data, mode, copysource):
478 self._checkknown(fname)
478 self._checkknown(fname)
479 super(workingbackend, self).setfile(fname, data, mode, copysource)
479 super(workingbackend, self).setfile(fname, data, mode, copysource)
480 if copysource is not None:
480 if copysource is not None:
481 self.copied.append((copysource, fname))
481 self.copied.append((copysource, fname))
482 self.changed.add(fname)
482 self.changed.add(fname)
483
483
484 def unlink(self, fname):
484 def unlink(self, fname):
485 self._checkknown(fname)
485 self._checkknown(fname)
486 super(workingbackend, self).unlink(fname)
486 super(workingbackend, self).unlink(fname)
487 self.removed.add(fname)
487 self.removed.add(fname)
488 self.changed.add(fname)
488 self.changed.add(fname)
489
489
490 def close(self):
490 def close(self):
491 wctx = self.repo[None]
491 wctx = self.repo[None]
492 changed = set(self.changed)
492 changed = set(self.changed)
493 for src, dst in self.copied:
493 for src, dst in self.copied:
494 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
494 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
495 if self.removed:
495 if self.removed:
496 wctx.forget(sorted(self.removed))
496 wctx.forget(sorted(self.removed))
497 for f in self.removed:
497 for f in self.removed:
498 if f not in self.repo.dirstate:
498 if f not in self.repo.dirstate:
499 # File was deleted and no longer belongs to the
499 # File was deleted and no longer belongs to the
500 # dirstate, it was probably marked added then
500 # dirstate, it was probably marked added then
501 # deleted, and should not be considered by
501 # deleted, and should not be considered by
502 # marktouched().
502 # marktouched().
503 changed.discard(f)
503 changed.discard(f)
504 if changed:
504 if changed:
505 scmutil.marktouched(self.repo, changed, self.similarity)
505 scmutil.marktouched(self.repo, changed, self.similarity)
506 return sorted(self.changed)
506 return sorted(self.changed)
507
507
508 class filestore(object):
508 class filestore(object):
509 def __init__(self, maxsize=None):
509 def __init__(self, maxsize=None):
510 self.opener = None
510 self.opener = None
511 self.files = {}
511 self.files = {}
512 self.created = 0
512 self.created = 0
513 self.maxsize = maxsize
513 self.maxsize = maxsize
514 if self.maxsize is None:
514 if self.maxsize is None:
515 self.maxsize = 4*(2**20)
515 self.maxsize = 4*(2**20)
516 self.size = 0
516 self.size = 0
517 self.data = {}
517 self.data = {}
518
518
519 def setfile(self, fname, data, mode, copied=None):
519 def setfile(self, fname, data, mode, copied=None):
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
521 self.data[fname] = (data, mode, copied)
521 self.data[fname] = (data, mode, copied)
522 self.size += len(data)
522 self.size += len(data)
523 else:
523 else:
524 if self.opener is None:
524 if self.opener is None:
525 root = tempfile.mkdtemp(prefix='hg-patch-')
525 root = tempfile.mkdtemp(prefix='hg-patch-')
526 self.opener = scmutil.opener(root)
526 self.opener = scmutil.opener(root)
527 # Avoid filename issues with these simple names
527 # Avoid filename issues with these simple names
528 fn = str(self.created)
528 fn = str(self.created)
529 self.opener.write(fn, data)
529 self.opener.write(fn, data)
530 self.created += 1
530 self.created += 1
531 self.files[fname] = (fn, mode, copied)
531 self.files[fname] = (fn, mode, copied)
532
532
533 def getfile(self, fname):
533 def getfile(self, fname):
534 if fname in self.data:
534 if fname in self.data:
535 return self.data[fname]
535 return self.data[fname]
536 if not self.opener or fname not in self.files:
536 if not self.opener or fname not in self.files:
537 return None, None, None
537 return None, None, None
538 fn, mode, copied = self.files[fname]
538 fn, mode, copied = self.files[fname]
539 return self.opener.read(fn), mode, copied
539 return self.opener.read(fn), mode, copied
540
540
541 def close(self):
541 def close(self):
542 if self.opener:
542 if self.opener:
543 shutil.rmtree(self.opener.base)
543 shutil.rmtree(self.opener.base)
544
544
545 class repobackend(abstractbackend):
545 class repobackend(abstractbackend):
546 def __init__(self, ui, repo, ctx, store):
546 def __init__(self, ui, repo, ctx, store):
547 super(repobackend, self).__init__(ui)
547 super(repobackend, self).__init__(ui)
548 self.repo = repo
548 self.repo = repo
549 self.ctx = ctx
549 self.ctx = ctx
550 self.store = store
550 self.store = store
551 self.changed = set()
551 self.changed = set()
552 self.removed = set()
552 self.removed = set()
553 self.copied = {}
553 self.copied = {}
554
554
555 def _checkknown(self, fname):
555 def _checkknown(self, fname):
556 if fname not in self.ctx:
556 if fname not in self.ctx:
557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
558
558
559 def getfile(self, fname):
559 def getfile(self, fname):
560 try:
560 try:
561 fctx = self.ctx[fname]
561 fctx = self.ctx[fname]
562 except error.LookupError:
562 except error.LookupError:
563 return None, None
563 return None, None
564 flags = fctx.flags()
564 flags = fctx.flags()
565 return fctx.data(), ('l' in flags, 'x' in flags)
565 return fctx.data(), ('l' in flags, 'x' in flags)
566
566
567 def setfile(self, fname, data, mode, copysource):
567 def setfile(self, fname, data, mode, copysource):
568 if copysource:
568 if copysource:
569 self._checkknown(copysource)
569 self._checkknown(copysource)
570 if data is None:
570 if data is None:
571 data = self.ctx[fname].data()
571 data = self.ctx[fname].data()
572 self.store.setfile(fname, data, mode, copysource)
572 self.store.setfile(fname, data, mode, copysource)
573 self.changed.add(fname)
573 self.changed.add(fname)
574 if copysource:
574 if copysource:
575 self.copied[fname] = copysource
575 self.copied[fname] = copysource
576
576
577 def unlink(self, fname):
577 def unlink(self, fname):
578 self._checkknown(fname)
578 self._checkknown(fname)
579 self.removed.add(fname)
579 self.removed.add(fname)
580
580
581 def exists(self, fname):
581 def exists(self, fname):
582 return fname in self.ctx
582 return fname in self.ctx
583
583
584 def close(self):
584 def close(self):
585 return self.changed | self.removed
585 return self.changed | self.removed
586
586
587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
591
591
592 class patchfile(object):
592 class patchfile(object):
593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
594 self.fname = gp.path
594 self.fname = gp.path
595 self.eolmode = eolmode
595 self.eolmode = eolmode
596 self.eol = None
596 self.eol = None
597 self.backend = backend
597 self.backend = backend
598 self.ui = ui
598 self.ui = ui
599 self.lines = []
599 self.lines = []
600 self.exists = False
600 self.exists = False
601 self.missing = True
601 self.missing = True
602 self.mode = gp.mode
602 self.mode = gp.mode
603 self.copysource = gp.oldpath
603 self.copysource = gp.oldpath
604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
605 self.remove = gp.op == 'DELETE'
605 self.remove = gp.op == 'DELETE'
606 if self.copysource is None:
606 if self.copysource is None:
607 data, mode = backend.getfile(self.fname)
607 data, mode = backend.getfile(self.fname)
608 else:
608 else:
609 data, mode = store.getfile(self.copysource)[:2]
609 data, mode = store.getfile(self.copysource)[:2]
610 if data is not None:
610 if data is not None:
611 self.exists = self.copysource is None or backend.exists(self.fname)
611 self.exists = self.copysource is None or backend.exists(self.fname)
612 self.missing = False
612 self.missing = False
613 if data:
613 if data:
614 self.lines = mdiff.splitnewlines(data)
614 self.lines = mdiff.splitnewlines(data)
615 if self.mode is None:
615 if self.mode is None:
616 self.mode = mode
616 self.mode = mode
617 if self.lines:
617 if self.lines:
618 # Normalize line endings
618 # Normalize line endings
619 if self.lines[0].endswith('\r\n'):
619 if self.lines[0].endswith('\r\n'):
620 self.eol = '\r\n'
620 self.eol = '\r\n'
621 elif self.lines[0].endswith('\n'):
621 elif self.lines[0].endswith('\n'):
622 self.eol = '\n'
622 self.eol = '\n'
623 if eolmode != 'strict':
623 if eolmode != 'strict':
624 nlines = []
624 nlines = []
625 for l in self.lines:
625 for l in self.lines:
626 if l.endswith('\r\n'):
626 if l.endswith('\r\n'):
627 l = l[:-2] + '\n'
627 l = l[:-2] + '\n'
628 nlines.append(l)
628 nlines.append(l)
629 self.lines = nlines
629 self.lines = nlines
630 else:
630 else:
631 if self.create:
631 if self.create:
632 self.missing = False
632 self.missing = False
633 if self.mode is None:
633 if self.mode is None:
634 self.mode = (False, False)
634 self.mode = (False, False)
635 if self.missing:
635 if self.missing:
636 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
636 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
637
637
638 self.hash = {}
638 self.hash = {}
639 self.dirty = 0
639 self.dirty = 0
640 self.offset = 0
640 self.offset = 0
641 self.skew = 0
641 self.skew = 0
642 self.rej = []
642 self.rej = []
643 self.fileprinted = False
643 self.fileprinted = False
644 self.printfile(False)
644 self.printfile(False)
645 self.hunks = 0
645 self.hunks = 0
646
646
647 def writelines(self, fname, lines, mode):
647 def writelines(self, fname, lines, mode):
648 if self.eolmode == 'auto':
648 if self.eolmode == 'auto':
649 eol = self.eol
649 eol = self.eol
650 elif self.eolmode == 'crlf':
650 elif self.eolmode == 'crlf':
651 eol = '\r\n'
651 eol = '\r\n'
652 else:
652 else:
653 eol = '\n'
653 eol = '\n'
654
654
655 if self.eolmode != 'strict' and eol and eol != '\n':
655 if self.eolmode != 'strict' and eol and eol != '\n':
656 rawlines = []
656 rawlines = []
657 for l in lines:
657 for l in lines:
658 if l and l[-1] == '\n':
658 if l and l[-1] == '\n':
659 l = l[:-1] + eol
659 l = l[:-1] + eol
660 rawlines.append(l)
660 rawlines.append(l)
661 lines = rawlines
661 lines = rawlines
662
662
663 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
663 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
664
664
665 def printfile(self, warn):
665 def printfile(self, warn):
666 if self.fileprinted:
666 if self.fileprinted:
667 return
667 return
668 if warn or self.ui.verbose:
668 if warn or self.ui.verbose:
669 self.fileprinted = True
669 self.fileprinted = True
670 s = _("patching file %s\n") % self.fname
670 s = _("patching file %s\n") % self.fname
671 if warn:
671 if warn:
672 self.ui.warn(s)
672 self.ui.warn(s)
673 else:
673 else:
674 self.ui.note(s)
674 self.ui.note(s)
675
675
676
676
677 def findlines(self, l, linenum):
677 def findlines(self, l, linenum):
678 # looks through the hash and finds candidate lines. The
678 # looks through the hash and finds candidate lines. The
679 # result is a list of line numbers sorted based on distance
679 # result is a list of line numbers sorted based on distance
680 # from linenum
680 # from linenum
681
681
682 cand = self.hash.get(l, [])
682 cand = self.hash.get(l, [])
683 if len(cand) > 1:
683 if len(cand) > 1:
684 # resort our list of potentials forward then back.
684 # resort our list of potentials forward then back.
685 cand.sort(key=lambda x: abs(x - linenum))
685 cand.sort(key=lambda x: abs(x - linenum))
686 return cand
686 return cand
687
687
688 def write_rej(self):
688 def write_rej(self):
689 # our rejects are a little different from patch(1). This always
689 # our rejects are a little different from patch(1). This always
690 # creates rejects in the same form as the original patch. A file
690 # creates rejects in the same form as the original patch. A file
691 # header is inserted so that you can run the reject through patch again
691 # header is inserted so that you can run the reject through patch again
692 # without having to type the filename.
692 # without having to type the filename.
693 if not self.rej:
693 if not self.rej:
694 return
694 return
695 base = os.path.basename(self.fname)
695 base = os.path.basename(self.fname)
696 lines = ["--- %s\n+++ %s\n" % (base, base)]
696 lines = ["--- %s\n+++ %s\n" % (base, base)]
697 for x in self.rej:
697 for x in self.rej:
698 for l in x.hunk:
698 for l in x.hunk:
699 lines.append(l)
699 lines.append(l)
700 if l[-1] != '\n':
700 if l[-1] != '\n':
701 lines.append("\n\ No newline at end of file\n")
701 lines.append("\n\ No newline at end of file\n")
702 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
702 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
703
703
704 def apply(self, h):
704 def apply(self, h):
705 if not h.complete():
705 if not h.complete():
706 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
706 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
707 (h.number, h.desc, len(h.a), h.lena, len(h.b),
707 (h.number, h.desc, len(h.a), h.lena, len(h.b),
708 h.lenb))
708 h.lenb))
709
709
710 self.hunks += 1
710 self.hunks += 1
711
711
712 if self.missing:
712 if self.missing:
713 self.rej.append(h)
713 self.rej.append(h)
714 return -1
714 return -1
715
715
716 if self.exists and self.create:
716 if self.exists and self.create:
717 if self.copysource:
717 if self.copysource:
718 self.ui.warn(_("cannot create %s: destination already "
718 self.ui.warn(_("cannot create %s: destination already "
719 "exists\n") % self.fname)
719 "exists\n") % self.fname)
720 else:
720 else:
721 self.ui.warn(_("file %s already exists\n") % self.fname)
721 self.ui.warn(_("file %s already exists\n") % self.fname)
722 self.rej.append(h)
722 self.rej.append(h)
723 return -1
723 return -1
724
724
725 if isinstance(h, binhunk):
725 if isinstance(h, binhunk):
726 if self.remove:
726 if self.remove:
727 self.backend.unlink(self.fname)
727 self.backend.unlink(self.fname)
728 else:
728 else:
729 l = h.new(self.lines)
729 l = h.new(self.lines)
730 self.lines[:] = l
730 self.lines[:] = l
731 self.offset += len(l)
731 self.offset += len(l)
732 self.dirty = True
732 self.dirty = True
733 return 0
733 return 0
734
734
735 horig = h
735 horig = h
736 if (self.eolmode in ('crlf', 'lf')
736 if (self.eolmode in ('crlf', 'lf')
737 or self.eolmode == 'auto' and self.eol):
737 or self.eolmode == 'auto' and self.eol):
738 # If new eols are going to be normalized, then normalize
738 # If new eols are going to be normalized, then normalize
739 # hunk data before patching. Otherwise, preserve input
739 # hunk data before patching. Otherwise, preserve input
740 # line-endings.
740 # line-endings.
741 h = h.getnormalized()
741 h = h.getnormalized()
742
742
743 # fast case first, no offsets, no fuzz
743 # fast case first, no offsets, no fuzz
744 old, oldstart, new, newstart = h.fuzzit(0, False)
744 old, oldstart, new, newstart = h.fuzzit(0, False)
745 oldstart += self.offset
745 oldstart += self.offset
746 orig_start = oldstart
746 orig_start = oldstart
747 # if there's skew we want to emit the "(offset %d lines)" even
747 # if there's skew we want to emit the "(offset %d lines)" even
748 # when the hunk cleanly applies at start + skew, so skip the
748 # when the hunk cleanly applies at start + skew, so skip the
749 # fast case code
749 # fast case code
750 if (self.skew == 0 and
750 if (self.skew == 0 and
751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
752 if self.remove:
752 if self.remove:
753 self.backend.unlink(self.fname)
753 self.backend.unlink(self.fname)
754 else:
754 else:
755 self.lines[oldstart:oldstart + len(old)] = new
755 self.lines[oldstart:oldstart + len(old)] = new
756 self.offset += len(new) - len(old)
756 self.offset += len(new) - len(old)
757 self.dirty = True
757 self.dirty = True
758 return 0
758 return 0
759
759
760 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
760 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
761 self.hash = {}
761 self.hash = {}
762 for x, s in enumerate(self.lines):
762 for x, s in enumerate(self.lines):
763 self.hash.setdefault(s, []).append(x)
763 self.hash.setdefault(s, []).append(x)
764
764
765 for fuzzlen in xrange(3):
765 for fuzzlen in xrange(3):
766 for toponly in [True, False]:
766 for toponly in [True, False]:
767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
768 oldstart = oldstart + self.offset + self.skew
768 oldstart = oldstart + self.offset + self.skew
769 oldstart = min(oldstart, len(self.lines))
769 oldstart = min(oldstart, len(self.lines))
770 if old:
770 if old:
771 cand = self.findlines(old[0][1:], oldstart)
771 cand = self.findlines(old[0][1:], oldstart)
772 else:
772 else:
773 # Only adding lines with no or fuzzed context, just
773 # Only adding lines with no or fuzzed context, just
774 # take the skew in account
774 # take the skew in account
775 cand = [oldstart]
775 cand = [oldstart]
776
776
777 for l in cand:
777 for l in cand:
778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
779 self.lines[l : l + len(old)] = new
779 self.lines[l : l + len(old)] = new
780 self.offset += len(new) - len(old)
780 self.offset += len(new) - len(old)
781 self.skew = l - orig_start
781 self.skew = l - orig_start
782 self.dirty = True
782 self.dirty = True
783 offset = l - orig_start - fuzzlen
783 offset = l - orig_start - fuzzlen
784 if fuzzlen:
784 if fuzzlen:
785 msg = _("Hunk #%d succeeded at %d "
785 msg = _("Hunk #%d succeeded at %d "
786 "with fuzz %d "
786 "with fuzz %d "
787 "(offset %d lines).\n")
787 "(offset %d lines).\n")
788 self.printfile(True)
788 self.printfile(True)
789 self.ui.warn(msg %
789 self.ui.warn(msg %
790 (h.number, l + 1, fuzzlen, offset))
790 (h.number, l + 1, fuzzlen, offset))
791 else:
791 else:
792 msg = _("Hunk #%d succeeded at %d "
792 msg = _("Hunk #%d succeeded at %d "
793 "(offset %d lines).\n")
793 "(offset %d lines).\n")
794 self.ui.note(msg % (h.number, l + 1, offset))
794 self.ui.note(msg % (h.number, l + 1, offset))
795 return fuzzlen
795 return fuzzlen
796 self.printfile(True)
796 self.printfile(True)
797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
798 self.rej.append(horig)
798 self.rej.append(horig)
799 return -1
799 return -1
800
800
801 def close(self):
801 def close(self):
802 if self.dirty:
802 if self.dirty:
803 self.writelines(self.fname, self.lines, self.mode)
803 self.writelines(self.fname, self.lines, self.mode)
804 self.write_rej()
804 self.write_rej()
805 return len(self.rej)
805 return len(self.rej)
806
806
807 class header(object):
807 class header(object):
808 """patch header
808 """patch header
809 """
809 """
810 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
810 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
811 diff_re = re.compile('diff -r .* (.*)$')
811 diff_re = re.compile('diff -r .* (.*)$')
812 allhunks_re = re.compile('(?:index|deleted file) ')
812 allhunks_re = re.compile('(?:index|deleted file) ')
813 pretty_re = re.compile('(?:new file|deleted file) ')
813 pretty_re = re.compile('(?:new file|deleted file) ')
814 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
814 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
815
815
816 def __init__(self, header):
816 def __init__(self, header):
817 self.header = header
817 self.header = header
818 self.hunks = []
818 self.hunks = []
819
819
820 def binary(self):
820 def binary(self):
821 return util.any(h.startswith('index ') for h in self.header)
821 return util.any(h.startswith('index ') for h in self.header)
822
822
823 def pretty(self, fp):
823 def pretty(self, fp):
824 for h in self.header:
824 for h in self.header:
825 if h.startswith('index '):
825 if h.startswith('index '):
826 fp.write(_('this modifies a binary file (all or nothing)\n'))
826 fp.write(_('this modifies a binary file (all or nothing)\n'))
827 break
827 break
828 if self.pretty_re.match(h):
828 if self.pretty_re.match(h):
829 fp.write(h)
829 fp.write(h)
830 if self.binary():
830 if self.binary():
831 fp.write(_('this is a binary file\n'))
831 fp.write(_('this is a binary file\n'))
832 break
832 break
833 if h.startswith('---'):
833 if h.startswith('---'):
834 fp.write(_('%d hunks, %d lines changed\n') %
834 fp.write(_('%d hunks, %d lines changed\n') %
835 (len(self.hunks),
835 (len(self.hunks),
836 sum([max(h.added, h.removed) for h in self.hunks])))
836 sum([max(h.added, h.removed) for h in self.hunks])))
837 break
837 break
838 fp.write(h)
838 fp.write(h)
839
839
840 def write(self, fp):
840 def write(self, fp):
841 fp.write(''.join(self.header))
841 fp.write(''.join(self.header))
842
842
843 def allhunks(self):
843 def allhunks(self):
844 return util.any(self.allhunks_re.match(h) for h in self.header)
844 return util.any(self.allhunks_re.match(h) for h in self.header)
845
845
846 def files(self):
846 def files(self):
847 match = self.diffgit_re.match(self.header[0])
847 match = self.diffgit_re.match(self.header[0])
848 if match:
848 if match:
849 fromfile, tofile = match.groups()
849 fromfile, tofile = match.groups()
850 if fromfile == tofile:
850 if fromfile == tofile:
851 return [fromfile]
851 return [fromfile]
852 return [fromfile, tofile]
852 return [fromfile, tofile]
853 else:
853 else:
854 return self.diff_re.match(self.header[0]).groups()
854 return self.diff_re.match(self.header[0]).groups()
855
855
856 def filename(self):
856 def filename(self):
857 return self.files()[-1]
857 return self.files()[-1]
858
858
859 def __repr__(self):
859 def __repr__(self):
860 return '<header %s>' % (' '.join(map(repr, self.files())))
860 return '<header %s>' % (' '.join(map(repr, self.files())))
861
861
862 def special(self):
862 def special(self):
863 return util.any(self.special_re.match(h) for h in self.header)
863 return util.any(self.special_re.match(h) for h in self.header)
864
864
865 class recordhunk(object):
865 class recordhunk(object):
866 """patch hunk
866 """patch hunk
867
867
868 XXX shouldn't we merge this with the other hunk class?
868 XXX shouldn't we merge this with the other hunk class?
869 """
869 """
870 maxcontext = 3
870 maxcontext = 3
871
871
872 def __init__(self, header, fromline, toline, proc, before, hunk, after):
872 def __init__(self, header, fromline, toline, proc, before, hunk, after):
873 def trimcontext(number, lines):
873 def trimcontext(number, lines):
874 delta = len(lines) - self.maxcontext
874 delta = len(lines) - self.maxcontext
875 if False and delta > 0:
875 if False and delta > 0:
876 return number + delta, lines[:self.maxcontext]
876 return number + delta, lines[:self.maxcontext]
877 return number, lines
877 return number, lines
878
878
879 self.header = header
879 self.header = header
880 self.fromline, self.before = trimcontext(fromline, before)
880 self.fromline, self.before = trimcontext(fromline, before)
881 self.toline, self.after = trimcontext(toline, after)
881 self.toline, self.after = trimcontext(toline, after)
882 self.proc = proc
882 self.proc = proc
883 self.hunk = hunk
883 self.hunk = hunk
884 self.added, self.removed = self.countchanges(self.hunk)
884 self.added, self.removed = self.countchanges(self.hunk)
885
885
886 def countchanges(self, hunk):
886 def countchanges(self, hunk):
887 """hunk -> (n+,n-)"""
887 """hunk -> (n+,n-)"""
888 add = len([h for h in hunk if h[0] == '+'])
888 add = len([h for h in hunk if h[0] == '+'])
889 rem = len([h for h in hunk if h[0] == '-'])
889 rem = len([h for h in hunk if h[0] == '-'])
890 return add, rem
890 return add, rem
891
891
892 def write(self, fp):
892 def write(self, fp):
893 delta = len(self.before) + len(self.after)
893 delta = len(self.before) + len(self.after)
894 if self.after and self.after[-1] == '\\ No newline at end of file\n':
894 if self.after and self.after[-1] == '\\ No newline at end of file\n':
895 delta -= 1
895 delta -= 1
896 fromlen = delta + self.removed
896 fromlen = delta + self.removed
897 tolen = delta + self.added
897 tolen = delta + self.added
898 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
898 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
899 (self.fromline, fromlen, self.toline, tolen,
899 (self.fromline, fromlen, self.toline, tolen,
900 self.proc and (' ' + self.proc)))
900 self.proc and (' ' + self.proc)))
901 fp.write(''.join(self.before + self.hunk + self.after))
901 fp.write(''.join(self.before + self.hunk + self.after))
902
902
903 pretty = write
903 pretty = write
904
904
905 def filename(self):
905 def filename(self):
906 return self.header.filename()
906 return self.header.filename()
907
907
908 def __repr__(self):
908 def __repr__(self):
909 return '<hunk %r@%d>' % (self.filename(), self.fromline)
909 return '<hunk %r@%d>' % (self.filename(), self.fromline)
910
910
911 class hunk(object):
911 class hunk(object):
912 def __init__(self, desc, num, lr, context):
912 def __init__(self, desc, num, lr, context):
913 self.number = num
913 self.number = num
914 self.desc = desc
914 self.desc = desc
915 self.hunk = [desc]
915 self.hunk = [desc]
916 self.a = []
916 self.a = []
917 self.b = []
917 self.b = []
918 self.starta = self.lena = None
918 self.starta = self.lena = None
919 self.startb = self.lenb = None
919 self.startb = self.lenb = None
920 if lr is not None:
920 if lr is not None:
921 if context:
921 if context:
922 self.read_context_hunk(lr)
922 self.read_context_hunk(lr)
923 else:
923 else:
924 self.read_unified_hunk(lr)
924 self.read_unified_hunk(lr)
925
925
926 def getnormalized(self):
926 def getnormalized(self):
927 """Return a copy with line endings normalized to LF."""
927 """Return a copy with line endings normalized to LF."""
928
928
929 def normalize(lines):
929 def normalize(lines):
930 nlines = []
930 nlines = []
931 for line in lines:
931 for line in lines:
932 if line.endswith('\r\n'):
932 if line.endswith('\r\n'):
933 line = line[:-2] + '\n'
933 line = line[:-2] + '\n'
934 nlines.append(line)
934 nlines.append(line)
935 return nlines
935 return nlines
936
936
937 # Dummy object, it is rebuilt manually
937 # Dummy object, it is rebuilt manually
938 nh = hunk(self.desc, self.number, None, None)
938 nh = hunk(self.desc, self.number, None, None)
939 nh.number = self.number
939 nh.number = self.number
940 nh.desc = self.desc
940 nh.desc = self.desc
941 nh.hunk = self.hunk
941 nh.hunk = self.hunk
942 nh.a = normalize(self.a)
942 nh.a = normalize(self.a)
943 nh.b = normalize(self.b)
943 nh.b = normalize(self.b)
944 nh.starta = self.starta
944 nh.starta = self.starta
945 nh.startb = self.startb
945 nh.startb = self.startb
946 nh.lena = self.lena
946 nh.lena = self.lena
947 nh.lenb = self.lenb
947 nh.lenb = self.lenb
948 return nh
948 return nh
949
949
950 def read_unified_hunk(self, lr):
950 def read_unified_hunk(self, lr):
951 m = unidesc.match(self.desc)
951 m = unidesc.match(self.desc)
952 if not m:
952 if not m:
953 raise PatchError(_("bad hunk #%d") % self.number)
953 raise PatchError(_("bad hunk #%d") % self.number)
954 self.starta, self.lena, self.startb, self.lenb = m.groups()
954 self.starta, self.lena, self.startb, self.lenb = m.groups()
955 if self.lena is None:
955 if self.lena is None:
956 self.lena = 1
956 self.lena = 1
957 else:
957 else:
958 self.lena = int(self.lena)
958 self.lena = int(self.lena)
959 if self.lenb is None:
959 if self.lenb is None:
960 self.lenb = 1
960 self.lenb = 1
961 else:
961 else:
962 self.lenb = int(self.lenb)
962 self.lenb = int(self.lenb)
963 self.starta = int(self.starta)
963 self.starta = int(self.starta)
964 self.startb = int(self.startb)
964 self.startb = int(self.startb)
965 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
965 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
966 self.b)
966 self.b)
967 # if we hit eof before finishing out the hunk, the last line will
967 # if we hit eof before finishing out the hunk, the last line will
968 # be zero length. Lets try to fix it up.
968 # be zero length. Lets try to fix it up.
969 while len(self.hunk[-1]) == 0:
969 while len(self.hunk[-1]) == 0:
970 del self.hunk[-1]
970 del self.hunk[-1]
971 del self.a[-1]
971 del self.a[-1]
972 del self.b[-1]
972 del self.b[-1]
973 self.lena -= 1
973 self.lena -= 1
974 self.lenb -= 1
974 self.lenb -= 1
975 self._fixnewline(lr)
975 self._fixnewline(lr)
976
976
977 def read_context_hunk(self, lr):
977 def read_context_hunk(self, lr):
978 self.desc = lr.readline()
978 self.desc = lr.readline()
979 m = contextdesc.match(self.desc)
979 m = contextdesc.match(self.desc)
980 if not m:
980 if not m:
981 raise PatchError(_("bad hunk #%d") % self.number)
981 raise PatchError(_("bad hunk #%d") % self.number)
982 self.starta, aend = m.groups()
982 self.starta, aend = m.groups()
983 self.starta = int(self.starta)
983 self.starta = int(self.starta)
984 if aend is None:
984 if aend is None:
985 aend = self.starta
985 aend = self.starta
986 self.lena = int(aend) - self.starta
986 self.lena = int(aend) - self.starta
987 if self.starta:
987 if self.starta:
988 self.lena += 1
988 self.lena += 1
989 for x in xrange(self.lena):
989 for x in xrange(self.lena):
990 l = lr.readline()
990 l = lr.readline()
991 if l.startswith('---'):
991 if l.startswith('---'):
992 # lines addition, old block is empty
992 # lines addition, old block is empty
993 lr.push(l)
993 lr.push(l)
994 break
994 break
995 s = l[2:]
995 s = l[2:]
996 if l.startswith('- ') or l.startswith('! '):
996 if l.startswith('- ') or l.startswith('! '):
997 u = '-' + s
997 u = '-' + s
998 elif l.startswith(' '):
998 elif l.startswith(' '):
999 u = ' ' + s
999 u = ' ' + s
1000 else:
1000 else:
1001 raise PatchError(_("bad hunk #%d old text line %d") %
1001 raise PatchError(_("bad hunk #%d old text line %d") %
1002 (self.number, x))
1002 (self.number, x))
1003 self.a.append(u)
1003 self.a.append(u)
1004 self.hunk.append(u)
1004 self.hunk.append(u)
1005
1005
1006 l = lr.readline()
1006 l = lr.readline()
1007 if l.startswith('\ '):
1007 if l.startswith('\ '):
1008 s = self.a[-1][:-1]
1008 s = self.a[-1][:-1]
1009 self.a[-1] = s
1009 self.a[-1] = s
1010 self.hunk[-1] = s
1010 self.hunk[-1] = s
1011 l = lr.readline()
1011 l = lr.readline()
1012 m = contextdesc.match(l)
1012 m = contextdesc.match(l)
1013 if not m:
1013 if not m:
1014 raise PatchError(_("bad hunk #%d") % self.number)
1014 raise PatchError(_("bad hunk #%d") % self.number)
1015 self.startb, bend = m.groups()
1015 self.startb, bend = m.groups()
1016 self.startb = int(self.startb)
1016 self.startb = int(self.startb)
1017 if bend is None:
1017 if bend is None:
1018 bend = self.startb
1018 bend = self.startb
1019 self.lenb = int(bend) - self.startb
1019 self.lenb = int(bend) - self.startb
1020 if self.startb:
1020 if self.startb:
1021 self.lenb += 1
1021 self.lenb += 1
1022 hunki = 1
1022 hunki = 1
1023 for x in xrange(self.lenb):
1023 for x in xrange(self.lenb):
1024 l = lr.readline()
1024 l = lr.readline()
1025 if l.startswith('\ '):
1025 if l.startswith('\ '):
1026 # XXX: the only way to hit this is with an invalid line range.
1026 # XXX: the only way to hit this is with an invalid line range.
1027 # The no-eol marker is not counted in the line range, but I
1027 # The no-eol marker is not counted in the line range, but I
1028 # guess there are diff(1) out there which behave differently.
1028 # guess there are diff(1) out there which behave differently.
1029 s = self.b[-1][:-1]
1029 s = self.b[-1][:-1]
1030 self.b[-1] = s
1030 self.b[-1] = s
1031 self.hunk[hunki - 1] = s
1031 self.hunk[hunki - 1] = s
1032 continue
1032 continue
1033 if not l:
1033 if not l:
1034 # line deletions, new block is empty and we hit EOF
1034 # line deletions, new block is empty and we hit EOF
1035 lr.push(l)
1035 lr.push(l)
1036 break
1036 break
1037 s = l[2:]
1037 s = l[2:]
1038 if l.startswith('+ ') or l.startswith('! '):
1038 if l.startswith('+ ') or l.startswith('! '):
1039 u = '+' + s
1039 u = '+' + s
1040 elif l.startswith(' '):
1040 elif l.startswith(' '):
1041 u = ' ' + s
1041 u = ' ' + s
1042 elif len(self.b) == 0:
1042 elif len(self.b) == 0:
1043 # line deletions, new block is empty
1043 # line deletions, new block is empty
1044 lr.push(l)
1044 lr.push(l)
1045 break
1045 break
1046 else:
1046 else:
1047 raise PatchError(_("bad hunk #%d old text line %d") %
1047 raise PatchError(_("bad hunk #%d old text line %d") %
1048 (self.number, x))
1048 (self.number, x))
1049 self.b.append(s)
1049 self.b.append(s)
1050 while True:
1050 while True:
1051 if hunki >= len(self.hunk):
1051 if hunki >= len(self.hunk):
1052 h = ""
1052 h = ""
1053 else:
1053 else:
1054 h = self.hunk[hunki]
1054 h = self.hunk[hunki]
1055 hunki += 1
1055 hunki += 1
1056 if h == u:
1056 if h == u:
1057 break
1057 break
1058 elif h.startswith('-'):
1058 elif h.startswith('-'):
1059 continue
1059 continue
1060 else:
1060 else:
1061 self.hunk.insert(hunki - 1, u)
1061 self.hunk.insert(hunki - 1, u)
1062 break
1062 break
1063
1063
1064 if not self.a:
1064 if not self.a:
1065 # this happens when lines were only added to the hunk
1065 # this happens when lines were only added to the hunk
1066 for x in self.hunk:
1066 for x in self.hunk:
1067 if x.startswith('-') or x.startswith(' '):
1067 if x.startswith('-') or x.startswith(' '):
1068 self.a.append(x)
1068 self.a.append(x)
1069 if not self.b:
1069 if not self.b:
1070 # this happens when lines were only deleted from the hunk
1070 # this happens when lines were only deleted from the hunk
1071 for x in self.hunk:
1071 for x in self.hunk:
1072 if x.startswith('+') or x.startswith(' '):
1072 if x.startswith('+') or x.startswith(' '):
1073 self.b.append(x[1:])
1073 self.b.append(x[1:])
1074 # @@ -start,len +start,len @@
1074 # @@ -start,len +start,len @@
1075 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1075 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1076 self.startb, self.lenb)
1076 self.startb, self.lenb)
1077 self.hunk[0] = self.desc
1077 self.hunk[0] = self.desc
1078 self._fixnewline(lr)
1078 self._fixnewline(lr)
1079
1079
1080 def _fixnewline(self, lr):
1080 def _fixnewline(self, lr):
1081 l = lr.readline()
1081 l = lr.readline()
1082 if l.startswith('\ '):
1082 if l.startswith('\ '):
1083 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1083 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1084 else:
1084 else:
1085 lr.push(l)
1085 lr.push(l)
1086
1086
1087 def complete(self):
1087 def complete(self):
1088 return len(self.a) == self.lena and len(self.b) == self.lenb
1088 return len(self.a) == self.lena and len(self.b) == self.lenb
1089
1089
1090 def _fuzzit(self, old, new, fuzz, toponly):
1090 def _fuzzit(self, old, new, fuzz, toponly):
1091 # this removes context lines from the top and bottom of list 'l'. It
1091 # this removes context lines from the top and bottom of list 'l'. It
1092 # checks the hunk to make sure only context lines are removed, and then
1092 # checks the hunk to make sure only context lines are removed, and then
1093 # returns a new shortened list of lines.
1093 # returns a new shortened list of lines.
1094 fuzz = min(fuzz, len(old))
1094 fuzz = min(fuzz, len(old))
1095 if fuzz:
1095 if fuzz:
1096 top = 0
1096 top = 0
1097 bot = 0
1097 bot = 0
1098 hlen = len(self.hunk)
1098 hlen = len(self.hunk)
1099 for x in xrange(hlen - 1):
1099 for x in xrange(hlen - 1):
1100 # the hunk starts with the @@ line, so use x+1
1100 # the hunk starts with the @@ line, so use x+1
1101 if self.hunk[x + 1][0] == ' ':
1101 if self.hunk[x + 1][0] == ' ':
1102 top += 1
1102 top += 1
1103 else:
1103 else:
1104 break
1104 break
1105 if not toponly:
1105 if not toponly:
1106 for x in xrange(hlen - 1):
1106 for x in xrange(hlen - 1):
1107 if self.hunk[hlen - bot - 1][0] == ' ':
1107 if self.hunk[hlen - bot - 1][0] == ' ':
1108 bot += 1
1108 bot += 1
1109 else:
1109 else:
1110 break
1110 break
1111
1111
1112 bot = min(fuzz, bot)
1112 bot = min(fuzz, bot)
1113 top = min(fuzz, top)
1113 top = min(fuzz, top)
1114 return old[top:len(old) - bot], new[top:len(new) - bot], top
1114 return old[top:len(old) - bot], new[top:len(new) - bot], top
1115 return old, new, 0
1115 return old, new, 0
1116
1116
1117 def fuzzit(self, fuzz, toponly):
1117 def fuzzit(self, fuzz, toponly):
1118 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1118 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1119 oldstart = self.starta + top
1119 oldstart = self.starta + top
1120 newstart = self.startb + top
1120 newstart = self.startb + top
1121 # zero length hunk ranges already have their start decremented
1121 # zero length hunk ranges already have their start decremented
1122 if self.lena and oldstart > 0:
1122 if self.lena and oldstart > 0:
1123 oldstart -= 1
1123 oldstart -= 1
1124 if self.lenb and newstart > 0:
1124 if self.lenb and newstart > 0:
1125 newstart -= 1
1125 newstart -= 1
1126 return old, oldstart, new, newstart
1126 return old, oldstart, new, newstart
1127
1127
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # decoded payload; stays None until _read() succeeds
        self.text = None
        # True when the payload is a delta against existing file content
        self.delta = False
        # raw hunk lines as read, for later re-emission
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        # the hunk is complete once its binary payload was decoded
        return self.text is not None

    def new(self, lines):
        """Return the new file content as a single-element list.

        For delta hunks, ``lines`` is the current file content that the
        delta is applied against.
        """
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Parse a 'literal'/'delta' base85 payload from linereader ``lr``.

        Raises PatchError when the header is missing, the base85 data is
        malformed, or the decompressed size does not match the declared
        size.
        """
        def getline(lr, hunk):
            # read one raw line, record it in the hunk, return it stripped
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first character encodes the decoded length of this line:
            # 'A'-'Z' map to 1-26 and 'a'-'z' to 27-52 (git base85 framing)
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError, e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1183
1183
def parsefilename(str):
    """Extract the file name from a '--- ' or '+++ ' patch header line.

    The name ends at the first tab or, failing that, the first space;
    if neither is present the whole (newline-stripped) remainder is the
    name.
    """
    # --- filename \t|space stuff
    name = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
1193
1193
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform(' a/b/c ', 0, '')
    ('', ' a/b/c')
    >>> pathtransform(' a/b/c ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform(' a//b/c ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', path.rstrip()
    end = len(path)
    pos = 0
    remaining = strip
    while remaining > 0:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # a run of '/' characters counts as a single component boundary
        while pos < end - 1 and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1229
1229
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta record for a plain (non-git) patch hunk.

    Decides which file the hunk targets and whether the operation is an
    'ADD' or 'DELETE', based on /dev/null markers, the hunk's ranges and
    which of the candidate files actually exist in ``backend``.
    Raises PatchError when neither a source nor a destination file can
    be determined.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a zero-length range against /dev/null marks whole-file create/remove
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        # same path on both sides: avoid a second backend.exists() call
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1278
1278
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        while True:
            line = lr.readline()
            if not line:
                break
            if p(line):
                lines.append(line)
            else:
                # not part of this group: put it back for the outer loop
                lr.push(line)
                break
        return lines

    while True:
        line = lr.readline()
        if not line:
            break
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                # header lines are everything until the next '---' or 'diff'
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                # header-only diff (e.g. mode change): restore the line
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1330
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # the underlying stream is not seekable (e.g. a pipe): buffer
        # the remainder in memory so we can rewind after the scan
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller re-reads the patch from where it started
    fp.seek(pos)
    return gitpatches
1304
1356
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    # emitfile: a 'file' event is pending, emitted with the first hunk;
    # newfile: a new file header was just parsed
    emitfile = newfile = False
    # gitpatches is None until the first git header is seen, then a
    # reversed stack of pending gitpatch records
    gitpatches = None

    # our states
    BFILE = 1
    # context: None = diff style unknown yet, True = context diff,
    # False = unified diff
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # delay the 'file' event until we have its first hunk
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries (mode changes, renames without
            # hunks) that precede the current file in the patch
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining metadata-only git patches (no hunks followed)
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1403
1455
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    ``binchunk`` is the decoded delta (source-size and target-size
    varint headers followed by copy/insert instructions); ``data`` is
    the source content copies are taken from.  Returns the
    reconstructed target content.  Raises PatchError on an invalid
    opcode.
    """
    def deltahead(binchunk):
        # a delta header is a little-endian base-128 varint; return the
        # number of bytes it occupies (last byte has bit 7 clear)
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    # accumulate output pieces and join once at the end: repeated
    # 'out +=' on a string is quadratic in the number of instructions
    pieces = []
    # skip the source-size and target-size headers
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy instruction: bits 0-3 select which offset bytes
            # follow, bits 4-6 which size bytes, all little-endian
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # per patch-delta.c, a zero size means 0x10000
                size = 0x10000
            offset_end = offset + size
            pieces.append(data[offset:offset_end])
        elif cmd != 0:
            # insert instruction: cmd is the number of literal bytes
            offset_end = i + cmd
            pieces.append(binchunk[i:offset_end])
            i += cmd
        else:
            # opcode 0 is reserved
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(pieces)
1459
1511
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # thin wrapper around _applydiff with the default patchfile factory
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
1472
1524
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Generic patch-application driver.

    ``patcher`` is a factory (normally the patchfile class) invoked for
    every file the patch touches.  Returns 0 on a clean apply, 1 if any
    hunk applied with fuzz, -1 if any hunk was rejected.
    """

    if prefix:
        # clean up double slashes, lack of trailing slashes, etc
        prefix = util.normpath(prefix) + '/'
    def pstrip(p):
        # strip-1 because git-style paths carry an extra a/ b/ component
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # file selection failed earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # finish the previous file before switching targets
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # plain patch: derive the metadata from the hunk itself
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: delete, copy/rename or mode change
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    # FIXME: failing getfile has never been handled here
                    assert data is not None
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError, inst:
                # could not open the target: count it as a reject and
                # keep going so the rest of the patch still applies
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # snapshot copy/rename sources before they are modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
1558
1610
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    Runs the external program, scrapes its output for per-file status,
    adds touched files to ``files`` and raises PatchError when the
    program exits non-zero.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # initialize before the loop: some patch programs emit 'with fuzz'
    # or 'FAILED' lines before any 'patching file' line, which would
    # otherwise hit these names before assignment
    pf = ''
    printed_file = False
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    # name the file only once per file
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            # record adds/removes and detect renames by similarity
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1600
1652
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply ``patchobj`` (a path or a file-like object) via ``backend``.

    Updates ``files`` with the set of files the backend touched and
    returns whether the patch applied with fuzz.  Raises PatchError if
    any hunk was rejected and util.Abort on an unknown eolmode.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a path: assume it is already an open file object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close the file if we opened it ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1627
1679
def internalpatch(ui, repo, patchobj, strip, prefix, files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    # patch straight into the working directory, detecting renames with
    # the given similarity threshold
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
1634
1686
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply ``patchobj`` on top of changeset ``ctx`` through a
    repository backend, writing results into ``store``."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
1639
1691
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    externalpatcher = ui.config('ui', 'patch')
    if not externalpatcher:
        # no external tool configured: use the builtin implementation
        return internalpatch(ui, repo, patchname, strip, prefix, files,
                             eolmode, similarity)
    return _externalpatch(ui, repo, externalpatcher, patchname, strip,
                          files, similarity)
1661
1713
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file paths touched by the patch at ``patchpath``.

    For renames, both the source and destination paths are included.
    Raises util.Abort on unexpected parser states.
    """
    backend = fsbackend(ui, repo.root)
    changed = set()
    fp = open(patchpath, 'rb')
    try:
        for state, values in iterhunks(fp):
            if state in ('hunk', 'git'):
                # hunk bodies and extra git lines introduce no new paths
                continue
            if state != 'file':
                raise util.Abort(_('unsupported parser state: %s') % state)
            afile, bfile, first_hunk, gp = values
            if gp:
                # strip the leading path components from git metadata
                gp.path = pathtransform(gp.path, strip - 1, '')[1]
                if gp.oldpath:
                    gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   '')
            changed.add(gp.path)
            if gp.op == 'RENAME':
                changed.add(gp.oldpath)
        return changed
    finally:
        fp.close()
1685
1737
class GitDiffRequired(Exception):
    """Internal signal: the requested diff cannot be expressed in plain
    format and must be regenerated as a git-style diff."""
1688
1740
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    """Return diffopts with every supported feature group parsed."""
    # Opt in to all three feature groups at once.
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, git=True, whitespace=True,
                           formatchanging=True)

# Backwards-compatible alias kept for existing callers.
diffopts = diffallopts
1695
1747
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness
      issues with most diff parsers
    '''
    def fetch(key, name=None, getter=ui.configbool, forceplain=None):
        # command-line options win over configuration; HGPLAIN can force
        # a fixed value for presentation-only options
        if opts:
            v = opts.get(key)
            if v:
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': fetch('nodates'),
        'showfunc': fetch('show_function', 'showfunc'),
        'context': fetch('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = fetch('git')
    if whitespace:
        buildopts['ignorews'] = fetch('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = fetch('ignore_space_change',
                                            'ignorewsamount')
        buildopts['ignoreblanklines'] = fetch('ignore_blank_lines',
                                              'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = fetch('nobinary')
        buildopts['noprefix'] = fetch('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)
1736
1788
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def makefilectxgetter():
        # Memoize filelogs with a small (20-entry) LRU so repeated
        # lookups of the same file avoid re-opening its revlog.
        flcache = {}
        lru = util.deque()

        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=flcache.get(f))
            if f not in flcache:
                if len(flcache) > 20:
                    del flcache[lru.popleft()]
                flcache[f] = fctx.filelog()
            else:
                lru.remove(f)
            lru.append(f)
            return fctx

        return getfilectx

    getfilectx = makefilectxgetter()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not (modified or added or removed):
        return []

    hexfunc = hex if repo.ui.debugflag else short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2)

    def rundiff(diffopts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, diffopts, losedata, prefix)

    if not (opts.upgrade and not opts.git):
        return rundiff(opts, None)

    # upgrade mode: try a plain diff first, switching to git format as
    # soon as losedatafn reports unrepresentable data
    def losedata(fn):
        if not losedatafn or not losedatafn(fn=fn):
            raise GitDiffRequired
    try:
        # Buffer the whole output until we are sure it can be generated
        return list(rundiff(opts.copy(git=False), losedata))
    except GitDiffRequired:
        return rundiff(opts.copy(git=True), None)
1810
1862
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            # Track whether we are inside an extended header block: it
            # starts at any line that is not context/hunk/diff text and
            # ends at the first '@' hunk line.
            if head:
                if line.startswith('@'):
                    head = False
            elif line and line[0] not in ' +-@\\':
                head = True
            stripped = line
            ischange = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripped = line.rstrip()
                ischange = True

            prefixes = headprefixes if head else textprefixes
            for prefix, label in prefixes:
                if stripped.startswith(prefix):
                    if ischange:
                        for token in tabsplitter.findall(stripped):
                            if token[0] == '\t':
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripped, label)
                    break
            else:
                yield (line, '')
            if line != stripped:
                # whatever rstrip() removed gets its own warning label
                yield (line[len(stripped):], 'diff.trailingwhitespace')
1862
1914
def diffui(*args, **kw):
    """Like diff(), but yields 2-tuples of (output, label) for ui.write()."""
    # Delegate to difflabel, which attaches color labels to diff() output.
    return difflabel(diff, *args, **kw)
1866
1918
1867 def _filepairs(ctx1, modified, added, removed, copy, opts):
1919 def _filepairs(ctx1, modified, added, removed, copy, opts):
1868 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
1920 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
1869 before and f2 is the the name after. For added files, f1 will be None,
1921 before and f2 is the the name after. For added files, f1 will be None,
1870 and for removed files, f2 will be None. copyop may be set to None, 'copy'
1922 and for removed files, f2 will be None. copyop may be set to None, 'copy'
1871 or 'rename' (the latter two only if opts.git is set).'''
1923 or 'rename' (the latter two only if opts.git is set).'''
1872 gone = set()
1924 gone = set()
1873
1925
1874 copyto = dict([(v, k) for k, v in copy.items()])
1926 copyto = dict([(v, k) for k, v in copy.items()])
1875
1927
1876 addedset, removedset = set(added), set(removed)
1928 addedset, removedset = set(added), set(removed)
1877 # Fix up added, since merged-in additions appear as
1929 # Fix up added, since merged-in additions appear as
1878 # modifications during merges
1930 # modifications during merges
1879 for f in modified:
1931 for f in modified:
1880 if f not in ctx1:
1932 if f not in ctx1:
1881 addedset.add(f)
1933 addedset.add(f)
1882
1934
1883 for f in sorted(modified + added + removed):
1935 for f in sorted(modified + added + removed):
1884 copyop = None
1936 copyop = None
1885 f1, f2 = f, f
1937 f1, f2 = f, f
1886 if f in addedset:
1938 if f in addedset:
1887 f1 = None
1939 f1 = None
1888 if f in copy:
1940 if f in copy:
1889 if opts.git:
1941 if opts.git:
1890 f1 = copy[f]
1942 f1 = copy[f]
1891 if f1 in removedset and f1 not in gone:
1943 if f1 in removedset and f1 not in gone:
1892 copyop = 'rename'
1944 copyop = 'rename'
1893 gone.add(f1)
1945 gone.add(f1)
1894 else:
1946 else:
1895 copyop = 'copy'
1947 copyop = 'copy'
1896 elif f in removedset:
1948 elif f in removedset:
1897 f2 = None
1949 f2 = None
1898 if opts.git:
1950 if opts.git:
1899 # have we already reported a copy above?
1951 # have we already reported a copy above?
1900 if (f in copyto and copyto[f] in addedset
1952 if (f in copyto and copyto[f] in addedset
1901 and copy[copyto[f]] == f):
1953 and copy[copyto[f]] == f):
1902 continue
1954 continue
1903 yield f1, f2, copyop
1955 yield f1, f2, copyop
1904
1956
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    '''Yield raw text chunks (headers then hunks) for the diff between
    ctx1 and ctx2, restricted to the given file sets.

    When losedatafn is set and git mode is off, it is called for every
    file whose change cannot be fully represented in plain diff format.
    '''

    def gitindex(text):
        # sha1 of the content framed the way git hashes blobs
        if not text:
            text = ""
        h = util.sha1('blob %d\0' % len(text))
        h.update(text)
        return h.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def plaindiffline(f, revs):
        # header used for plain diffs: 'diff -r REV [-r REV] file'
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    for f1, f2, copyop in _filepairs(
            ctx1, modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        isbinary = False
        if opts.git or losedatafn:
            isbinary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            if (isbinary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = posixpath.join(prefix, f1 or f2)
        path2 = posixpath.join(prefix, f2 or f1)
        headerlines = []
        if opts.git:
            headerlines.append('diff --git %s%s %s%s' %
                               (aprefix, path1, bprefix, path2))
            if not f1: # added
                headerlines.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                headerlines.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    headerlines.append('old mode %s' % mode1)
                    headerlines.append('new mode %s' % mode2)
                if copyop is not None:
                    headerlines.append('%s from %s' % (copyop, path1))
                    headerlines.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            headerlines.append(plaindiffline(path1, revs))

        if isbinary and opts.git and not opts.nobinary:
            body = mdiff.b85diff(content1, content2)
            if body:
                headerlines.append('index %s..%s' %
                                   (gitindex(content1), gitindex(content2)))
        else:
            body = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        # suppress a lone plain 'diff ...' header with nothing under it
        if headerlines and (body or len(headerlines) > 1):
            yield '\n'.join(headerlines) + '\n'
        if body:
            yield body
1997
2049
def diffstatsum(stats):
    """Aggregate per-file diffstat tuples.

    ``stats`` is an iterable of (filename, adds, removes, isbinary)
    tuples; returns (maxnamewidth, maxtotal, addtotal, removetotal,
    anybinary).
    """
    maxfile = maxtotal = addtotal = removetotal = 0
    anybinary = False
    for fname, adds, removes, isbinary in stats:
        maxfile = max(maxfile, encoding.colwidth(fname))
        maxtotal = max(maxtotal, adds + removes)
        addtotal += adds
        removetotal += removes
        anybinary = anybinary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, anybinary
2008
2060
def diffstatdata(lines):
    """Parse diff ``lines`` into (filename, adds, removes, isbinary) tuples.

    A new entry starts at each 'diff' header line; add/remove counts and
    the binary flag accumulate until the next header or end of input.
    """
    # raw string: '\s' is an invalid escape sequence in a plain string
    # literal (deprecated, and an error in recent Python versions)
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the current file's counters, if a file has been seen
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results
2038
2090
def diffstat(lines, width=80, git=False):
    """Render diff ``lines`` as a classic diffstat listing.

    One row per file with a scaled +/- histogram, followed by a summary
    line when any files changed. Output fits within ``width`` columns.
    """
    chunks = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3  # room for the literal 'Bin' marker
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        chunks.append(' %s%s | %*s %s%s\n' %
                      (filename, padding,
                       countwidth, count, pluses, minuses))

    if stats:
        chunks.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(chunks)
2076
2128
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # split the trailing +/- histogram off the name/count part
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            plusrun = re.search(r'\++', graph)
            if plusrun:
                yield (plusrun.group(0), 'diffstat.inserted')
            minusrun = re.search(r'-+', graph)
            if minusrun:
                yield (minusrun.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now