##// END OF EJS Templates
record: move parsepatch from record to patch...
Laurent Charignon -
r24265:dc655360 default
parent child Browse files
Show More
@@ -1,514 +1,433 b''
1 # record.py
1 # record.py
2 #
2 #
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''commands to interactively select changes for commit/qrefresh'''
8 '''commands to interactively select changes for commit/qrefresh'''
9
9
10 from mercurial.i18n import _
10 from mercurial.i18n import _
11 from mercurial import cmdutil, commands, extensions, hg, patch
11 from mercurial import cmdutil, commands, extensions, hg, patch
12 from mercurial import util
12 from mercurial import util
13 import copy, cStringIO, errno, os, shutil, tempfile
13 import copy, cStringIO, errno, os, shutil, tempfile
14
14
15 cmdtable = {}
15 cmdtable = {}
16 command = cmdutil.command(cmdtable)
16 command = cmdutil.command(cmdtable)
17 testedwith = 'internal'
17 testedwith = 'internal'
18
18
19
20 def parsepatch(fp):
21 """patch -> [] of headers -> [] of hunks """
22 class parser(object):
23 """patch parsing state machine"""
24 def __init__(self):
25 self.fromline = 0
26 self.toline = 0
27 self.proc = ''
28 self.header = None
29 self.context = []
30 self.before = []
31 self.hunk = []
32 self.headers = []
33
34 def addrange(self, limits):
35 fromstart, fromend, tostart, toend, proc = limits
36 self.fromline = int(fromstart)
37 self.toline = int(tostart)
38 self.proc = proc
39
40 def addcontext(self, context):
41 if self.hunk:
42 h = patch.recordhunk(self.header, self.fromline, self.toline,
43 self.proc, self.before, self.hunk, context)
44 self.header.hunks.append(h)
45 self.fromline += len(self.before) + h.removed
46 self.toline += len(self.before) + h.added
47 self.before = []
48 self.hunk = []
49 self.proc = ''
50 self.context = context
51
52 def addhunk(self, hunk):
53 if self.context:
54 self.before = self.context
55 self.context = []
56 self.hunk = hunk
57
58 def newfile(self, hdr):
59 self.addcontext([])
60 h = patch.header(hdr)
61 self.headers.append(h)
62 self.header = h
63
64 def addother(self, line):
65 pass # 'other' lines are ignored
66
67 def finished(self):
68 self.addcontext([])
69 return self.headers
70
71 transitions = {
72 'file': {'context': addcontext,
73 'file': newfile,
74 'hunk': addhunk,
75 'range': addrange},
76 'context': {'file': newfile,
77 'hunk': addhunk,
78 'range': addrange,
79 'other': addother},
80 'hunk': {'context': addcontext,
81 'file': newfile,
82 'range': addrange},
83 'range': {'context': addcontext,
84 'hunk': addhunk},
85 'other': {'other': addother},
86 }
87
88 p = parser()
89
90 state = 'context'
91 for newstate, data in patch.scanpatch(fp):
92 try:
93 p.transitions[state][newstate](p, data)
94 except KeyError:
95 raise patch.PatchError('unhandled transition: %s -> %s' %
96 (state, newstate))
97 state = newstate
98 return p.finished()
99
100 def filterpatch(ui, headers):
19 def filterpatch(ui, headers):
101 """Interactively filter patch chunks into applied-only chunks"""
20 """Interactively filter patch chunks into applied-only chunks"""
102
21
103 def prompt(skipfile, skipall, query, chunk):
22 def prompt(skipfile, skipall, query, chunk):
104 """prompt query, and process base inputs
23 """prompt query, and process base inputs
105
24
106 - y/n for the rest of file
25 - y/n for the rest of file
107 - y/n for the rest
26 - y/n for the rest
108 - ? (help)
27 - ? (help)
109 - q (quit)
28 - q (quit)
110
29
111 Return True/False and possibly updated skipfile and skipall.
30 Return True/False and possibly updated skipfile and skipall.
112 """
31 """
113 newpatches = None
32 newpatches = None
114 if skipall is not None:
33 if skipall is not None:
115 return skipall, skipfile, skipall, newpatches
34 return skipall, skipfile, skipall, newpatches
116 if skipfile is not None:
35 if skipfile is not None:
117 return skipfile, skipfile, skipall, newpatches
36 return skipfile, skipfile, skipall, newpatches
118 while True:
37 while True:
119 resps = _('[Ynesfdaq?]'
38 resps = _('[Ynesfdaq?]'
120 '$$ &Yes, record this change'
39 '$$ &Yes, record this change'
121 '$$ &No, skip this change'
40 '$$ &No, skip this change'
122 '$$ &Edit this change manually'
41 '$$ &Edit this change manually'
123 '$$ &Skip remaining changes to this file'
42 '$$ &Skip remaining changes to this file'
124 '$$ Record remaining changes to this &file'
43 '$$ Record remaining changes to this &file'
125 '$$ &Done, skip remaining changes and files'
44 '$$ &Done, skip remaining changes and files'
126 '$$ Record &all changes to all remaining files'
45 '$$ Record &all changes to all remaining files'
127 '$$ &Quit, recording no changes'
46 '$$ &Quit, recording no changes'
128 '$$ &? (display help)')
47 '$$ &? (display help)')
129 r = ui.promptchoice("%s %s" % (query, resps))
48 r = ui.promptchoice("%s %s" % (query, resps))
130 ui.write("\n")
49 ui.write("\n")
131 if r == 8: # ?
50 if r == 8: # ?
132 for c, t in ui.extractchoices(resps)[1]:
51 for c, t in ui.extractchoices(resps)[1]:
133 ui.write('%s - %s\n' % (c, t.lower()))
52 ui.write('%s - %s\n' % (c, t.lower()))
134 continue
53 continue
135 elif r == 0: # yes
54 elif r == 0: # yes
136 ret = True
55 ret = True
137 elif r == 1: # no
56 elif r == 1: # no
138 ret = False
57 ret = False
139 elif r == 2: # Edit patch
58 elif r == 2: # Edit patch
140 if chunk is None:
59 if chunk is None:
141 ui.write(_('cannot edit patch for whole file'))
60 ui.write(_('cannot edit patch for whole file'))
142 ui.write("\n")
61 ui.write("\n")
143 continue
62 continue
144 if chunk.header.binary():
63 if chunk.header.binary():
145 ui.write(_('cannot edit patch for binary file'))
64 ui.write(_('cannot edit patch for binary file'))
146 ui.write("\n")
65 ui.write("\n")
147 continue
66 continue
148 # Patch comment based on the Git one (based on comment at end of
67 # Patch comment based on the Git one (based on comment at end of
149 # http://mercurial.selenic.com/wiki/RecordExtension)
68 # http://mercurial.selenic.com/wiki/RecordExtension)
150 phelp = '---' + _("""
69 phelp = '---' + _("""
151 To remove '-' lines, make them ' ' lines (context).
70 To remove '-' lines, make them ' ' lines (context).
152 To remove '+' lines, delete them.
71 To remove '+' lines, delete them.
153 Lines starting with # will be removed from the patch.
72 Lines starting with # will be removed from the patch.
154
73
155 If the patch applies cleanly, the edited hunk will immediately be
74 If the patch applies cleanly, the edited hunk will immediately be
156 added to the record list. If it does not apply cleanly, a rejects
75 added to the record list. If it does not apply cleanly, a rejects
157 file will be generated: you can use that when you try again. If
76 file will be generated: you can use that when you try again. If
158 all lines of the hunk are removed, then the edit is aborted and
77 all lines of the hunk are removed, then the edit is aborted and
159 the hunk is left unchanged.
78 the hunk is left unchanged.
160 """)
79 """)
161 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
80 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
162 suffix=".diff", text=True)
81 suffix=".diff", text=True)
163 ncpatchfp = None
82 ncpatchfp = None
164 try:
83 try:
165 # Write the initial patch
84 # Write the initial patch
166 f = os.fdopen(patchfd, "w")
85 f = os.fdopen(patchfd, "w")
167 chunk.header.write(f)
86 chunk.header.write(f)
168 chunk.write(f)
87 chunk.write(f)
169 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
88 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
170 f.close()
89 f.close()
171 # Start the editor and wait for it to complete
90 # Start the editor and wait for it to complete
172 editor = ui.geteditor()
91 editor = ui.geteditor()
173 ui.system("%s \"%s\"" % (editor, patchfn),
92 ui.system("%s \"%s\"" % (editor, patchfn),
174 environ={'HGUSER': ui.username()},
93 environ={'HGUSER': ui.username()},
175 onerr=util.Abort, errprefix=_("edit failed"))
94 onerr=util.Abort, errprefix=_("edit failed"))
176 # Remove comment lines
95 # Remove comment lines
177 patchfp = open(patchfn)
96 patchfp = open(patchfn)
178 ncpatchfp = cStringIO.StringIO()
97 ncpatchfp = cStringIO.StringIO()
179 for line in patchfp:
98 for line in patchfp:
180 if not line.startswith('#'):
99 if not line.startswith('#'):
181 ncpatchfp.write(line)
100 ncpatchfp.write(line)
182 patchfp.close()
101 patchfp.close()
183 ncpatchfp.seek(0)
102 ncpatchfp.seek(0)
184 newpatches = parsepatch(ncpatchfp)
103 newpatches = patch.parsepatch(ncpatchfp)
185 finally:
104 finally:
186 os.unlink(patchfn)
105 os.unlink(patchfn)
187 del ncpatchfp
106 del ncpatchfp
188 # Signal that the chunk shouldn't be applied as-is, but
107 # Signal that the chunk shouldn't be applied as-is, but
189 # provide the new patch to be used instead.
108 # provide the new patch to be used instead.
190 ret = False
109 ret = False
191 elif r == 3: # Skip
110 elif r == 3: # Skip
192 ret = skipfile = False
111 ret = skipfile = False
193 elif r == 4: # file (Record remaining)
112 elif r == 4: # file (Record remaining)
194 ret = skipfile = True
113 ret = skipfile = True
195 elif r == 5: # done, skip remaining
114 elif r == 5: # done, skip remaining
196 ret = skipall = False
115 ret = skipall = False
197 elif r == 6: # all
116 elif r == 6: # all
198 ret = skipall = True
117 ret = skipall = True
199 elif r == 7: # quit
118 elif r == 7: # quit
200 raise util.Abort(_('user quit'))
119 raise util.Abort(_('user quit'))
201 return ret, skipfile, skipall, newpatches
120 return ret, skipfile, skipall, newpatches
202
121
203 seen = set()
122 seen = set()
204 applied = {} # 'filename' -> [] of chunks
123 applied = {} # 'filename' -> [] of chunks
205 skipfile, skipall = None, None
124 skipfile, skipall = None, None
206 pos, total = 1, sum(len(h.hunks) for h in headers)
125 pos, total = 1, sum(len(h.hunks) for h in headers)
207 for h in headers:
126 for h in headers:
208 pos += len(h.hunks)
127 pos += len(h.hunks)
209 skipfile = None
128 skipfile = None
210 fixoffset = 0
129 fixoffset = 0
211 hdr = ''.join(h.header)
130 hdr = ''.join(h.header)
212 if hdr in seen:
131 if hdr in seen:
213 continue
132 continue
214 seen.add(hdr)
133 seen.add(hdr)
215 if skipall is None:
134 if skipall is None:
216 h.pretty(ui)
135 h.pretty(ui)
217 msg = (_('examine changes to %s?') %
136 msg = (_('examine changes to %s?') %
218 _(' and ').join("'%s'" % f for f in h.files()))
137 _(' and ').join("'%s'" % f for f in h.files()))
219 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
138 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
220 if not r:
139 if not r:
221 continue
140 continue
222 applied[h.filename()] = [h]
141 applied[h.filename()] = [h]
223 if h.allhunks():
142 if h.allhunks():
224 applied[h.filename()] += h.hunks
143 applied[h.filename()] += h.hunks
225 continue
144 continue
226 for i, chunk in enumerate(h.hunks):
145 for i, chunk in enumerate(h.hunks):
227 if skipfile is None and skipall is None:
146 if skipfile is None and skipall is None:
228 chunk.pretty(ui)
147 chunk.pretty(ui)
229 if total == 1:
148 if total == 1:
230 msg = _("record this change to '%s'?") % chunk.filename()
149 msg = _("record this change to '%s'?") % chunk.filename()
231 else:
150 else:
232 idx = pos - len(h.hunks) + i
151 idx = pos - len(h.hunks) + i
233 msg = _("record change %d/%d to '%s'?") % (idx, total,
152 msg = _("record change %d/%d to '%s'?") % (idx, total,
234 chunk.filename())
153 chunk.filename())
235 r, skipfile, skipall, newpatches = prompt(skipfile,
154 r, skipfile, skipall, newpatches = prompt(skipfile,
236 skipall, msg, chunk)
155 skipall, msg, chunk)
237 if r:
156 if r:
238 if fixoffset:
157 if fixoffset:
239 chunk = copy.copy(chunk)
158 chunk = copy.copy(chunk)
240 chunk.toline += fixoffset
159 chunk.toline += fixoffset
241 applied[chunk.filename()].append(chunk)
160 applied[chunk.filename()].append(chunk)
242 elif newpatches is not None:
161 elif newpatches is not None:
243 for newpatch in newpatches:
162 for newpatch in newpatches:
244 for newhunk in newpatch.hunks:
163 for newhunk in newpatch.hunks:
245 if fixoffset:
164 if fixoffset:
246 newhunk.toline += fixoffset
165 newhunk.toline += fixoffset
247 applied[newhunk.filename()].append(newhunk)
166 applied[newhunk.filename()].append(newhunk)
248 else:
167 else:
249 fixoffset += chunk.removed - chunk.added
168 fixoffset += chunk.removed - chunk.added
250 return sum([h for h in applied.itervalues()
169 return sum([h for h in applied.itervalues()
251 if h[0].special() or len(h) > 1], [])
170 if h[0].special() or len(h) > 1], [])
252
171
253 @command("record",
172 @command("record",
254 # same options as commit + white space diff options
173 # same options as commit + white space diff options
255 commands.table['^commit|ci'][1][:] + commands.diffwsopts,
174 commands.table['^commit|ci'][1][:] + commands.diffwsopts,
256 _('hg record [OPTION]... [FILE]...'))
175 _('hg record [OPTION]... [FILE]...'))
257 def record(ui, repo, *pats, **opts):
176 def record(ui, repo, *pats, **opts):
258 '''interactively select changes to commit
177 '''interactively select changes to commit
259
178
260 If a list of files is omitted, all changes reported by :hg:`status`
179 If a list of files is omitted, all changes reported by :hg:`status`
261 will be candidates for recording.
180 will be candidates for recording.
262
181
263 See :hg:`help dates` for a list of formats valid for -d/--date.
182 See :hg:`help dates` for a list of formats valid for -d/--date.
264
183
265 You will be prompted for whether to record changes to each
184 You will be prompted for whether to record changes to each
266 modified file, and for files with multiple changes, for each
185 modified file, and for files with multiple changes, for each
267 change to use. For each query, the following responses are
186 change to use. For each query, the following responses are
268 possible::
187 possible::
269
188
270 y - record this change
189 y - record this change
271 n - skip this change
190 n - skip this change
272 e - edit this change manually
191 e - edit this change manually
273
192
274 s - skip remaining changes to this file
193 s - skip remaining changes to this file
275 f - record remaining changes to this file
194 f - record remaining changes to this file
276
195
277 d - done, skip remaining changes and files
196 d - done, skip remaining changes and files
278 a - record all changes to all remaining files
197 a - record all changes to all remaining files
279 q - quit, recording no changes
198 q - quit, recording no changes
280
199
281 ? - display help
200 ? - display help
282
201
283 This command is not available when committing a merge.'''
202 This command is not available when committing a merge.'''
284
203
285 dorecord(ui, repo, commands.commit, 'commit', False, *pats, **opts)
204 dorecord(ui, repo, commands.commit, 'commit', False, *pats, **opts)
286
205
287 def qrefresh(origfn, ui, repo, *pats, **opts):
206 def qrefresh(origfn, ui, repo, *pats, **opts):
288 if not opts['interactive']:
207 if not opts['interactive']:
289 return origfn(ui, repo, *pats, **opts)
208 return origfn(ui, repo, *pats, **opts)
290
209
291 mq = extensions.find('mq')
210 mq = extensions.find('mq')
292
211
293 def committomq(ui, repo, *pats, **opts):
212 def committomq(ui, repo, *pats, **opts):
294 # At this point the working copy contains only changes that
213 # At this point the working copy contains only changes that
295 # were accepted. All other changes were reverted.
214 # were accepted. All other changes were reverted.
296 # We can't pass *pats here since qrefresh will undo all other
215 # We can't pass *pats here since qrefresh will undo all other
297 # changed files in the patch that aren't in pats.
216 # changed files in the patch that aren't in pats.
298 mq.refresh(ui, repo, **opts)
217 mq.refresh(ui, repo, **opts)
299
218
300 # backup all changed files
219 # backup all changed files
301 dorecord(ui, repo, committomq, 'qrefresh', True, *pats, **opts)
220 dorecord(ui, repo, committomq, 'qrefresh', True, *pats, **opts)
302
221
303 # This command registration is replaced during uisetup().
222 # This command registration is replaced during uisetup().
304 @command('qrecord',
223 @command('qrecord',
305 [],
224 [],
306 _('hg qrecord [OPTION]... PATCH [FILE]...'),
225 _('hg qrecord [OPTION]... PATCH [FILE]...'),
307 inferrepo=True)
226 inferrepo=True)
308 def qrecord(ui, repo, patch, *pats, **opts):
227 def qrecord(ui, repo, patch, *pats, **opts):
309 '''interactively record a new patch
228 '''interactively record a new patch
310
229
311 See :hg:`help qnew` & :hg:`help record` for more information and
230 See :hg:`help qnew` & :hg:`help record` for more information and
312 usage.
231 usage.
313 '''
232 '''
314
233
315 try:
234 try:
316 mq = extensions.find('mq')
235 mq = extensions.find('mq')
317 except KeyError:
236 except KeyError:
318 raise util.Abort(_("'mq' extension not loaded"))
237 raise util.Abort(_("'mq' extension not loaded"))
319
238
320 repo.mq.checkpatchname(patch)
239 repo.mq.checkpatchname(patch)
321
240
322 def committomq(ui, repo, *pats, **opts):
241 def committomq(ui, repo, *pats, **opts):
323 opts['checkname'] = False
242 opts['checkname'] = False
324 mq.new(ui, repo, patch, *pats, **opts)
243 mq.new(ui, repo, patch, *pats, **opts)
325
244
326 dorecord(ui, repo, committomq, 'qnew', False, *pats, **opts)
245 dorecord(ui, repo, committomq, 'qnew', False, *pats, **opts)
327
246
328 def qnew(origfn, ui, repo, patch, *args, **opts):
247 def qnew(origfn, ui, repo, patch, *args, **opts):
329 if opts['interactive']:
248 if opts['interactive']:
330 return qrecord(ui, repo, patch, *args, **opts)
249 return qrecord(ui, repo, patch, *args, **opts)
331 return origfn(ui, repo, patch, *args, **opts)
250 return origfn(ui, repo, patch, *args, **opts)
332
251
333 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts):
252 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts):
334 if not ui.interactive():
253 if not ui.interactive():
335 raise util.Abort(_('running non-interactively, use %s instead') %
254 raise util.Abort(_('running non-interactively, use %s instead') %
336 cmdsuggest)
255 cmdsuggest)
337
256
338 # make sure username is set before going interactive
257 # make sure username is set before going interactive
339 if not opts.get('user'):
258 if not opts.get('user'):
340 ui.username() # raise exception, username not provided
259 ui.username() # raise exception, username not provided
341
260
342 def recordfunc(ui, repo, message, match, opts):
261 def recordfunc(ui, repo, message, match, opts):
343 """This is generic record driver.
262 """This is generic record driver.
344
263
345 Its job is to interactively filter local changes, and
264 Its job is to interactively filter local changes, and
346 accordingly prepare working directory into a state in which the
265 accordingly prepare working directory into a state in which the
347 job can be delegated to a non-interactive commit command such as
266 job can be delegated to a non-interactive commit command such as
348 'commit' or 'qrefresh'.
267 'commit' or 'qrefresh'.
349
268
350 After the actual job is done by non-interactive command, the
269 After the actual job is done by non-interactive command, the
351 working directory is restored to its original state.
270 working directory is restored to its original state.
352
271
353 In the end we'll record interesting changes, and everything else
272 In the end we'll record interesting changes, and everything else
354 will be left in place, so the user can continue working.
273 will be left in place, so the user can continue working.
355 """
274 """
356
275
357 cmdutil.checkunfinished(repo, commit=True)
276 cmdutil.checkunfinished(repo, commit=True)
358 merge = len(repo[None].parents()) > 1
277 merge = len(repo[None].parents()) > 1
359 if merge:
278 if merge:
360 raise util.Abort(_('cannot partially commit a merge '
279 raise util.Abort(_('cannot partially commit a merge '
361 '(use "hg commit" instead)'))
280 '(use "hg commit" instead)'))
362
281
363 status = repo.status(match=match)
282 status = repo.status(match=match)
364 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
283 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
365 diffopts.nodates = True
284 diffopts.nodates = True
366 diffopts.git = True
285 diffopts.git = True
367 originalchunks = patch.diff(repo, changes=status, opts=diffopts)
286 originalchunks = patch.diff(repo, changes=status, opts=diffopts)
368 fp = cStringIO.StringIO()
287 fp = cStringIO.StringIO()
369 fp.write(''.join(originalchunks))
288 fp.write(''.join(originalchunks))
370 fp.seek(0)
289 fp.seek(0)
371
290
372 # 1. filter patch, so we have intending-to apply subset of it
291 # 1. filter patch, so we have intending-to apply subset of it
373 try:
292 try:
374 chunks = filterpatch(ui, parsepatch(fp))
293 chunks = filterpatch(ui, patch.parsepatch(fp))
375 except patch.PatchError, err:
294 except patch.PatchError, err:
376 raise util.Abort(_('error parsing patch: %s') % err)
295 raise util.Abort(_('error parsing patch: %s') % err)
377
296
378 del fp
297 del fp
379
298
380 contenders = set()
299 contenders = set()
381 for h in chunks:
300 for h in chunks:
382 try:
301 try:
383 contenders.update(set(h.files()))
302 contenders.update(set(h.files()))
384 except AttributeError:
303 except AttributeError:
385 pass
304 pass
386
305
387 changed = status.modified + status.added + status.removed
306 changed = status.modified + status.added + status.removed
388 newfiles = [f for f in changed if f in contenders]
307 newfiles = [f for f in changed if f in contenders]
389 if not newfiles:
308 if not newfiles:
390 ui.status(_('no changes to record\n'))
309 ui.status(_('no changes to record\n'))
391 return 0
310 return 0
392
311
393 newandmodifiedfiles = set()
312 newandmodifiedfiles = set()
394 for h in chunks:
313 for h in chunks:
395 ishunk = isinstance(h, patch.recordhunk)
314 ishunk = isinstance(h, patch.recordhunk)
396 isnew = h.filename() in status.added
315 isnew = h.filename() in status.added
397 if ishunk and isnew and not h in originalchunks:
316 if ishunk and isnew and not h in originalchunks:
398 newandmodifiedfiles.add(h.filename())
317 newandmodifiedfiles.add(h.filename())
399
318
400 modified = set(status.modified)
319 modified = set(status.modified)
401
320
402 # 2. backup changed files, so we can restore them in the end
321 # 2. backup changed files, so we can restore them in the end
403
322
404 if backupall:
323 if backupall:
405 tobackup = changed
324 tobackup = changed
406 else:
325 else:
407 tobackup = [f for f in newfiles
326 tobackup = [f for f in newfiles
408 if f in modified or f in newandmodifiedfiles]
327 if f in modified or f in newandmodifiedfiles]
409
328
410 backups = {}
329 backups = {}
411 if tobackup:
330 if tobackup:
412 backupdir = repo.join('record-backups')
331 backupdir = repo.join('record-backups')
413 try:
332 try:
414 os.mkdir(backupdir)
333 os.mkdir(backupdir)
415 except OSError, err:
334 except OSError, err:
416 if err.errno != errno.EEXIST:
335 if err.errno != errno.EEXIST:
417 raise
336 raise
418 try:
337 try:
419 # backup continues
338 # backup continues
420 for f in tobackup:
339 for f in tobackup:
421 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
340 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
422 dir=backupdir)
341 dir=backupdir)
423 os.close(fd)
342 os.close(fd)
424 ui.debug('backup %r as %r\n' % (f, tmpname))
343 ui.debug('backup %r as %r\n' % (f, tmpname))
425 util.copyfile(repo.wjoin(f), tmpname)
344 util.copyfile(repo.wjoin(f), tmpname)
426 shutil.copystat(repo.wjoin(f), tmpname)
345 shutil.copystat(repo.wjoin(f), tmpname)
427 backups[f] = tmpname
346 backups[f] = tmpname
428
347
429 fp = cStringIO.StringIO()
348 fp = cStringIO.StringIO()
430 for c in chunks:
349 for c in chunks:
431 fname = c.filename()
350 fname = c.filename()
432 if fname in backups or fname in newandmodifiedfiles:
351 if fname in backups or fname in newandmodifiedfiles:
433 c.write(fp)
352 c.write(fp)
434 dopatch = fp.tell()
353 dopatch = fp.tell()
435 fp.seek(0)
354 fp.seek(0)
436
355
437 [os.unlink(c) for c in newandmodifiedfiles]
356 [os.unlink(c) for c in newandmodifiedfiles]
438
357
439 # 3a. apply filtered patch to clean repo (clean)
358 # 3a. apply filtered patch to clean repo (clean)
440 if backups:
359 if backups:
441 hg.revert(repo, repo.dirstate.p1(),
360 hg.revert(repo, repo.dirstate.p1(),
442 lambda key: key in backups)
361 lambda key: key in backups)
443
362
444 # 3b. (apply)
363 # 3b. (apply)
445 if dopatch:
364 if dopatch:
446 try:
365 try:
447 ui.debug('applying patch\n')
366 ui.debug('applying patch\n')
448 ui.debug(fp.getvalue())
367 ui.debug(fp.getvalue())
449 patch.internalpatch(ui, repo, fp, 1, '', eolmode=None)
368 patch.internalpatch(ui, repo, fp, 1, '', eolmode=None)
450 except patch.PatchError, err:
369 except patch.PatchError, err:
451 raise util.Abort(str(err))
370 raise util.Abort(str(err))
452 del fp
371 del fp
453
372
454 # 4. We prepared working directory according to filtered
373 # 4. We prepared working directory according to filtered
455 # patch. Now is the time to delegate the job to
374 # patch. Now is the time to delegate the job to
456 # commit/qrefresh or the like!
375 # commit/qrefresh or the like!
457
376
458 # Make all of the pathnames absolute.
377 # Make all of the pathnames absolute.
459 newfiles = [repo.wjoin(nf) for nf in newfiles]
378 newfiles = [repo.wjoin(nf) for nf in newfiles]
460 commitfunc(ui, repo, *newfiles, **opts)
379 commitfunc(ui, repo, *newfiles, **opts)
461
380
462 return 0
381 return 0
463 finally:
382 finally:
464 # 5. finally restore backed-up files
383 # 5. finally restore backed-up files
465 try:
384 try:
466 for realname, tmpname in backups.iteritems():
385 for realname, tmpname in backups.iteritems():
467 ui.debug('restoring %r to %r\n' % (tmpname, realname))
386 ui.debug('restoring %r to %r\n' % (tmpname, realname))
468 util.copyfile(tmpname, repo.wjoin(realname))
387 util.copyfile(tmpname, repo.wjoin(realname))
469 # Our calls to copystat() here and above are a
388 # Our calls to copystat() here and above are a
470 # hack to trick any editors that have f open that
389 # hack to trick any editors that have f open that
471 # we haven't modified them.
390 # we haven't modified them.
472 #
391 #
473 # Also note that this racy as an editor could
392 # Also note that this racy as an editor could
474 # notice the file's mtime before we've finished
393 # notice the file's mtime before we've finished
475 # writing it.
394 # writing it.
476 shutil.copystat(tmpname, repo.wjoin(realname))
395 shutil.copystat(tmpname, repo.wjoin(realname))
477 os.unlink(tmpname)
396 os.unlink(tmpname)
478 if tobackup:
397 if tobackup:
479 os.rmdir(backupdir)
398 os.rmdir(backupdir)
480 except OSError:
399 except OSError:
481 pass
400 pass
482
401
483 # wrap ui.write so diff output can be labeled/colorized
402 # wrap ui.write so diff output can be labeled/colorized
484 def wrapwrite(orig, *args, **kw):
403 def wrapwrite(orig, *args, **kw):
485 label = kw.pop('label', '')
404 label = kw.pop('label', '')
486 for chunk, l in patch.difflabel(lambda: args):
405 for chunk, l in patch.difflabel(lambda: args):
487 orig(chunk, label=label + l)
406 orig(chunk, label=label + l)
488 oldwrite = ui.write
407 oldwrite = ui.write
489 extensions.wrapfunction(ui, 'write', wrapwrite)
408 extensions.wrapfunction(ui, 'write', wrapwrite)
490 try:
409 try:
491 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
410 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
492 finally:
411 finally:
493 ui.write = oldwrite
412 ui.write = oldwrite
494
413
495 def uisetup(ui):
414 def uisetup(ui):
496 try:
415 try:
497 mq = extensions.find('mq')
416 mq = extensions.find('mq')
498 except KeyError:
417 except KeyError:
499 return
418 return
500
419
501 cmdtable["qrecord"] = \
420 cmdtable["qrecord"] = \
502 (qrecord,
421 (qrecord,
503 # same options as qnew, but copy them so we don't get
422 # same options as qnew, but copy them so we don't get
504 # -i/--interactive for qrecord and add white space diff options
423 # -i/--interactive for qrecord and add white space diff options
505 mq.cmdtable['^qnew'][1][:] + commands.diffwsopts,
424 mq.cmdtable['^qnew'][1][:] + commands.diffwsopts,
506 _('hg qrecord [OPTION]... PATCH [FILE]...'))
425 _('hg qrecord [OPTION]... PATCH [FILE]...'))
507
426
508 _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
427 _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
509 _wrapcmd('qrefresh', mq.cmdtable, qrefresh,
428 _wrapcmd('qrefresh', mq.cmdtable, qrefresh,
510 _("interactively select changes to refresh"))
429 _("interactively select changes to refresh"))
511
430
512 def _wrapcmd(cmd, table, wrapfn, msg):
431 def _wrapcmd(cmd, table, wrapfn, msg):
513 entry = extensions.wrapcommand(table, cmd, wrapfn)
432 entry = extensions.wrapcommand(table, cmd, wrapfn)
514 entry[1].append(('i', 'interactive', None, msg))
433 entry[1].append(('i', 'interactive', None, msg))
@@ -1,2146 +1,2226 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email, os, errno, re, posixpath
9 import cStringIO, email, os, errno, re, posixpath
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11 # On python2.4 you have to import these by name or they fail to
11 # On python2.4 you have to import these by name or they fail to
12 # load. This was not a problem on Python 2.7.
12 # load. This was not a problem on Python 2.7.
13 import email.Generator
13 import email.Generator
14 import email.Parser
14 import email.Parser
15
15
16 from i18n import _
16 from i18n import _
17 from node import hex, short
17 from node import hex, short
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19
19
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
22
22
23 class PatchError(Exception):
23 class PatchError(Exception):
24 pass
24 pass
25
25
26
26
27 # public functions
27 # public functions
28
28
29 def split(stream):
29 def split(stream):
30 '''return an iterator of individual patches from a stream'''
30 '''return an iterator of individual patches from a stream'''
31 def isheader(line, inheader):
31 def isheader(line, inheader):
32 if inheader and line[0] in (' ', '\t'):
32 if inheader and line[0] in (' ', '\t'):
33 # continuation
33 # continuation
34 return True
34 return True
35 if line[0] in (' ', '-', '+'):
35 if line[0] in (' ', '-', '+'):
36 # diff line - don't check for header pattern in there
36 # diff line - don't check for header pattern in there
37 return False
37 return False
38 l = line.split(': ', 1)
38 l = line.split(': ', 1)
39 return len(l) == 2 and ' ' not in l[0]
39 return len(l) == 2 and ' ' not in l[0]
40
40
41 def chunk(lines):
41 def chunk(lines):
42 return cStringIO.StringIO(''.join(lines))
42 return cStringIO.StringIO(''.join(lines))
43
43
44 def hgsplit(stream, cur):
44 def hgsplit(stream, cur):
45 inheader = True
45 inheader = True
46
46
47 for line in stream:
47 for line in stream:
48 if not line.strip():
48 if not line.strip():
49 inheader = False
49 inheader = False
50 if not inheader and line.startswith('# HG changeset patch'):
50 if not inheader and line.startswith('# HG changeset patch'):
51 yield chunk(cur)
51 yield chunk(cur)
52 cur = []
52 cur = []
53 inheader = True
53 inheader = True
54
54
55 cur.append(line)
55 cur.append(line)
56
56
57 if cur:
57 if cur:
58 yield chunk(cur)
58 yield chunk(cur)
59
59
60 def mboxsplit(stream, cur):
60 def mboxsplit(stream, cur):
61 for line in stream:
61 for line in stream:
62 if line.startswith('From '):
62 if line.startswith('From '):
63 for c in split(chunk(cur[1:])):
63 for c in split(chunk(cur[1:])):
64 yield c
64 yield c
65 cur = []
65 cur = []
66
66
67 cur.append(line)
67 cur.append(line)
68
68
69 if cur:
69 if cur:
70 for c in split(chunk(cur[1:])):
70 for c in split(chunk(cur[1:])):
71 yield c
71 yield c
72
72
73 def mimesplit(stream, cur):
73 def mimesplit(stream, cur):
74 def msgfp(m):
74 def msgfp(m):
75 fp = cStringIO.StringIO()
75 fp = cStringIO.StringIO()
76 g = email.Generator.Generator(fp, mangle_from_=False)
76 g = email.Generator.Generator(fp, mangle_from_=False)
77 g.flatten(m)
77 g.flatten(m)
78 fp.seek(0)
78 fp.seek(0)
79 return fp
79 return fp
80
80
81 for line in stream:
81 for line in stream:
82 cur.append(line)
82 cur.append(line)
83 c = chunk(cur)
83 c = chunk(cur)
84
84
85 m = email.Parser.Parser().parse(c)
85 m = email.Parser.Parser().parse(c)
86 if not m.is_multipart():
86 if not m.is_multipart():
87 yield msgfp(m)
87 yield msgfp(m)
88 else:
88 else:
89 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
89 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
90 for part in m.walk():
90 for part in m.walk():
91 ct = part.get_content_type()
91 ct = part.get_content_type()
92 if ct not in ok_types:
92 if ct not in ok_types:
93 continue
93 continue
94 yield msgfp(part)
94 yield msgfp(part)
95
95
96 def headersplit(stream, cur):
96 def headersplit(stream, cur):
97 inheader = False
97 inheader = False
98
98
99 for line in stream:
99 for line in stream:
100 if not inheader and isheader(line, inheader):
100 if not inheader and isheader(line, inheader):
101 yield chunk(cur)
101 yield chunk(cur)
102 cur = []
102 cur = []
103 inheader = True
103 inheader = True
104 if inheader and not isheader(line, inheader):
104 if inheader and not isheader(line, inheader):
105 inheader = False
105 inheader = False
106
106
107 cur.append(line)
107 cur.append(line)
108
108
109 if cur:
109 if cur:
110 yield chunk(cur)
110 yield chunk(cur)
111
111
112 def remainder(cur):
112 def remainder(cur):
113 yield chunk(cur)
113 yield chunk(cur)
114
114
115 class fiter(object):
115 class fiter(object):
116 def __init__(self, fp):
116 def __init__(self, fp):
117 self.fp = fp
117 self.fp = fp
118
118
119 def __iter__(self):
119 def __iter__(self):
120 return self
120 return self
121
121
122 def next(self):
122 def next(self):
123 l = self.fp.readline()
123 l = self.fp.readline()
124 if not l:
124 if not l:
125 raise StopIteration
125 raise StopIteration
126 return l
126 return l
127
127
128 inheader = False
128 inheader = False
129 cur = []
129 cur = []
130
130
131 mimeheaders = ['content-type']
131 mimeheaders = ['content-type']
132
132
133 if not util.safehasattr(stream, 'next'):
133 if not util.safehasattr(stream, 'next'):
134 # http responses, for example, have readline but not next
134 # http responses, for example, have readline but not next
135 stream = fiter(stream)
135 stream = fiter(stream)
136
136
137 for line in stream:
137 for line in stream:
138 cur.append(line)
138 cur.append(line)
139 if line.startswith('# HG changeset patch'):
139 if line.startswith('# HG changeset patch'):
140 return hgsplit(stream, cur)
140 return hgsplit(stream, cur)
141 elif line.startswith('From '):
141 elif line.startswith('From '):
142 return mboxsplit(stream, cur)
142 return mboxsplit(stream, cur)
143 elif isheader(line, inheader):
143 elif isheader(line, inheader):
144 inheader = True
144 inheader = True
145 if line.split(':', 1)[0].lower() in mimeheaders:
145 if line.split(':', 1)[0].lower() in mimeheaders:
146 # let email parser handle this
146 # let email parser handle this
147 return mimesplit(stream, cur)
147 return mimesplit(stream, cur)
148 elif line.startswith('--- ') and inheader:
148 elif line.startswith('--- ') and inheader:
149 # No evil headers seen by diff start, split by hand
149 # No evil headers seen by diff start, split by hand
150 return headersplit(stream, cur)
150 return headersplit(stream, cur)
151 # Not enough info, keep reading
151 # Not enough info, keep reading
152
152
153 # if we are here, we have a very plain patch
153 # if we are here, we have a very plain patch
154 return remainder(cur)
154 return remainder(cur)
155
155
156 def extract(ui, fileobj):
156 def extract(ui, fileobj):
157 '''extract patch from data read from fileobj.
157 '''extract patch from data read from fileobj.
158
158
159 patch can be a normal patch or contained in an email message.
159 patch can be a normal patch or contained in an email message.
160
160
161 return tuple (filename, message, user, date, branch, node, p1, p2).
161 return tuple (filename, message, user, date, branch, node, p1, p2).
162 Any item in the returned tuple can be None. If filename is None,
162 Any item in the returned tuple can be None. If filename is None,
163 fileobj did not contain a patch. Caller must unlink filename when done.'''
163 fileobj did not contain a patch. Caller must unlink filename when done.'''
164
164
165 # attempt to detect the start of a patch
165 # attempt to detect the start of a patch
166 # (this heuristic is borrowed from quilt)
166 # (this heuristic is borrowed from quilt)
167 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
167 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
168 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
168 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
169 r'---[ \t].*?^\+\+\+[ \t]|'
169 r'---[ \t].*?^\+\+\+[ \t]|'
170 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
170 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
171
171
172 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
172 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
173 tmpfp = os.fdopen(fd, 'w')
173 tmpfp = os.fdopen(fd, 'w')
174 try:
174 try:
175 msg = email.Parser.Parser().parse(fileobj)
175 msg = email.Parser.Parser().parse(fileobj)
176
176
177 subject = msg['Subject']
177 subject = msg['Subject']
178 user = msg['From']
178 user = msg['From']
179 if not subject and not user:
179 if not subject and not user:
180 # Not an email, restore parsed headers if any
180 # Not an email, restore parsed headers if any
181 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
181 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
182
182
183 # should try to parse msg['Date']
183 # should try to parse msg['Date']
184 date = None
184 date = None
185 nodeid = None
185 nodeid = None
186 branch = None
186 branch = None
187 parents = []
187 parents = []
188
188
189 if subject:
189 if subject:
190 if subject.startswith('[PATCH'):
190 if subject.startswith('[PATCH'):
191 pend = subject.find(']')
191 pend = subject.find(']')
192 if pend >= 0:
192 if pend >= 0:
193 subject = subject[pend + 1:].lstrip()
193 subject = subject[pend + 1:].lstrip()
194 subject = re.sub(r'\n[ \t]+', ' ', subject)
194 subject = re.sub(r'\n[ \t]+', ' ', subject)
195 ui.debug('Subject: %s\n' % subject)
195 ui.debug('Subject: %s\n' % subject)
196 if user:
196 if user:
197 ui.debug('From: %s\n' % user)
197 ui.debug('From: %s\n' % user)
198 diffs_seen = 0
198 diffs_seen = 0
199 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
199 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
200 message = ''
200 message = ''
201 for part in msg.walk():
201 for part in msg.walk():
202 content_type = part.get_content_type()
202 content_type = part.get_content_type()
203 ui.debug('Content-Type: %s\n' % content_type)
203 ui.debug('Content-Type: %s\n' % content_type)
204 if content_type not in ok_types:
204 if content_type not in ok_types:
205 continue
205 continue
206 payload = part.get_payload(decode=True)
206 payload = part.get_payload(decode=True)
207 m = diffre.search(payload)
207 m = diffre.search(payload)
208 if m:
208 if m:
209 hgpatch = False
209 hgpatch = False
210 hgpatchheader = False
210 hgpatchheader = False
211 ignoretext = False
211 ignoretext = False
212
212
213 ui.debug('found patch at byte %d\n' % m.start(0))
213 ui.debug('found patch at byte %d\n' % m.start(0))
214 diffs_seen += 1
214 diffs_seen += 1
215 cfp = cStringIO.StringIO()
215 cfp = cStringIO.StringIO()
216 for line in payload[:m.start(0)].splitlines():
216 for line in payload[:m.start(0)].splitlines():
217 if line.startswith('# HG changeset patch') and not hgpatch:
217 if line.startswith('# HG changeset patch') and not hgpatch:
218 ui.debug('patch generated by hg export\n')
218 ui.debug('patch generated by hg export\n')
219 hgpatch = True
219 hgpatch = True
220 hgpatchheader = True
220 hgpatchheader = True
221 # drop earlier commit message content
221 # drop earlier commit message content
222 cfp.seek(0)
222 cfp.seek(0)
223 cfp.truncate()
223 cfp.truncate()
224 subject = None
224 subject = None
225 elif hgpatchheader:
225 elif hgpatchheader:
226 if line.startswith('# User '):
226 if line.startswith('# User '):
227 user = line[7:]
227 user = line[7:]
228 ui.debug('From: %s\n' % user)
228 ui.debug('From: %s\n' % user)
229 elif line.startswith("# Date "):
229 elif line.startswith("# Date "):
230 date = line[7:]
230 date = line[7:]
231 elif line.startswith("# Branch "):
231 elif line.startswith("# Branch "):
232 branch = line[9:]
232 branch = line[9:]
233 elif line.startswith("# Node ID "):
233 elif line.startswith("# Node ID "):
234 nodeid = line[10:]
234 nodeid = line[10:]
235 elif line.startswith("# Parent "):
235 elif line.startswith("# Parent "):
236 parents.append(line[9:].lstrip())
236 parents.append(line[9:].lstrip())
237 elif not line.startswith("# "):
237 elif not line.startswith("# "):
238 hgpatchheader = False
238 hgpatchheader = False
239 elif line == '---':
239 elif line == '---':
240 ignoretext = True
240 ignoretext = True
241 if not hgpatchheader and not ignoretext:
241 if not hgpatchheader and not ignoretext:
242 cfp.write(line)
242 cfp.write(line)
243 cfp.write('\n')
243 cfp.write('\n')
244 message = cfp.getvalue()
244 message = cfp.getvalue()
245 if tmpfp:
245 if tmpfp:
246 tmpfp.write(payload)
246 tmpfp.write(payload)
247 if not payload.endswith('\n'):
247 if not payload.endswith('\n'):
248 tmpfp.write('\n')
248 tmpfp.write('\n')
249 elif not diffs_seen and message and content_type == 'text/plain':
249 elif not diffs_seen and message and content_type == 'text/plain':
250 message += '\n' + payload
250 message += '\n' + payload
251 except: # re-raises
251 except: # re-raises
252 tmpfp.close()
252 tmpfp.close()
253 os.unlink(tmpname)
253 os.unlink(tmpname)
254 raise
254 raise
255
255
256 if subject and not message.startswith(subject):
256 if subject and not message.startswith(subject):
257 message = '%s\n%s' % (subject, message)
257 message = '%s\n%s' % (subject, message)
258 tmpfp.close()
258 tmpfp.close()
259 if not diffs_seen:
259 if not diffs_seen:
260 os.unlink(tmpname)
260 os.unlink(tmpname)
261 return None, message, user, date, branch, None, None, None
261 return None, message, user, date, branch, None, None, None
262 p1 = parents and parents.pop(0) or None
262 p1 = parents and parents.pop(0) or None
263 p2 = parents and parents.pop(0) or None
263 p2 = parents and parents.pop(0) or None
264 return tmpname, message, user, date, branch, nodeid, p1, p2
264 return tmpname, message, user, date, branch, nodeid, p1, p2
265
265
266 class patchmeta(object):
266 class patchmeta(object):
267 """Patched file metadata
267 """Patched file metadata
268
268
269 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
269 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
270 or COPY. 'path' is patched file path. 'oldpath' is set to the
270 or COPY. 'path' is patched file path. 'oldpath' is set to the
271 origin file when 'op' is either COPY or RENAME, None otherwise. If
271 origin file when 'op' is either COPY or RENAME, None otherwise. If
272 file mode is changed, 'mode' is a tuple (islink, isexec) where
272 file mode is changed, 'mode' is a tuple (islink, isexec) where
273 'islink' is True if the file is a symlink and 'isexec' is True if
273 'islink' is True if the file is a symlink and 'isexec' is True if
274 the file is executable. Otherwise, 'mode' is None.
274 the file is executable. Otherwise, 'mode' is None.
275 """
275 """
276 def __init__(self, path):
276 def __init__(self, path):
277 self.path = path
277 self.path = path
278 self.oldpath = None
278 self.oldpath = None
279 self.mode = None
279 self.mode = None
280 self.op = 'MODIFY'
280 self.op = 'MODIFY'
281 self.binary = False
281 self.binary = False
282
282
283 def setmode(self, mode):
283 def setmode(self, mode):
284 islink = mode & 020000
284 islink = mode & 020000
285 isexec = mode & 0100
285 isexec = mode & 0100
286 self.mode = (islink, isexec)
286 self.mode = (islink, isexec)
287
287
288 def copy(self):
288 def copy(self):
289 other = patchmeta(self.path)
289 other = patchmeta(self.path)
290 other.oldpath = self.oldpath
290 other.oldpath = self.oldpath
291 other.mode = self.mode
291 other.mode = self.mode
292 other.op = self.op
292 other.op = self.op
293 other.binary = self.binary
293 other.binary = self.binary
294 return other
294 return other
295
295
296 def _ispatchinga(self, afile):
296 def _ispatchinga(self, afile):
297 if afile == '/dev/null':
297 if afile == '/dev/null':
298 return self.op == 'ADD'
298 return self.op == 'ADD'
299 return afile == 'a/' + (self.oldpath or self.path)
299 return afile == 'a/' + (self.oldpath or self.path)
300
300
301 def _ispatchingb(self, bfile):
301 def _ispatchingb(self, bfile):
302 if bfile == '/dev/null':
302 if bfile == '/dev/null':
303 return self.op == 'DELETE'
303 return self.op == 'DELETE'
304 return bfile == 'b/' + self.path
304 return bfile == 'b/' + self.path
305
305
306 def ispatching(self, afile, bfile):
306 def ispatching(self, afile, bfile):
307 return self._ispatchinga(afile) and self._ispatchingb(bfile)
307 return self._ispatchinga(afile) and self._ispatchingb(bfile)
308
308
309 def __repr__(self):
309 def __repr__(self):
310 return "<patchmeta %s %r>" % (self.op, self.path)
310 return "<patchmeta %s %r>" % (self.op, self.path)
311
311
312 def readgitpatch(lr):
312 def readgitpatch(lr):
313 """extract git-style metadata about patches from <patchname>"""
313 """extract git-style metadata about patches from <patchname>"""
314
314
315 # Filter patch for git information
315 # Filter patch for git information
316 gp = None
316 gp = None
317 gitpatches = []
317 gitpatches = []
318 for line in lr:
318 for line in lr:
319 line = line.rstrip(' \r\n')
319 line = line.rstrip(' \r\n')
320 if line.startswith('diff --git a/'):
320 if line.startswith('diff --git a/'):
321 m = gitre.match(line)
321 m = gitre.match(line)
322 if m:
322 if m:
323 if gp:
323 if gp:
324 gitpatches.append(gp)
324 gitpatches.append(gp)
325 dst = m.group(2)
325 dst = m.group(2)
326 gp = patchmeta(dst)
326 gp = patchmeta(dst)
327 elif gp:
327 elif gp:
328 if line.startswith('--- '):
328 if line.startswith('--- '):
329 gitpatches.append(gp)
329 gitpatches.append(gp)
330 gp = None
330 gp = None
331 continue
331 continue
332 if line.startswith('rename from '):
332 if line.startswith('rename from '):
333 gp.op = 'RENAME'
333 gp.op = 'RENAME'
334 gp.oldpath = line[12:]
334 gp.oldpath = line[12:]
335 elif line.startswith('rename to '):
335 elif line.startswith('rename to '):
336 gp.path = line[10:]
336 gp.path = line[10:]
337 elif line.startswith('copy from '):
337 elif line.startswith('copy from '):
338 gp.op = 'COPY'
338 gp.op = 'COPY'
339 gp.oldpath = line[10:]
339 gp.oldpath = line[10:]
340 elif line.startswith('copy to '):
340 elif line.startswith('copy to '):
341 gp.path = line[8:]
341 gp.path = line[8:]
342 elif line.startswith('deleted file'):
342 elif line.startswith('deleted file'):
343 gp.op = 'DELETE'
343 gp.op = 'DELETE'
344 elif line.startswith('new file mode '):
344 elif line.startswith('new file mode '):
345 gp.op = 'ADD'
345 gp.op = 'ADD'
346 gp.setmode(int(line[-6:], 8))
346 gp.setmode(int(line[-6:], 8))
347 elif line.startswith('new mode '):
347 elif line.startswith('new mode '):
348 gp.setmode(int(line[-6:], 8))
348 gp.setmode(int(line[-6:], 8))
349 elif line.startswith('GIT binary patch'):
349 elif line.startswith('GIT binary patch'):
350 gp.binary = True
350 gp.binary = True
351 if gp:
351 if gp:
352 gitpatches.append(gp)
352 gitpatches.append(gp)
353
353
354 return gitpatches
354 return gitpatches
355
355
356 class linereader(object):
356 class linereader(object):
357 # simple class to allow pushing lines back into the input stream
357 # simple class to allow pushing lines back into the input stream
358 def __init__(self, fp):
358 def __init__(self, fp):
359 self.fp = fp
359 self.fp = fp
360 self.buf = []
360 self.buf = []
361
361
362 def push(self, line):
362 def push(self, line):
363 if line is not None:
363 if line is not None:
364 self.buf.append(line)
364 self.buf.append(line)
365
365
366 def readline(self):
366 def readline(self):
367 if self.buf:
367 if self.buf:
368 l = self.buf[0]
368 l = self.buf[0]
369 del self.buf[0]
369 del self.buf[0]
370 return l
370 return l
371 return self.fp.readline()
371 return self.fp.readline()
372
372
373 def __iter__(self):
373 def __iter__(self):
374 while True:
374 while True:
375 l = self.readline()
375 l = self.readline()
376 if not l:
376 if not l:
377 break
377 break
378 yield l
378 yield l
379
379
380 class abstractbackend(object):
380 class abstractbackend(object):
381 def __init__(self, ui):
381 def __init__(self, ui):
382 self.ui = ui
382 self.ui = ui
383
383
384 def getfile(self, fname):
384 def getfile(self, fname):
385 """Return target file data and flags as a (data, (islink,
385 """Return target file data and flags as a (data, (islink,
386 isexec)) tuple. Data is None if file is missing/deleted.
386 isexec)) tuple. Data is None if file is missing/deleted.
387 """
387 """
388 raise NotImplementedError
388 raise NotImplementedError
389
389
390 def setfile(self, fname, data, mode, copysource):
390 def setfile(self, fname, data, mode, copysource):
391 """Write data to target file fname and set its mode. mode is a
391 """Write data to target file fname and set its mode. mode is a
392 (islink, isexec) tuple. If data is None, the file content should
392 (islink, isexec) tuple. If data is None, the file content should
393 be left unchanged. If the file is modified after being copied,
393 be left unchanged. If the file is modified after being copied,
394 copysource is set to the original file name.
394 copysource is set to the original file name.
395 """
395 """
396 raise NotImplementedError
396 raise NotImplementedError
397
397
398 def unlink(self, fname):
398 def unlink(self, fname):
399 """Unlink target file."""
399 """Unlink target file."""
400 raise NotImplementedError
400 raise NotImplementedError
401
401
402 def writerej(self, fname, failed, total, lines):
402 def writerej(self, fname, failed, total, lines):
403 """Write rejected lines for fname. total is the number of hunks
403 """Write rejected lines for fname. total is the number of hunks
404 which failed to apply and total the total number of hunks for this
404 which failed to apply and total the total number of hunks for this
405 files.
405 files.
406 """
406 """
407 pass
407 pass
408
408
409 def exists(self, fname):
409 def exists(self, fname):
410 raise NotImplementedError
410 raise NotImplementedError
411
411
412 class fsbackend(abstractbackend):
412 class fsbackend(abstractbackend):
413 def __init__(self, ui, basedir):
413 def __init__(self, ui, basedir):
414 super(fsbackend, self).__init__(ui)
414 super(fsbackend, self).__init__(ui)
415 self.opener = scmutil.opener(basedir)
415 self.opener = scmutil.opener(basedir)
416
416
417 def _join(self, f):
417 def _join(self, f):
418 return os.path.join(self.opener.base, f)
418 return os.path.join(self.opener.base, f)
419
419
420 def getfile(self, fname):
420 def getfile(self, fname):
421 if self.opener.islink(fname):
421 if self.opener.islink(fname):
422 return (self.opener.readlink(fname), (True, False))
422 return (self.opener.readlink(fname), (True, False))
423
423
424 isexec = False
424 isexec = False
425 try:
425 try:
426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
427 except OSError, e:
427 except OSError, e:
428 if e.errno != errno.ENOENT:
428 if e.errno != errno.ENOENT:
429 raise
429 raise
430 try:
430 try:
431 return (self.opener.read(fname), (False, isexec))
431 return (self.opener.read(fname), (False, isexec))
432 except IOError, e:
432 except IOError, e:
433 if e.errno != errno.ENOENT:
433 if e.errno != errno.ENOENT:
434 raise
434 raise
435 return None, None
435 return None, None
436
436
437 def setfile(self, fname, data, mode, copysource):
437 def setfile(self, fname, data, mode, copysource):
438 islink, isexec = mode
438 islink, isexec = mode
439 if data is None:
439 if data is None:
440 self.opener.setflags(fname, islink, isexec)
440 self.opener.setflags(fname, islink, isexec)
441 return
441 return
442 if islink:
442 if islink:
443 self.opener.symlink(data, fname)
443 self.opener.symlink(data, fname)
444 else:
444 else:
445 self.opener.write(fname, data)
445 self.opener.write(fname, data)
446 if isexec:
446 if isexec:
447 self.opener.setflags(fname, False, True)
447 self.opener.setflags(fname, False, True)
448
448
449 def unlink(self, fname):
449 def unlink(self, fname):
450 self.opener.unlinkpath(fname, ignoremissing=True)
450 self.opener.unlinkpath(fname, ignoremissing=True)
451
451
452 def writerej(self, fname, failed, total, lines):
452 def writerej(self, fname, failed, total, lines):
453 fname = fname + ".rej"
453 fname = fname + ".rej"
454 self.ui.warn(
454 self.ui.warn(
455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
456 (failed, total, fname))
456 (failed, total, fname))
457 fp = self.opener(fname, 'w')
457 fp = self.opener(fname, 'w')
458 fp.writelines(lines)
458 fp.writelines(lines)
459 fp.close()
459 fp.close()
460
460
461 def exists(self, fname):
461 def exists(self, fname):
462 return self.opener.lexists(fname)
462 return self.opener.lexists(fname)
463
463
464 class workingbackend(fsbackend):
464 class workingbackend(fsbackend):
465 def __init__(self, ui, repo, similarity):
465 def __init__(self, ui, repo, similarity):
466 super(workingbackend, self).__init__(ui, repo.root)
466 super(workingbackend, self).__init__(ui, repo.root)
467 self.repo = repo
467 self.repo = repo
468 self.similarity = similarity
468 self.similarity = similarity
469 self.removed = set()
469 self.removed = set()
470 self.changed = set()
470 self.changed = set()
471 self.copied = []
471 self.copied = []
472
472
473 def _checkknown(self, fname):
473 def _checkknown(self, fname):
474 if self.repo.dirstate[fname] == '?' and self.exists(fname):
474 if self.repo.dirstate[fname] == '?' and self.exists(fname):
475 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
475 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
476
476
477 def setfile(self, fname, data, mode, copysource):
477 def setfile(self, fname, data, mode, copysource):
478 self._checkknown(fname)
478 self._checkknown(fname)
479 super(workingbackend, self).setfile(fname, data, mode, copysource)
479 super(workingbackend, self).setfile(fname, data, mode, copysource)
480 if copysource is not None:
480 if copysource is not None:
481 self.copied.append((copysource, fname))
481 self.copied.append((copysource, fname))
482 self.changed.add(fname)
482 self.changed.add(fname)
483
483
484 def unlink(self, fname):
484 def unlink(self, fname):
485 self._checkknown(fname)
485 self._checkknown(fname)
486 super(workingbackend, self).unlink(fname)
486 super(workingbackend, self).unlink(fname)
487 self.removed.add(fname)
487 self.removed.add(fname)
488 self.changed.add(fname)
488 self.changed.add(fname)
489
489
490 def close(self):
490 def close(self):
491 wctx = self.repo[None]
491 wctx = self.repo[None]
492 changed = set(self.changed)
492 changed = set(self.changed)
493 for src, dst in self.copied:
493 for src, dst in self.copied:
494 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
494 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
495 if self.removed:
495 if self.removed:
496 wctx.forget(sorted(self.removed))
496 wctx.forget(sorted(self.removed))
497 for f in self.removed:
497 for f in self.removed:
498 if f not in self.repo.dirstate:
498 if f not in self.repo.dirstate:
499 # File was deleted and no longer belongs to the
499 # File was deleted and no longer belongs to the
500 # dirstate, it was probably marked added then
500 # dirstate, it was probably marked added then
501 # deleted, and should not be considered by
501 # deleted, and should not be considered by
502 # marktouched().
502 # marktouched().
503 changed.discard(f)
503 changed.discard(f)
504 if changed:
504 if changed:
505 scmutil.marktouched(self.repo, changed, self.similarity)
505 scmutil.marktouched(self.repo, changed, self.similarity)
506 return sorted(self.changed)
506 return sorted(self.changed)
507
507
508 class filestore(object):
508 class filestore(object):
509 def __init__(self, maxsize=None):
509 def __init__(self, maxsize=None):
510 self.opener = None
510 self.opener = None
511 self.files = {}
511 self.files = {}
512 self.created = 0
512 self.created = 0
513 self.maxsize = maxsize
513 self.maxsize = maxsize
514 if self.maxsize is None:
514 if self.maxsize is None:
515 self.maxsize = 4*(2**20)
515 self.maxsize = 4*(2**20)
516 self.size = 0
516 self.size = 0
517 self.data = {}
517 self.data = {}
518
518
519 def setfile(self, fname, data, mode, copied=None):
519 def setfile(self, fname, data, mode, copied=None):
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
521 self.data[fname] = (data, mode, copied)
521 self.data[fname] = (data, mode, copied)
522 self.size += len(data)
522 self.size += len(data)
523 else:
523 else:
524 if self.opener is None:
524 if self.opener is None:
525 root = tempfile.mkdtemp(prefix='hg-patch-')
525 root = tempfile.mkdtemp(prefix='hg-patch-')
526 self.opener = scmutil.opener(root)
526 self.opener = scmutil.opener(root)
527 # Avoid filename issues with these simple names
527 # Avoid filename issues with these simple names
528 fn = str(self.created)
528 fn = str(self.created)
529 self.opener.write(fn, data)
529 self.opener.write(fn, data)
530 self.created += 1
530 self.created += 1
531 self.files[fname] = (fn, mode, copied)
531 self.files[fname] = (fn, mode, copied)
532
532
533 def getfile(self, fname):
533 def getfile(self, fname):
534 if fname in self.data:
534 if fname in self.data:
535 return self.data[fname]
535 return self.data[fname]
536 if not self.opener or fname not in self.files:
536 if not self.opener or fname not in self.files:
537 return None, None, None
537 return None, None, None
538 fn, mode, copied = self.files[fname]
538 fn, mode, copied = self.files[fname]
539 return self.opener.read(fn), mode, copied
539 return self.opener.read(fn), mode, copied
540
540
541 def close(self):
541 def close(self):
542 if self.opener:
542 if self.opener:
543 shutil.rmtree(self.opener.base)
543 shutil.rmtree(self.opener.base)
544
544
class repobackend(abstractbackend):
    """Patch backend that buffers results instead of touching the
    working directory.

    New file contents are written into *store*; the sets of changed and
    removed files plus the copy map are tracked so the caller can pick
    them up from close().
    """

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()    # filenames written through setfile()
        self.removed = set()    # filenames deleted through unlink()
        self.copied = {}        # destination filename -> copy source

    def _checkknown(self, fname):
        # every file touched by the patch must exist in the base context
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        # EAFP: a missing file surfaces as a LookupError from the context
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        fileflags = fctx.flags()
        return fctx.data(), ('l' in fileflags, 'x' in fileflags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # no new content supplied: carry over the current content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed.union(self.removed)
586
586
# Hunk descriptor formats:
#   unified: @@ -start,len +start,len @@  or  @@ -start +start @@ if len is 1
#   context: --- start,end ---- / *** start,end ****
# Raw strings so the regex escapes (\d, \*, \+) are not subject to
# Python string-escape interpretation.
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# accepted values for the eolmode argument used by patchfile and callers
eolmodes = ['strict', 'crlf', 'lf', 'auto']
591
591
class patchfile(object):
    """Applies the hunks of one patch to a single target file.

    Current content is read through *backend* (or *store* when the file
    is a copy/rename source), hunks are applied in memory via apply(),
    and close() writes the result back and emits rejects for hunks that
    did not apply.
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        # gp carries per-file patch metadata: path, mode, op, oldpath
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None  # EOL style detected from the file's first line
        self.backend = backend
        self.ui = ui
        self.lines = []  # current file content, split into lines
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}   # line content -> [line numbers], built in apply()
        self.dirty = 0
        self.offset = 0  # cumulative line-count delta from applied hunks
        self.skew = 0    # drift between expected and actual hunk position
        self.rej = []    # hunks that could not be applied
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write *lines* through the backend, restoring the requested EOLs."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Print the 'patching file %s' message (warning channel if *warn*)."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk *h* to the in-memory lines.

        Returns 0 on a clean apply, the fuzz amount when fuzzing was
        needed, and -1 when the hunk was rejected.
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            # binary patches are all-or-nothing: replace or delete
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush changes and rejects; returns the number of rejected hunks."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
806
806
class header(object):
    """The header of a single file's diff inside a patch."""
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|new|deleted|copy|rename) ')

    def __init__(self, header):
        # raw header lines as read from the patch text
        self.header = header
        # hunks belonging to this file, filled in by the parser
        self.hunks = []

    def binary(self):
        # a git 'index ' line marks binary content
        return util.any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        """Write a condensed, human-oriented rendering of the header."""
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                changed = sum([max(h.added, h.removed) for h in self.hunks])
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks), changed))
                break
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        return util.any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        """Return the file name(s) named by the first header line."""
        m = self.diffgit_re.match(self.header[0])
        if not m:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(repr(f) for f in self.files()))

    def special(self):
        return util.any(self.special_re.match(h) for h in self.header)
864
864
class recordhunk(object):
    """A hunk as manipulated by interactive record.

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(lineno, lines):
            # Context trimming is currently a no-op: the 'False and'
            # guard below is never true, so lines come back untouched.
            surplus = len(lines) - self.maxcontext
            if False and surplus > 0:
                return lineno + surplus, lines[:self.maxcontext]
            return lineno, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        added = sum(1 for line in hunk if line[0] == '+')
        removed = sum(1 for line in hunk if line[0] == '-')
        return added, removed

    def write(self, fp):
        """Emit this hunk as unified diff text."""
        context = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            # the no-eol marker is not a real context line
            context -= 1
        fromlen = context + self.removed
        tolen = context + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
910
910
class hunk(object):
    """One hunk parsed from a unified or context diff.

    self.a collects old-side lines, self.b new-side lines, and
    self.hunk the raw hunk text beginning with the descriptor line.
    """
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            # lr is a pushback-capable line reader; context selects the
            # context-diff parser over the unified one
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse an '@@ -a,b +c,d @@' hunk body from line reader *lr*."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing ",len" part means a length of 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-diff hunk ('***'/'---' style) from *lr* and
        rebuild a unified-style descriptor into self.desc/self.hunk."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old side: '- ' and '! ' become '-', '  ' stays context
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # no-eol marker: strip the trailing newline we stored
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new side: '+ ' and '! ' become '+', merged into self.hunk in order
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # NOTE(review): message says "old text" although this loop
                # parses the new side; kept as-is
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # interleave the new-side line into self.hunk, skipping past
            # removal lines already placed there
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        """Consume a trailing no-eol marker line, if one follows."""
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when both sides hold as many lines as the descriptor said."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with up to *fuzz*
        context lines trimmed and start positions adjusted to match."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1127
1127
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # decoded payload; stays None until _read() succeeds
        self.text = None
        # True when the payload is a delta against existing content
        # rather than full literal data
        self.delta = False
        # raw lines consumed from the line reader, starting at the marker
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        # whether the binary data was fully extracted
        return self.text is not None

    def new(self, lines):
        # return the new file content as a single-element list; for
        # deltas, 'lines' is the base content the delta applies to
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        """Parse the base85+zlib encoded body that follows a
        'GIT binary patch' marker, storing the result in self.text."""
        def getline(lr, hunk):
            # read one line, keep the raw copy in 'hunk', return it
            # stripped of its line ending
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the 'literal <size>' or 'delta <size>' header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # the first character encodes this line's decoded length:
            # 'A'-'Z' -> 1-26, anything else is taken as 'a'-'z' -> 27-52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError, e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        # the advertised size doubles as an integrity check
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1183
1183
1184 def parsefilename(str):
1184 def parsefilename(str):
1185 # --- filename \t|space stuff
1185 # --- filename \t|space stuff
1186 s = str[4:].rstrip('\r\n')
1186 s = str[4:].rstrip('\r\n')
1187 i = s.find('\t')
1187 i = s.find('\t')
1188 if i < 0:
1188 if i < 0:
1189 i = s.find(' ')
1189 i = s.find(' ')
1190 if i < 0:
1190 if i < 0:
1191 return s
1191 return s
1192 return s[:i]
1192 return s[:i]
1193
1193
def parsepatch(fp):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            # current line offsets in the original/patched file
            self.fromline = 0
            self.toline = 0
            # text following the @@ range marker (typically the
            # enclosing function name)
            self.proc = ''
            # header object the next hunks get attached to
            self.header = None
            # context lines seen since the last hunk
            self.context = []
            # context lines immediately preceding the pending hunk
            self.before = []
            # added/removed lines of the pending hunk
            self.hunk = []
            # all headers parsed so far (the final result)
            self.headers = []

        def addrange(self, limits):
            # record where the next hunk starts, from an
            # '@@ -a,b +c,d @@ proc' line
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # a context run terminates the pending hunk, if any: flush
            # it into the current header and advance the line counters
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
                self.proc = ''
            self.context = context

        def addhunk(self, hunk):
            # context seen just before a hunk becomes its leading context
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # flush any pending hunk, then open a new file header
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the last pending hunk and return all parsed headers
            self.addcontext([])
            return self.headers

        # legal scanpatch() event sequences: (current state, next event)
        # maps to the handler; any other transition is a malformed patch
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            # the table holds plain functions, hence the explicit 'p'
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    return p.finished()
1273
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform('  a/b/c   ', 0, '')
    ('', '  a/b/c')
    >>> pathtransform('  a/b/c   ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('  a//b/c   ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', path.rstrip()
    pathlen = len(path)
    pos = 0
    remaining = strip
    while remaining:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # consume '//' in the path
        while pos < pathlen - 1 and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1229
1309
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta (target name plus ADD/DELETE operation) for a
    plain, non-git patch from its ---/+++ names and first hunk.

    'backend' is queried for file existence to decide which of the two
    names is the real target; 'strip' and 'prefix' are applied to both
    names via pathtransform().
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a creation has no old file and an empty old range; a removal is
    # the mirror image on the new side
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    # neither side exists and the hunk does not create the file
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on whichever name is not /dev/null
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1278
1358
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        while True:
            line = lr.readline()
            if not line:
                break
            if p(line):
                lines.append(line)
            else:
                # not ours: put it back for the outer loop
                lr.push(line)
                break
        return lines

    while True:
        line = lr.readline()
        if not line:
            break
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                # header lines are everything up to the next '---' or
                # 'diff' line
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                # unified diff: include the ---/+++ pair in the header
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            # context lines; '\' covers '\ No newline at end of file'
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            # added/removed lines of a hunk
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1330
1410
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy/rename metadata.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        # seekable input: remember where we are so we can rewind below
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable input (e.g. a pipe): buffer it all in memory
        fp = cStringIO.StringIO(lr.fp.read())
    scanner = linereader(fp)
    scanner.push(firstline)
    gitpatches = readgitpatch(scanner)
    fp.seek(pos)
    return gitpatches
1356
1436
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    # emitfile: a 'file' event is pending until the first hunk arrives;
    # newfile: a new file header was just recognized
    emitfile = newfile = False
    # git metadata stack (reversed so .pop() yields them in file order);
    # stays None until a 'diff --git' line is seen
    gitpatches = None

    # our states
    BFILE = 1
    # None: diff style unknown yet; False: unified; True: context diff
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        # a hunk start: '@' (unified), '*************' (context diff)
        # or a git binary patch marker
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of this file: announce the file itself
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush hunkless git entries (mode changes, pure renames...)
            # that precede the file this header is about
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            # reset per-file state; the 'file' event itself is deferred
            # until the first hunk (emitfile)
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # leftover git entries that never produced a hunk
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1455
1535
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(chunk):
        # skip one size header: bytes with the high bit set continue
        # the number, the first byte without it ends it
        for pos, c in enumerate(chunk):
            if not ord(c) & 0x80:
                return pos + 1
        return len(chunk)

    # strip the source-size and destination-size headers
    binchunk = binchunk[deltahead(binchunk):]
    binchunk = binchunk[deltahead(binchunk):]

    out = ""
    i = 0
    end = len(binchunk)
    while i < end:
        cmd = ord(binchunk[i])
        i += 1
        if cmd & 0x80:
            # copy command: the low flag bits select which offset/size
            # bytes follow, little-endian
            offset = 0
            size = 0
            for bit, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if cmd & bit:
                    offset |= ord(binchunk[i]) << shift
                    i += 1
            for bit, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if cmd & bit:
                    size |= ord(binchunk[i]) << shift
                    i += 1
            if size == 0:
                # a zero size encodes the maximum copy length
                size = 0x10000
            out += data[offset:offset + size]
        elif cmd != 0:
            # literal command: the next 'cmd' bytes are inserted verbatim
            out += binchunk[i:i + cmd]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out
1511
1591
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to the generic driver with the default per-file patcher
    opts = {'strip': strip, 'prefix': prefix, 'eolmode': eolmode}
    return _applydiff(ui, fp, patchfile, backend, store, **opts)
1524
1604
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Drive the application of every file/hunk event produced by
    iterhunks(fp), using 'patcher' to patch individual files.

    Returns 0 on success, 1 if any hunk applied with fuzz, -1 if any
    hunk was rejected.
    """
    if prefix:
        # clean up double slashes, lack of trailing slashes, etc
        prefix = util.normpath(prefix) + '/'
    def pstrip(p):
        # strip-1 because git-style paths carry an extra a/ or b/ level
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the file was skipped (e.g. its patcher failed); drop
                # its hunks too
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                # finalize the previous file before switching
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # plain patch: derive the metadata from the file names
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # hunkless entry: pure delete, copy, rename or mode change
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    # FIXME: failing getfile has never been handled here
                    assert data is not None
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError, inst:
                # count the whole file as rejected but keep going
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preserve copy/rename sources before they get modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
1610
1690
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        # scrape the external tool's output for touched files, fuzz
        # and failures
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                # NOTE(review): pf/printed_file are only bound once a
                # 'patching file' line was seen; assumes the tool always
                # prints that first -- confirm
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        # record touched files even if reading the output failed
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1652
1732
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply a patch through *backend* and report whether it fuzzed.

    'patchobj' is either the path of a patch file (opened here in binary
    mode) or an already-open file-like object.  'files' is a set updated
    in place with the files the backend touched.  'eolmode' selects the
    end-of-line handling; None falls back to the 'patch.eol' config
    value (default 'strict').

    Returns whether the patch was applied with fuzz factor.  Raises
    PatchError if the patch failed to apply, util.Abort for an unknown
    eolmode.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    # 'eolmodes' is expected to be the module-level list of supported
    # modes -- defined outside this view
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        # a string patchobj is a file name; anything else is assumed to
        # already be an open file object
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close files we opened ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1679
1759
def internalpatch(ui, repo, patchobj, strip, prefix, files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
1686
1766
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> on top of *ctx*, writing results into *store*."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
1691
1771
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # a configured external patch program takes precedence over the
    # builtin patcher
    patcher = ui.config('ui', 'patch')
    if not patcher:
        return internalpatch(ui, repo, patchname, strip, prefix, files,
                             eolmode, similarity)
    return _externalpatch(ui, repo, patcher, patchname, strip, files,
                          similarity)
1713
1793
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file paths touched by the patch at *patchpath*.

    Paths are reported after removing *strip* leading path components.
    For renames both the old and the new name are included.  Raises
    util.Abort when the patch parser yields an unexpected state.
    """
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git patches carry their own metadata; adjust the
                    # embedded paths for the requested strip level
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    # plain patches need the metadata synthesized from
                    # the file names and first hunk
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()
1737
1817
class GitDiffRequired(Exception):
    """Raised when a change can only be expressed as a git-format diff.

    diff() raises this from its losedata callback and catches it to
    regenerate the whole diff with git format enabled.
    """
    pass
1740
1820
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    """Return diffopts with every feature enabled and parsed."""
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, git=True, whitespace=True,
                           formatchanging=True)

# backwards-compatible alias for the historical name
diffopts = diffallopts
1747
1827
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # command line options (opts) take precedence over configuration;
        # forceplain, when given, overrides the config value under
        # ui.plain() (HGPLAIN)
        if opts:
            v = opts.get(key)
            if v:
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')
    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary')
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)
1788
1868
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache (at most 20 entries) of filelogs, so repeated
        # filectx lookups do not reopen the same filelog
        cache = {}
        order = util.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    # full hashes in debug mode, short ones otherwise
    hexfunc = repo.ui.debugflag and hex or short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2)

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        # try plain format first; fall back to git format if some change
        # cannot be represented (signalled via GitDiffRequired)
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1862
1942
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()

    Each chunk produced by func() is split into lines and every piece is
    paired with a ui label (diff.hunk, diff.inserted, ...) so callers can
    colorize diff output.  Tabs and trailing whitespace in changed lines
    get their own diff.tab / diff.trailingwhitespace labels.
    '''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    # 'head' tracks whether we are inside a file header (between a diff
    # line and the first hunk '@' line)
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')
1914
1994
def diffui(*args, **kw):
    """Like diff(), but yield 2-tuples of (output, label) for ui.write()."""
    return difflabel(diff, *args, **kw)
1918
1998
1919 def _filepairs(ctx1, modified, added, removed, copy, opts):
1999 def _filepairs(ctx1, modified, added, removed, copy, opts):
1920 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2000 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
1921 before and f2 is the the name after. For added files, f1 will be None,
2001 before and f2 is the the name after. For added files, f1 will be None,
1922 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2002 and for removed files, f2 will be None. copyop may be set to None, 'copy'
1923 or 'rename' (the latter two only if opts.git is set).'''
2003 or 'rename' (the latter two only if opts.git is set).'''
1924 gone = set()
2004 gone = set()
1925
2005
1926 copyto = dict([(v, k) for k, v in copy.items()])
2006 copyto = dict([(v, k) for k, v in copy.items()])
1927
2007
1928 addedset, removedset = set(added), set(removed)
2008 addedset, removedset = set(added), set(removed)
1929 # Fix up added, since merged-in additions appear as
2009 # Fix up added, since merged-in additions appear as
1930 # modifications during merges
2010 # modifications during merges
1931 for f in modified:
2011 for f in modified:
1932 if f not in ctx1:
2012 if f not in ctx1:
1933 addedset.add(f)
2013 addedset.add(f)
1934
2014
1935 for f in sorted(modified + added + removed):
2015 for f in sorted(modified + added + removed):
1936 copyop = None
2016 copyop = None
1937 f1, f2 = f, f
2017 f1, f2 = f, f
1938 if f in addedset:
2018 if f in addedset:
1939 f1 = None
2019 f1 = None
1940 if f in copy:
2020 if f in copy:
1941 if opts.git:
2021 if opts.git:
1942 f1 = copy[f]
2022 f1 = copy[f]
1943 if f1 in removedset and f1 not in gone:
2023 if f1 in removedset and f1 not in gone:
1944 copyop = 'rename'
2024 copyop = 'rename'
1945 gone.add(f1)
2025 gone.add(f1)
1946 else:
2026 else:
1947 copyop = 'copy'
2027 copyop = 'copy'
1948 elif f in removedset:
2028 elif f in removedset:
1949 f2 = None
2029 f2 = None
1950 if opts.git:
2030 if opts.git:
1951 # have we already reported a copy above?
2031 # have we already reported a copy above?
1952 if (f in copyto and copyto[f] in addedset
2032 if (f in copyto and copyto[f] in addedset
1953 and copy[copyto[f]] == f):
2033 and copy[copyto[f]] == f):
1954 continue
2034 continue
1955 yield f1, f2, copyop
2035 yield f1, f2, copyop
1956
2036
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    """Yield diff text chunks for the changed files between ctx1 and ctx2.

    For each (f1, f2, copyop) pair from _filepairs(), emit an optional
    header chunk ('diff --git ...' plus git metadata lines, or a plain
    'diff -r ...' line) followed by the unified (or base85 binary) diff
    text.  When losedatafn is set and git format is off, losedatafn is
    called for every change the plain format cannot represent.
    """

    def gitindex(text):
        # hash the content the way git hashes a blob object:
        # sha1('blob <len>\\0' + content)
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain header line: "diff -r REV1 -r REV2 filename"
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    # git file modes: symlink, executable, regular
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    for f1, f2, copyop in _filepairs(
            ctx1, modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            # report every change the plain patch format cannot express
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = posixpath.join(prefix, f1 or f2)
        path2 = posixpath.join(prefix, f2 or f1)
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        # suppress headers for files with no textual change, unless the
        # header itself carries information (mode change, copy, ...)
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text
2049
2129
def diffstatsum(stats):
    """Reduce diffstatdata() output to overall totals.

    Returns (widest filename column, largest per-file change count,
    total additions, total removals, any-binary flag).
    """
    maxfile = 0
    maxtotal = 0
    addtotal = 0
    removetotal = 0
    binary = False
    for fname, adds, removes, isbinary in stats:
        width = encoding.colwidth(fname)
        if width > maxfile:
            maxfile = width
        changecount = adds + removes
        if changecount > maxtotal:
            maxtotal = changecount
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
2060
2140
def diffstatdata(lines):
    """Scan diff lines into a list of (filename, adds, removes, isbinary)."""
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    fname = None
    plus = minus = 0
    binary = False

    def flush():
        # record the file we just finished scanning, if any
        if fname:
            results.append((fname, plus, minus, binary))

    for line in lines:
        if line.startswith('diff'):
            flush()
            # reset the counters whenever a new file starts
            plus = minus = 0
            binary = False
        if line.startswith('diff --git a/'):
            fname = gitre.search(line).group(2)
        elif line.startswith('diff -r'):
            # format: "diff -r ... -r ... filename"
            fname = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            plus += 1
        elif line.startswith('-') and not line.startswith('--- '):
            minus += 1
        elif line.startswith(('GIT binary patch', 'Binary file')):
            binary = True
    flush()
    return results
2090
2170
def diffstat(lines, width=80, git=False):
    """Render a diffstat histogram for *lines* of diff text."""
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary:
        # leave room for the 'Bin' marker
        countwidth = max(countwidth, 3)
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for fname, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else adds + removes
        padding = ' ' * (maxname - encoding.colwidth(fname))
        output.append(' %s%s | %*s %s%s\n' %
                      (fname, padding, countwidth, count,
                       '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2128
2208
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''
    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # histogram line: split the name column from the +/- graph
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((r'\++', 'diffstat.inserted'),
                                   (r'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        else:
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now