record: move filterpatch from record to patch...
Laurent Charignon
r24269:9a745ced default
@@ -1,433 +1,281
1 # record.py
1 # record.py
2 #
2 #
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''commands to interactively select changes for commit/qrefresh'''
8 '''commands to interactively select changes for commit/qrefresh'''
9
9
10 from mercurial.i18n import _
10 from mercurial.i18n import _
11 from mercurial import cmdutil, commands, extensions, hg, patch
11 from mercurial import cmdutil, commands, extensions, hg, patch
12 from mercurial import util
12 from mercurial import util
13 import copy, cStringIO, errno, os, shutil, tempfile
13 import cStringIO, errno, os, shutil, tempfile
14
14
15 cmdtable = {}
15 cmdtable = {}
16 command = cmdutil.command(cmdtable)
16 command = cmdutil.command(cmdtable)
17 testedwith = 'internal'
17 testedwith = 'internal'
18
18
19 def filterpatch(ui, headers):
20 """Interactively filter patch chunks into applied-only chunks"""
21
22 def prompt(skipfile, skipall, query, chunk):
23 """prompt query, and process base inputs
24
25 - y/n for the rest of file
26 - y/n for the rest
27 - ? (help)
28 - q (quit)
29
30 Return True/False and possibly updated skipfile and skipall.
31 """
32 newpatches = None
33 if skipall is not None:
34 return skipall, skipfile, skipall, newpatches
35 if skipfile is not None:
36 return skipfile, skipfile, skipall, newpatches
37 while True:
38 resps = _('[Ynesfdaq?]'
39 '$$ &Yes, record this change'
40 '$$ &No, skip this change'
41 '$$ &Edit this change manually'
42 '$$ &Skip remaining changes to this file'
43 '$$ Record remaining changes to this &file'
44 '$$ &Done, skip remaining changes and files'
45 '$$ Record &all changes to all remaining files'
46 '$$ &Quit, recording no changes'
47 '$$ &? (display help)')
48 r = ui.promptchoice("%s %s" % (query, resps))
49 ui.write("\n")
50 if r == 8: # ?
51 for c, t in ui.extractchoices(resps)[1]:
52 ui.write('%s - %s\n' % (c, t.lower()))
53 continue
54 elif r == 0: # yes
55 ret = True
56 elif r == 1: # no
57 ret = False
58 elif r == 2: # Edit patch
59 if chunk is None:
60 ui.write(_('cannot edit patch for whole file'))
61 ui.write("\n")
62 continue
63 if chunk.header.binary():
64 ui.write(_('cannot edit patch for binary file'))
65 ui.write("\n")
66 continue
67 # Patch comment based on the Git one (based on comment at end of
68 # http://mercurial.selenic.com/wiki/RecordExtension)
69 phelp = '---' + _("""
70 To remove '-' lines, make them ' ' lines (context).
71 To remove '+' lines, delete them.
72 Lines starting with # will be removed from the patch.
73
74 If the patch applies cleanly, the edited hunk will immediately be
75 added to the record list. If it does not apply cleanly, a rejects
76 file will be generated: you can use that when you try again. If
77 all lines of the hunk are removed, then the edit is aborted and
78 the hunk is left unchanged.
79 """)
80 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
81 suffix=".diff", text=True)
82 ncpatchfp = None
83 try:
84 # Write the initial patch
85 f = os.fdopen(patchfd, "w")
86 chunk.header.write(f)
87 chunk.write(f)
88 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
89 f.close()
90 # Start the editor and wait for it to complete
91 editor = ui.geteditor()
92 ui.system("%s \"%s\"" % (editor, patchfn),
93 environ={'HGUSER': ui.username()},
94 onerr=util.Abort, errprefix=_("edit failed"))
95 # Remove comment lines
96 patchfp = open(patchfn)
97 ncpatchfp = cStringIO.StringIO()
98 for line in patchfp:
99 if not line.startswith('#'):
100 ncpatchfp.write(line)
101 patchfp.close()
102 ncpatchfp.seek(0)
103 newpatches = patch.parsepatch(ncpatchfp)
104 finally:
105 os.unlink(patchfn)
106 del ncpatchfp
107 # Signal that the chunk shouldn't be applied as-is, but
108 # provide the new patch to be used instead.
109 ret = False
110 elif r == 3: # Skip
111 ret = skipfile = False
112 elif r == 4: # file (Record remaining)
113 ret = skipfile = True
114 elif r == 5: # done, skip remaining
115 ret = skipall = False
116 elif r == 6: # all
117 ret = skipall = True
118 elif r == 7: # quit
119 raise util.Abort(_('user quit'))
120 return ret, skipfile, skipall, newpatches
121
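# The prompt() helper above returns (ret, skipfile, skipall, newpatches) and
# short-circuits once a file-wide or session-wide answer is known.  A minimal
# standalone sketch of that sticky-answer protocol (hypothetical names, not
# Mercurial's API; the 'e'dit and 'q'uit branches are omitted):
def _answer(skipfile, skipall, ask):
    """Return (use_hunk, skipfile, skipall), reusing sticky answers."""
    if skipall is not None:          # 'a' (record all) or 'd' (done) was given
        return skipall, skipfile, skipall
    if skipfile is not None:         # 'f' or 's' was given for this file
        return skipfile, skipfile, skipall
    r = ask()                        # one of 'y', 'n', 's', 'f', 'd', 'a'
    if r == 'y':
        return True, skipfile, skipall
    if r == 'n':
        return False, skipfile, skipall
    if r == 's':                     # skip remaining changes to this file
        return False, False, skipall
    if r == 'f':                     # record remaining changes to this file
        return True, True, skipall
    if r == 'd':                     # done, skip remaining changes and files
        return False, skipfile, False
    if r == 'a':                     # record all remaining changes
        return True, skipfile, True
    raise ValueError('unexpected response %r' % r)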
122 seen = set()
123 applied = {} # 'filename' -> [] of chunks
124 skipfile, skipall = None, None
125 pos, total = 1, sum(len(h.hunks) for h in headers)
126 for h in headers:
127 pos += len(h.hunks)
128 skipfile = None
129 fixoffset = 0
130 hdr = ''.join(h.header)
131 if hdr in seen:
132 continue
133 seen.add(hdr)
134 if skipall is None:
135 h.pretty(ui)
136 msg = (_('examine changes to %s?') %
137 _(' and ').join("'%s'" % f for f in h.files()))
138 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
139 if not r:
140 continue
141 applied[h.filename()] = [h]
142 if h.allhunks():
143 applied[h.filename()] += h.hunks
144 continue
145 for i, chunk in enumerate(h.hunks):
146 if skipfile is None and skipall is None:
147 chunk.pretty(ui)
148 if total == 1:
149 msg = _("record this change to '%s'?") % chunk.filename()
150 else:
151 idx = pos - len(h.hunks) + i
152 msg = _("record change %d/%d to '%s'?") % (idx, total,
153 chunk.filename())
154 r, skipfile, skipall, newpatches = prompt(skipfile,
155 skipall, msg, chunk)
156 if r:
157 if fixoffset:
158 chunk = copy.copy(chunk)
159 chunk.toline += fixoffset
160 applied[chunk.filename()].append(chunk)
161 elif newpatches is not None:
162 for newpatch in newpatches:
163 for newhunk in newpatch.hunks:
164 if fixoffset:
165 newhunk.toline += fixoffset
166 applied[newhunk.filename()].append(newhunk)
167 else:
168 fixoffset += chunk.removed - chunk.added
169 return sum([h for h in applied.itervalues()
170 if h[0].special() or len(h) > 1], [])
171
19
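# When a hunk is skipped, later hunks in the same file still carry target line
# numbers computed against the fully patched file, so filterpatch() shifts them
# by the accumulated delta it tracks in 'fixoffset'.  A standalone sketch of
# that bookkeeping with a hypothetical Hunk class (not Mercurial's recordhunk):
import copy

class Hunk(object):
    def __init__(self, toline, added, removed):
        self.toline = toline         # first target line the hunk applies to
        self.added = added           # number of '+' lines
        self.removed = removed       # number of '-' lines

def selecthunks(hunks, wanted):
    """Return copies of the wanted hunks, shifted past the skipped ones."""
    kept, fixoffset = [], 0
    for h in hunks:
        if h in wanted:
            if fixoffset:
                h = copy.copy(h)     # do not mutate the caller's hunk
                h.toline += fixoffset
            kept.append(h)
        else:
            # skipping a hunk keeps its '-' lines and drops its '+' lines,
            # so everything below it moves by (removed - added) lines
            fixoffset += h.removed - h.added
    return kept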
172 @command("record",
20 @command("record",
173 # same options as commit + white space diff options
21 # same options as commit + white space diff options
174 commands.table['^commit|ci'][1][:] + commands.diffwsopts,
22 commands.table['^commit|ci'][1][:] + commands.diffwsopts,
175 _('hg record [OPTION]... [FILE]...'))
23 _('hg record [OPTION]... [FILE]...'))
176 def record(ui, repo, *pats, **opts):
24 def record(ui, repo, *pats, **opts):
177 '''interactively select changes to commit
25 '''interactively select changes to commit
178
26
179 If a list of files is omitted, all changes reported by :hg:`status`
27 If a list of files is omitted, all changes reported by :hg:`status`
180 will be candidates for recording.
28 will be candidates for recording.
181
29
182 See :hg:`help dates` for a list of formats valid for -d/--date.
30 See :hg:`help dates` for a list of formats valid for -d/--date.
183
31
184 You will be prompted for whether to record changes to each
32 You will be prompted for whether to record changes to each
185 modified file, and for files with multiple changes, for each
33 modified file, and for files with multiple changes, for each
186 change to use. For each query, the following responses are
34 change to use. For each query, the following responses are
187 possible::
35 possible::
188
36
189 y - record this change
37 y - record this change
190 n - skip this change
38 n - skip this change
191 e - edit this change manually
39 e - edit this change manually
192
40
193 s - skip remaining changes to this file
41 s - skip remaining changes to this file
194 f - record remaining changes to this file
42 f - record remaining changes to this file
195
43
196 d - done, skip remaining changes and files
44 d - done, skip remaining changes and files
197 a - record all changes to all remaining files
45 a - record all changes to all remaining files
198 q - quit, recording no changes
46 q - quit, recording no changes
199
47
200 ? - display help
48 ? - display help
201
49
202 This command is not available when committing a merge.'''
50 This command is not available when committing a merge.'''
203
51
204 dorecord(ui, repo, commands.commit, 'commit', False, *pats, **opts)
52 dorecord(ui, repo, commands.commit, 'commit', False, *pats, **opts)
205
53
206 def qrefresh(origfn, ui, repo, *pats, **opts):
54 def qrefresh(origfn, ui, repo, *pats, **opts):
207 if not opts['interactive']:
55 if not opts['interactive']:
208 return origfn(ui, repo, *pats, **opts)
56 return origfn(ui, repo, *pats, **opts)
209
57
210 mq = extensions.find('mq')
58 mq = extensions.find('mq')
211
59
212 def committomq(ui, repo, *pats, **opts):
60 def committomq(ui, repo, *pats, **opts):
213 # At this point the working copy contains only changes that
61 # At this point the working copy contains only changes that
214 # were accepted. All other changes were reverted.
62 # were accepted. All other changes were reverted.
215 # We can't pass *pats here since qrefresh will undo all other
63 # We can't pass *pats here since qrefresh will undo all other
216 # changed files in the patch that aren't in pats.
64 # changed files in the patch that aren't in pats.
217 mq.refresh(ui, repo, **opts)
65 mq.refresh(ui, repo, **opts)
218
66
219 # backup all changed files
67 # backup all changed files
220 dorecord(ui, repo, committomq, 'qrefresh', True, *pats, **opts)
68 dorecord(ui, repo, committomq, 'qrefresh', True, *pats, **opts)
221
69
222 # This command registration is replaced during uisetup().
70 # This command registration is replaced during uisetup().
223 @command('qrecord',
71 @command('qrecord',
224 [],
72 [],
225 _('hg qrecord [OPTION]... PATCH [FILE]...'),
73 _('hg qrecord [OPTION]... PATCH [FILE]...'),
226 inferrepo=True)
74 inferrepo=True)
227 def qrecord(ui, repo, patch, *pats, **opts):
75 def qrecord(ui, repo, patch, *pats, **opts):
228 '''interactively record a new patch
76 '''interactively record a new patch
229
77
230 See :hg:`help qnew` & :hg:`help record` for more information and
78 See :hg:`help qnew` & :hg:`help record` for more information and
231 usage.
79 usage.
232 '''
80 '''
233
81
234 try:
82 try:
235 mq = extensions.find('mq')
83 mq = extensions.find('mq')
236 except KeyError:
84 except KeyError:
237 raise util.Abort(_("'mq' extension not loaded"))
85 raise util.Abort(_("'mq' extension not loaded"))
238
86
239 repo.mq.checkpatchname(patch)
87 repo.mq.checkpatchname(patch)
240
88
241 def committomq(ui, repo, *pats, **opts):
89 def committomq(ui, repo, *pats, **opts):
242 opts['checkname'] = False
90 opts['checkname'] = False
243 mq.new(ui, repo, patch, *pats, **opts)
91 mq.new(ui, repo, patch, *pats, **opts)
244
92
245 dorecord(ui, repo, committomq, 'qnew', False, *pats, **opts)
93 dorecord(ui, repo, committomq, 'qnew', False, *pats, **opts)
246
94
247 def qnew(origfn, ui, repo, patch, *args, **opts):
95 def qnew(origfn, ui, repo, patch, *args, **opts):
248 if opts['interactive']:
96 if opts['interactive']:
249 return qrecord(ui, repo, patch, *args, **opts)
97 return qrecord(ui, repo, patch, *args, **opts)
250 return origfn(ui, repo, patch, *args, **opts)
98 return origfn(ui, repo, patch, *args, **opts)
251
99
252 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts):
100 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts):
253 if not ui.interactive():
101 if not ui.interactive():
254 raise util.Abort(_('running non-interactively, use %s instead') %
102 raise util.Abort(_('running non-interactively, use %s instead') %
255 cmdsuggest)
103 cmdsuggest)
256
104
257 # make sure username is set before going interactive
105 # make sure username is set before going interactive
258 if not opts.get('user'):
106 if not opts.get('user'):
259 ui.username() # raise exception, username not provided
107 ui.username() # raise exception, username not provided
260
108
261 def recordfunc(ui, repo, message, match, opts):
109 def recordfunc(ui, repo, message, match, opts):
262 """This is generic record driver.
110 """This is generic record driver.
263
111
264 Its job is to interactively filter local changes, and
112 Its job is to interactively filter local changes, and
265 accordingly prepare the working directory into a state in which the
113 accordingly prepare the working directory into a state in which the
266 job can be delegated to a non-interactive commit command such as
114 job can be delegated to a non-interactive commit command such as
267 'commit' or 'qrefresh'.
115 'commit' or 'qrefresh'.
268
116
269 After the actual job is done by the non-interactive command, the
117 After the actual job is done by the non-interactive command, the
270 working directory is restored to its original state.
118 working directory is restored to its original state.
271
119
272 In the end we'll record interesting changes, and everything else
120 In the end we'll record interesting changes, and everything else
273 will be left in place, so the user can continue working.
121 will be left in place, so the user can continue working.
274 """
122 """
275
123
276 cmdutil.checkunfinished(repo, commit=True)
124 cmdutil.checkunfinished(repo, commit=True)
277 merge = len(repo[None].parents()) > 1
125 merge = len(repo[None].parents()) > 1
278 if merge:
126 if merge:
279 raise util.Abort(_('cannot partially commit a merge '
127 raise util.Abort(_('cannot partially commit a merge '
280 '(use "hg commit" instead)'))
128 '(use "hg commit" instead)'))
281
129
282 status = repo.status(match=match)
130 status = repo.status(match=match)
283 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
131 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
284 diffopts.nodates = True
132 diffopts.nodates = True
285 diffopts.git = True
133 diffopts.git = True
286 originalchunks = patch.diff(repo, changes=status, opts=diffopts)
134 originalchunks = patch.diff(repo, changes=status, opts=diffopts)
287 fp = cStringIO.StringIO()
135 fp = cStringIO.StringIO()
288 fp.write(''.join(originalchunks))
136 fp.write(''.join(originalchunks))
289 fp.seek(0)
137 fp.seek(0)
290
138
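# recordfunc renders the working-copy changes to a git-style diff, buffers the
# text, and re-parses it into header/hunk objects it can filter.  A stdlib-only
# sketch of that render-then-reparse round trip, with difflib standing in for
# patch.diff/patch.parsepatch (an illustration, not the real parser):
import difflib

def parseunified(oldlines, newlines, path):
    """Render a unified diff and split it back into (header, hunks)."""
    text = ''.join(difflib.unified_diff(oldlines, newlines,
                                        fromfile='a/' + path,
                                        tofile='b/' + path))
    header, hunks, cur = [], [], None
    for line in text.splitlines(True):
        if line.startswith('@@ '):
            cur = [line]             # start of a new hunk
            hunks.append(cur)
        elif cur is not None:
            cur.append(line)         # body of the current hunk
        else:
            header.append(line)      # '---'/'+++' file header lines
    return header, hunks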
291 # 1. filter patch, so we have the subset of it we intend to apply
139 # 1. filter patch, so we have the subset of it we intend to apply
292 try:
140 try:
293 chunks = filterpatch(ui, patch.parsepatch(fp))
141 chunks = patch.filterpatch(ui, patch.parsepatch(fp))
294 except patch.PatchError, err:
142 except patch.PatchError, err:
295 raise util.Abort(_('error parsing patch: %s') % err)
143 raise util.Abort(_('error parsing patch: %s') % err)
296
144
297 del fp
145 del fp
298
146
299 contenders = set()
147 contenders = set()
300 for h in chunks:
148 for h in chunks:
301 try:
149 try:
302 contenders.update(set(h.files()))
150 contenders.update(set(h.files()))
303 except AttributeError:
151 except AttributeError:
304 pass
152 pass
305
153
306 changed = status.modified + status.added + status.removed
154 changed = status.modified + status.added + status.removed
307 newfiles = [f for f in changed if f in contenders]
155 newfiles = [f for f in changed if f in contenders]
308 if not newfiles:
156 if not newfiles:
309 ui.status(_('no changes to record\n'))
157 ui.status(_('no changes to record\n'))
310 return 0
158 return 0
311
159
312 newandmodifiedfiles = set()
160 newandmodifiedfiles = set()
313 for h in chunks:
161 for h in chunks:
314 ishunk = isinstance(h, patch.recordhunk)
162 ishunk = isinstance(h, patch.recordhunk)
315 isnew = h.filename() in status.added
163 isnew = h.filename() in status.added
316 if ishunk and isnew and not h in originalchunks:
164 if ishunk and isnew and not h in originalchunks:
317 newandmodifiedfiles.add(h.filename())
165 newandmodifiedfiles.add(h.filename())
318
166
319 modified = set(status.modified)
167 modified = set(status.modified)
320
168
321 # 2. backup changed files, so we can restore them in the end
169 # 2. backup changed files, so we can restore them in the end
322
170
323 if backupall:
171 if backupall:
324 tobackup = changed
172 tobackup = changed
325 else:
173 else:
326 tobackup = [f for f in newfiles
174 tobackup = [f for f in newfiles
327 if f in modified or f in newandmodifiedfiles]
175 if f in modified or f in newandmodifiedfiles]
328
176
329 backups = {}
177 backups = {}
330 if tobackup:
178 if tobackup:
331 backupdir = repo.join('record-backups')
179 backupdir = repo.join('record-backups')
332 try:
180 try:
333 os.mkdir(backupdir)
181 os.mkdir(backupdir)
334 except OSError, err:
182 except OSError, err:
335 if err.errno != errno.EEXIST:
183 if err.errno != errno.EEXIST:
336 raise
184 raise
337 try:
185 try:
338 # backup continues
186 # backup continues
339 for f in tobackup:
187 for f in tobackup:
340 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
188 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
341 dir=backupdir)
189 dir=backupdir)
342 os.close(fd)
190 os.close(fd)
343 ui.debug('backup %r as %r\n' % (f, tmpname))
191 ui.debug('backup %r as %r\n' % (f, tmpname))
344 util.copyfile(repo.wjoin(f), tmpname)
192 util.copyfile(repo.wjoin(f), tmpname)
345 shutil.copystat(repo.wjoin(f), tmpname)
193 shutil.copystat(repo.wjoin(f), tmpname)
346 backups[f] = tmpname
194 backups[f] = tmpname
347
195
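# Step 2 copies every file that is about to be touched into a backup directory
# before the revert+patch dance, and step 5 restores the copies afterwards no
# matter what happened in between.  A stdlib sketch of that pattern, with
# shutil.copy2 standing in for util.copyfile + shutil.copystat:
import os, shutil, tempfile

def backupfiles(paths, backupdir):
    """Copy each file to a unique temp name; return {path: backup_path}."""
    if not os.path.isdir(backupdir):
        os.mkdir(backupdir)
    backups = {}
    for path in paths:
        fd, tmpname = tempfile.mkstemp(prefix=path.replace('/', '_') + '.',
                                       dir=backupdir)
        os.close(fd)
        shutil.copy2(path, tmpname)      # contents plus mtime/mode
        backups[path] = tmpname
    return backups

def restorefiles(backups, backupdir):
    """Put every backed-up file back and remove the backup directory."""
    for path, tmpname in backups.items():
        shutil.copy2(tmpname, path)
        os.unlink(tmpname)
    try:
        os.rmdir(backupdir)
    except OSError:
        pass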
348 fp = cStringIO.StringIO()
196 fp = cStringIO.StringIO()
349 for c in chunks:
197 for c in chunks:
350 fname = c.filename()
198 fname = c.filename()
351 if fname in backups or fname in newandmodifiedfiles:
199 if fname in backups or fname in newandmodifiedfiles:
352 c.write(fp)
200 c.write(fp)
353 dopatch = fp.tell()
201 dopatch = fp.tell()
354 fp.seek(0)
202 fp.seek(0)
355
203
356 [os.unlink(c) for c in newandmodifiedfiles]
204 [os.unlink(c) for c in newandmodifiedfiles]
357
205
358 # 3a. apply filtered patch to clean repo (clean)
206 # 3a. apply filtered patch to clean repo (clean)
359 if backups:
207 if backups:
360 hg.revert(repo, repo.dirstate.p1(),
208 hg.revert(repo, repo.dirstate.p1(),
361 lambda key: key in backups)
209 lambda key: key in backups)
362
210
363 # 3b. (apply)
211 # 3b. (apply)
364 if dopatch:
212 if dopatch:
365 try:
213 try:
366 ui.debug('applying patch\n')
214 ui.debug('applying patch\n')
367 ui.debug(fp.getvalue())
215 ui.debug(fp.getvalue())
368 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
216 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
369 except patch.PatchError, err:
217 except patch.PatchError, err:
370 raise util.Abort(str(err))
218 raise util.Abort(str(err))
371 del fp
219 del fp
372
220
373 # 4. We prepared working directory according to filtered
221 # 4. We prepared working directory according to filtered
374 # patch. Now is the time to delegate the job to
222 # patch. Now is the time to delegate the job to
375 # commit/qrefresh or the like!
223 # commit/qrefresh or the like!
376
224
377 # Make all of the pathnames absolute.
225 # Make all of the pathnames absolute.
378 newfiles = [repo.wjoin(nf) for nf in newfiles]
226 newfiles = [repo.wjoin(nf) for nf in newfiles]
379 commitfunc(ui, repo, *newfiles, **opts)
227 commitfunc(ui, repo, *newfiles, **opts)
380
228
381 return 0
229 return 0
382 finally:
230 finally:
383 # 5. finally restore backed-up files
231 # 5. finally restore backed-up files
384 try:
232 try:
385 for realname, tmpname in backups.iteritems():
233 for realname, tmpname in backups.iteritems():
386 ui.debug('restoring %r to %r\n' % (tmpname, realname))
234 ui.debug('restoring %r to %r\n' % (tmpname, realname))
387 util.copyfile(tmpname, repo.wjoin(realname))
235 util.copyfile(tmpname, repo.wjoin(realname))
388 # Our calls to copystat() here and above are a
236 # Our calls to copystat() here and above are a
389 # hack to trick any editors that have f open into thinking
237 # hack to trick any editors that have f open into thinking
390 # that we haven't modified them.
238 # that we haven't modified them.
391 #
239 #
392 # Also note that this is racy, as an editor could
240 # Also note that this is racy, as an editor could
393 # notice the file's mtime before we've finished
241 # notice the file's mtime before we've finished
394 # writing it.
242 # writing it.
395 shutil.copystat(tmpname, repo.wjoin(realname))
243 shutil.copystat(tmpname, repo.wjoin(realname))
396 os.unlink(tmpname)
244 os.unlink(tmpname)
397 if tobackup:
245 if tobackup:
398 os.rmdir(backupdir)
246 os.rmdir(backupdir)
399 except OSError:
247 except OSError:
400 pass
248 pass
401
249
402 # wrap ui.write so diff output can be labeled/colorized
250 # wrap ui.write so diff output can be labeled/colorized
403 def wrapwrite(orig, *args, **kw):
251 def wrapwrite(orig, *args, **kw):
404 label = kw.pop('label', '')
252 label = kw.pop('label', '')
405 for chunk, l in patch.difflabel(lambda: args):
253 for chunk, l in patch.difflabel(lambda: args):
406 orig(chunk, label=label + l)
254 orig(chunk, label=label + l)
407 oldwrite = ui.write
255 oldwrite = ui.write
408 extensions.wrapfunction(ui, 'write', wrapwrite)
256 extensions.wrapfunction(ui, 'write', wrapwrite)
409 try:
257 try:
410 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
258 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
411 finally:
259 finally:
412 ui.write = oldwrite
260 ui.write = oldwrite
413
261
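# dorecord temporarily wraps ui.write so any diff text printed while prompting
# goes through patch.difflabel and picks up colour labels, then restores the
# original method in a finally block.  A generic wrap-and-restore sketch with a
# hypothetical labeler callable (not Mercurial's extensions.wrapfunction):
def wrapwrite_sketch(obj, labeler):
    """Route obj.write through labeler(text) -> [(chunk, label), ...]."""
    origwrite = obj.write
    def wrapped(*args, **kw):
        label = kw.pop('label', '')
        for chunk, extra in labeler(''.join(args)):
            origwrite(chunk, label=label + extra)
    obj.write = wrapped
    return origwrite

# typical use:
#     oldwrite = wrapwrite_sketch(ui, labeler)
#     try:
#         ...prompt the user...
#     finally:
#         ui.write = oldwrite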
414 def uisetup(ui):
262 def uisetup(ui):
415 try:
263 try:
416 mq = extensions.find('mq')
264 mq = extensions.find('mq')
417 except KeyError:
265 except KeyError:
418 return
266 return
419
267
420 cmdtable["qrecord"] = \
268 cmdtable["qrecord"] = \
421 (qrecord,
269 (qrecord,
422 # same options as qnew, but copy them so we don't get
270 # same options as qnew, but copy them so we don't get
423 # -i/--interactive for qrecord and add white space diff options
271 # -i/--interactive for qrecord and add white space diff options
424 mq.cmdtable['^qnew'][1][:] + commands.diffwsopts,
272 mq.cmdtable['^qnew'][1][:] + commands.diffwsopts,
425 _('hg qrecord [OPTION]... PATCH [FILE]...'))
273 _('hg qrecord [OPTION]... PATCH [FILE]...'))
426
274
427 _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
275 _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
428 _wrapcmd('qrefresh', mq.cmdtable, qrefresh,
276 _wrapcmd('qrefresh', mq.cmdtable, qrefresh,
429 _("interactively select changes to refresh"))
277 _("interactively select changes to refresh"))
430
278
431 def _wrapcmd(cmd, table, wrapfn, msg):
279 def _wrapcmd(cmd, table, wrapfn, msg):
432 entry = extensions.wrapcommand(table, cmd, wrapfn)
280 entry = extensions.wrapcommand(table, cmd, wrapfn)
433 entry[1].append(('i', 'interactive', None, msg))
281 entry[1].append(('i', 'interactive', None, msg))
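# _wrapcmd swaps an existing mq command for a wrapper that receives the
# original function first, and appends an -i/--interactive flag to the
# command's option list.  A simplified sketch over a plain {name: (func, opts)}
# table (a hypothetical structure, not the full Mercurial cmdtable format):
def wrapcmd_sketch(table, name, wrapper, optmsg):
    """Replace table[name] with wrapper(orig, ...) and add an option flag."""
    origfn, opts = table[name]
    def wrapped(*args, **kwargs):
        return wrapper(origfn, *args, **kwargs)
    table[name] = (wrapped, opts + [('i', 'interactive', None, optmsg)])
    return table[name]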
@@ -1,2226 +1,2378
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email, os, errno, re, posixpath
9 import cStringIO, email, os, errno, re, posixpath, copy
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11 # On python2.4 you have to import these by name or they fail to
11 # On python2.4 you have to import these by name or they fail to
12 # load. This was not a problem on Python 2.7.
12 # load. This was not a problem on Python 2.7.
13 import email.Generator
13 import email.Generator
14 import email.Parser
14 import email.Parser
15
15
16 from i18n import _
16 from i18n import _
17 from node import hex, short
17 from node import hex, short
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19
19
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
22
22
23 class PatchError(Exception):
23 class PatchError(Exception):
24 pass
24 pass
25
25
26
26
27 # public functions
27 # public functions
28
28
29 def split(stream):
29 def split(stream):
30 '''return an iterator of individual patches from a stream'''
30 '''return an iterator of individual patches from a stream'''
31 def isheader(line, inheader):
31 def isheader(line, inheader):
32 if inheader and line[0] in (' ', '\t'):
32 if inheader and line[0] in (' ', '\t'):
33 # continuation
33 # continuation
34 return True
34 return True
35 if line[0] in (' ', '-', '+'):
35 if line[0] in (' ', '-', '+'):
36 # diff line - don't check for header pattern in there
36 # diff line - don't check for header pattern in there
37 return False
37 return False
38 l = line.split(': ', 1)
38 l = line.split(': ', 1)
39 return len(l) == 2 and ' ' not in l[0]
39 return len(l) == 2 and ' ' not in l[0]
40
40
41 def chunk(lines):
41 def chunk(lines):
42 return cStringIO.StringIO(''.join(lines))
42 return cStringIO.StringIO(''.join(lines))
43
43
44 def hgsplit(stream, cur):
44 def hgsplit(stream, cur):
45 inheader = True
45 inheader = True
46
46
47 for line in stream:
47 for line in stream:
48 if not line.strip():
48 if not line.strip():
49 inheader = False
49 inheader = False
50 if not inheader and line.startswith('# HG changeset patch'):
50 if not inheader and line.startswith('# HG changeset patch'):
51 yield chunk(cur)
51 yield chunk(cur)
52 cur = []
52 cur = []
53 inheader = True
53 inheader = True
54
54
55 cur.append(line)
55 cur.append(line)
56
56
57 if cur:
57 if cur:
58 yield chunk(cur)
58 yield chunk(cur)
59
59
60 def mboxsplit(stream, cur):
60 def mboxsplit(stream, cur):
61 for line in stream:
61 for line in stream:
62 if line.startswith('From '):
62 if line.startswith('From '):
63 for c in split(chunk(cur[1:])):
63 for c in split(chunk(cur[1:])):
64 yield c
64 yield c
65 cur = []
65 cur = []
66
66
67 cur.append(line)
67 cur.append(line)
68
68
69 if cur:
69 if cur:
70 for c in split(chunk(cur[1:])):
70 for c in split(chunk(cur[1:])):
71 yield c
71 yield c
72
72
73 def mimesplit(stream, cur):
73 def mimesplit(stream, cur):
74 def msgfp(m):
74 def msgfp(m):
75 fp = cStringIO.StringIO()
75 fp = cStringIO.StringIO()
76 g = email.Generator.Generator(fp, mangle_from_=False)
76 g = email.Generator.Generator(fp, mangle_from_=False)
77 g.flatten(m)
77 g.flatten(m)
78 fp.seek(0)
78 fp.seek(0)
79 return fp
79 return fp
80
80
81 for line in stream:
81 for line in stream:
82 cur.append(line)
82 cur.append(line)
83 c = chunk(cur)
83 c = chunk(cur)
84
84
85 m = email.Parser.Parser().parse(c)
85 m = email.Parser.Parser().parse(c)
86 if not m.is_multipart():
86 if not m.is_multipart():
87 yield msgfp(m)
87 yield msgfp(m)
88 else:
88 else:
89 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
89 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
90 for part in m.walk():
90 for part in m.walk():
91 ct = part.get_content_type()
91 ct = part.get_content_type()
92 if ct not in ok_types:
92 if ct not in ok_types:
93 continue
93 continue
94 yield msgfp(part)
94 yield msgfp(part)
95
95
96 def headersplit(stream, cur):
96 def headersplit(stream, cur):
97 inheader = False
97 inheader = False
98
98
99 for line in stream:
99 for line in stream:
100 if not inheader and isheader(line, inheader):
100 if not inheader and isheader(line, inheader):
101 yield chunk(cur)
101 yield chunk(cur)
102 cur = []
102 cur = []
103 inheader = True
103 inheader = True
104 if inheader and not isheader(line, inheader):
104 if inheader and not isheader(line, inheader):
105 inheader = False
105 inheader = False
106
106
107 cur.append(line)
107 cur.append(line)
108
108
109 if cur:
109 if cur:
110 yield chunk(cur)
110 yield chunk(cur)
111
111
112 def remainder(cur):
112 def remainder(cur):
113 yield chunk(cur)
113 yield chunk(cur)
114
114
115 class fiter(object):
115 class fiter(object):
116 def __init__(self, fp):
116 def __init__(self, fp):
117 self.fp = fp
117 self.fp = fp
118
118
119 def __iter__(self):
119 def __iter__(self):
120 return self
120 return self
121
121
122 def next(self):
122 def next(self):
123 l = self.fp.readline()
123 l = self.fp.readline()
124 if not l:
124 if not l:
125 raise StopIteration
125 raise StopIteration
126 return l
126 return l
127
127
128 inheader = False
128 inheader = False
129 cur = []
129 cur = []
130
130
131 mimeheaders = ['content-type']
131 mimeheaders = ['content-type']
132
132
133 if not util.safehasattr(stream, 'next'):
133 if not util.safehasattr(stream, 'next'):
134 # http responses, for example, have readline but not next
134 # http responses, for example, have readline but not next
135 stream = fiter(stream)
135 stream = fiter(stream)
136
136
137 for line in stream:
137 for line in stream:
138 cur.append(line)
138 cur.append(line)
139 if line.startswith('# HG changeset patch'):
139 if line.startswith('# HG changeset patch'):
140 return hgsplit(stream, cur)
140 return hgsplit(stream, cur)
141 elif line.startswith('From '):
141 elif line.startswith('From '):
142 return mboxsplit(stream, cur)
142 return mboxsplit(stream, cur)
143 elif isheader(line, inheader):
143 elif isheader(line, inheader):
144 inheader = True
144 inheader = True
145 if line.split(':', 1)[0].lower() in mimeheaders:
145 if line.split(':', 1)[0].lower() in mimeheaders:
146 # let email parser handle this
146 # let email parser handle this
147 return mimesplit(stream, cur)
147 return mimesplit(stream, cur)
148 elif line.startswith('--- ') and inheader:
148 elif line.startswith('--- ') and inheader:
149 # No evil headers seen before the diff starts, split by hand
149 # No evil headers seen before the diff starts, split by hand
150 return headersplit(stream, cur)
150 return headersplit(stream, cur)
151 # Not enough info, keep reading
151 # Not enough info, keep reading
152
152
153 # if we are here, we have a very plain patch
153 # if we are here, we have a very plain patch
154 return remainder(cur)
154 return remainder(cur)
155
155
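# split() decides how to carve the stream up by sniffing the first
# recognisable line: an '# HG changeset patch' marker, an mbox 'From '
# separator, an RFC 2822-style header, or a bare '--- ' diff start.  A compact
# standalone sketch of that sniffing order over a list of lines (hypothetical
# classifier names, not the real dispatch functions):
def sniff(lines):
    """Classify a patch-ish text by its earliest recognisable marker."""
    def isheader(line, inheader):
        if inheader and line[:1] in (' ', '\t'):
            return True               # header continuation line
        if line[:1] in (' ', '-', '+'):
            return False              # already inside diff content
        key, sep, _rest = line.partition(': ')
        return bool(sep) and ' ' not in key
    inheader = False
    for line in lines:
        if line.startswith('# HG changeset patch'):
            return 'hg-export'
        elif line.startswith('From '):
            return 'mbox'
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() == 'content-type':
                return 'mime'
        elif line.startswith('--- ') and inheader:
            return 'headers-then-diff'
    return 'plain'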
156 def extract(ui, fileobj):
156 def extract(ui, fileobj):
157 '''extract patch from data read from fileobj.
157 '''extract patch from data read from fileobj.
158
158
159 patch can be a normal patch or contained in an email message.
159 patch can be a normal patch or contained in an email message.
160
160
161 return tuple (filename, message, user, date, branch, node, p1, p2).
161 return tuple (filename, message, user, date, branch, node, p1, p2).
162 Any item in the returned tuple can be None. If filename is None,
162 Any item in the returned tuple can be None. If filename is None,
163 fileobj did not contain a patch. Caller must unlink filename when done.'''
163 fileobj did not contain a patch. Caller must unlink filename when done.'''
164
164
165 # attempt to detect the start of a patch
165 # attempt to detect the start of a patch
166 # (this heuristic is borrowed from quilt)
166 # (this heuristic is borrowed from quilt)
167 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
167 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
168 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
168 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
169 r'---[ \t].*?^\+\+\+[ \t]|'
169 r'---[ \t].*?^\+\+\+[ \t]|'
170 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
170 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
171
171
172 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
172 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
173 tmpfp = os.fdopen(fd, 'w')
173 tmpfp = os.fdopen(fd, 'w')
174 try:
174 try:
175 msg = email.Parser.Parser().parse(fileobj)
175 msg = email.Parser.Parser().parse(fileobj)
176
176
177 subject = msg['Subject']
177 subject = msg['Subject']
178 user = msg['From']
178 user = msg['From']
179 if not subject and not user:
179 if not subject and not user:
180 # Not an email, restore parsed headers if any
180 # Not an email, restore parsed headers if any
181 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
181 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
182
182
183 # should try to parse msg['Date']
183 # should try to parse msg['Date']
184 date = None
184 date = None
185 nodeid = None
185 nodeid = None
186 branch = None
186 branch = None
187 parents = []
187 parents = []
188
188
189 if subject:
189 if subject:
190 if subject.startswith('[PATCH'):
190 if subject.startswith('[PATCH'):
191 pend = subject.find(']')
191 pend = subject.find(']')
192 if pend >= 0:
192 if pend >= 0:
193 subject = subject[pend + 1:].lstrip()
193 subject = subject[pend + 1:].lstrip()
194 subject = re.sub(r'\n[ \t]+', ' ', subject)
194 subject = re.sub(r'\n[ \t]+', ' ', subject)
195 ui.debug('Subject: %s\n' % subject)
195 ui.debug('Subject: %s\n' % subject)
196 if user:
196 if user:
197 ui.debug('From: %s\n' % user)
197 ui.debug('From: %s\n' % user)
198 diffs_seen = 0
198 diffs_seen = 0
199 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
199 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
200 message = ''
200 message = ''
201 for part in msg.walk():
201 for part in msg.walk():
202 content_type = part.get_content_type()
202 content_type = part.get_content_type()
203 ui.debug('Content-Type: %s\n' % content_type)
203 ui.debug('Content-Type: %s\n' % content_type)
204 if content_type not in ok_types:
204 if content_type not in ok_types:
205 continue
205 continue
206 payload = part.get_payload(decode=True)
206 payload = part.get_payload(decode=True)
207 m = diffre.search(payload)
207 m = diffre.search(payload)
208 if m:
208 if m:
209 hgpatch = False
209 hgpatch = False
210 hgpatchheader = False
210 hgpatchheader = False
211 ignoretext = False
211 ignoretext = False
212
212
213 ui.debug('found patch at byte %d\n' % m.start(0))
213 ui.debug('found patch at byte %d\n' % m.start(0))
214 diffs_seen += 1
214 diffs_seen += 1
215 cfp = cStringIO.StringIO()
215 cfp = cStringIO.StringIO()
216 for line in payload[:m.start(0)].splitlines():
216 for line in payload[:m.start(0)].splitlines():
217 if line.startswith('# HG changeset patch') and not hgpatch:
217 if line.startswith('# HG changeset patch') and not hgpatch:
218 ui.debug('patch generated by hg export\n')
218 ui.debug('patch generated by hg export\n')
219 hgpatch = True
219 hgpatch = True
220 hgpatchheader = True
220 hgpatchheader = True
221 # drop earlier commit message content
221 # drop earlier commit message content
222 cfp.seek(0)
222 cfp.seek(0)
223 cfp.truncate()
223 cfp.truncate()
224 subject = None
224 subject = None
225 elif hgpatchheader:
225 elif hgpatchheader:
226 if line.startswith('# User '):
226 if line.startswith('# User '):
227 user = line[7:]
227 user = line[7:]
228 ui.debug('From: %s\n' % user)
228 ui.debug('From: %s\n' % user)
229 elif line.startswith("# Date "):
229 elif line.startswith("# Date "):
230 date = line[7:]
230 date = line[7:]
231 elif line.startswith("# Branch "):
231 elif line.startswith("# Branch "):
232 branch = line[9:]
232 branch = line[9:]
233 elif line.startswith("# Node ID "):
233 elif line.startswith("# Node ID "):
234 nodeid = line[10:]
234 nodeid = line[10:]
235 elif line.startswith("# Parent "):
235 elif line.startswith("# Parent "):
236 parents.append(line[9:].lstrip())
236 parents.append(line[9:].lstrip())
237 elif not line.startswith("# "):
237 elif not line.startswith("# "):
238 hgpatchheader = False
238 hgpatchheader = False
239 elif line == '---':
239 elif line == '---':
240 ignoretext = True
240 ignoretext = True
241 if not hgpatchheader and not ignoretext:
241 if not hgpatchheader and not ignoretext:
242 cfp.write(line)
242 cfp.write(line)
243 cfp.write('\n')
243 cfp.write('\n')
244 message = cfp.getvalue()
244 message = cfp.getvalue()
245 if tmpfp:
245 if tmpfp:
246 tmpfp.write(payload)
246 tmpfp.write(payload)
247 if not payload.endswith('\n'):
247 if not payload.endswith('\n'):
248 tmpfp.write('\n')
248 tmpfp.write('\n')
249 elif not diffs_seen and message and content_type == 'text/plain':
249 elif not diffs_seen and message and content_type == 'text/plain':
250 message += '\n' + payload
250 message += '\n' + payload
251 except: # re-raises
251 except: # re-raises
252 tmpfp.close()
252 tmpfp.close()
253 os.unlink(tmpname)
253 os.unlink(tmpname)
254 raise
254 raise
255
255
256 if subject and not message.startswith(subject):
256 if subject and not message.startswith(subject):
257 message = '%s\n%s' % (subject, message)
257 message = '%s\n%s' % (subject, message)
258 tmpfp.close()
258 tmpfp.close()
259 if not diffs_seen:
259 if not diffs_seen:
260 os.unlink(tmpname)
260 os.unlink(tmpname)
261 return None, message, user, date, branch, None, None, None
261 return None, message, user, date, branch, None, None, None
262 p1 = parents and parents.pop(0) or None
262 p1 = parents and parents.pop(0) or None
263 p2 = parents and parents.pop(0) or None
263 p2 = parents and parents.pop(0) or None
264 return tmpname, message, user, date, branch, nodeid, p1, p2
264 return tmpname, message, user, date, branch, nodeid, p1, p2
265
265
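# extract() pulls the 'hg export' metadata out of the text that precedes the
# first diff: '# User', '# Date', '# Branch', '# Node ID' and '# Parent'
# lines, with everything up to an optional '---' separator collected as the
# commit message.  A standalone sketch of that header scan (same field names,
# simplified control flow, not the real extract()):
def parse_hgpatch_header(text):
    """Return (metadata, message) from 'hg export'-style patch text."""
    meta = {'user': None, 'date': None, 'branch': None,
            'nodeid': None, 'parents': []}
    message, inheader = [], False
    for line in text.splitlines():
        if line.startswith('# HG changeset patch'):
            inheader = True
            message = []                     # drop earlier message content
        elif inheader and line.startswith('# User '):
            meta['user'] = line[7:]
        elif inheader and line.startswith('# Date '):
            meta['date'] = line[7:]
        elif inheader and line.startswith('# Branch '):
            meta['branch'] = line[9:]
        elif inheader and line.startswith('# Node ID '):
            meta['nodeid'] = line[10:]
        elif inheader and line.startswith('# Parent '):
            meta['parents'].append(line[9:].strip())
        elif inheader and line.startswith('# '):
            pass                             # unknown header comment, ignore
        elif line == '---':
            break                            # patchbomb separator ends message
        else:
            inheader = False
            message.append(line)
    return meta, '\n'.join(message)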
266 class patchmeta(object):
266 class patchmeta(object):
267 """Patched file metadata
267 """Patched file metadata
268
268
269 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
269 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
270 or COPY. 'path' is patched file path. 'oldpath' is set to the
270 or COPY. 'path' is patched file path. 'oldpath' is set to the
271 origin file when 'op' is either COPY or RENAME, None otherwise. If
271 origin file when 'op' is either COPY or RENAME, None otherwise. If
272 file mode is changed, 'mode' is a tuple (islink, isexec) where
272 file mode is changed, 'mode' is a tuple (islink, isexec) where
273 'islink' is True if the file is a symlink and 'isexec' is True if
273 'islink' is True if the file is a symlink and 'isexec' is True if
274 the file is executable. Otherwise, 'mode' is None.
274 the file is executable. Otherwise, 'mode' is None.
275 """
275 """
276 def __init__(self, path):
276 def __init__(self, path):
277 self.path = path
277 self.path = path
278 self.oldpath = None
278 self.oldpath = None
279 self.mode = None
279 self.mode = None
280 self.op = 'MODIFY'
280 self.op = 'MODIFY'
281 self.binary = False
281 self.binary = False
282
282
283 def setmode(self, mode):
283 def setmode(self, mode):
284 islink = mode & 020000
284 islink = mode & 020000
285 isexec = mode & 0100
285 isexec = mode & 0100
286 self.mode = (islink, isexec)
286 self.mode = (islink, isexec)
287
287
288 def copy(self):
288 def copy(self):
289 other = patchmeta(self.path)
289 other = patchmeta(self.path)
290 other.oldpath = self.oldpath
290 other.oldpath = self.oldpath
291 other.mode = self.mode
291 other.mode = self.mode
292 other.op = self.op
292 other.op = self.op
293 other.binary = self.binary
293 other.binary = self.binary
294 return other
294 return other
295
295
296 def _ispatchinga(self, afile):
296 def _ispatchinga(self, afile):
297 if afile == '/dev/null':
297 if afile == '/dev/null':
298 return self.op == 'ADD'
298 return self.op == 'ADD'
299 return afile == 'a/' + (self.oldpath or self.path)
299 return afile == 'a/' + (self.oldpath or self.path)
300
300
301 def _ispatchingb(self, bfile):
301 def _ispatchingb(self, bfile):
302 if bfile == '/dev/null':
302 if bfile == '/dev/null':
303 return self.op == 'DELETE'
303 return self.op == 'DELETE'
304 return bfile == 'b/' + self.path
304 return bfile == 'b/' + self.path
305
305
306 def ispatching(self, afile, bfile):
306 def ispatching(self, afile, bfile):
307 return self._ispatchinga(afile) and self._ispatchingb(bfile)
307 return self._ispatchinga(afile) and self._ispatchingb(bfile)
308
308
309 def __repr__(self):
309 def __repr__(self):
310 return "<patchmeta %s %r>" % (self.op, self.path)
310 return "<patchmeta %s %r>" % (self.op, self.path)
311
311
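# patchmeta.setmode() decodes the octal mode from 'new file mode 100755'-style
# lines into an (islink, isexec) pair.  The same test written with portable
# octal literals, as a small standalone helper:
def decode_mode(mode):
    """Return (islink, isexec) for a git-style octal file mode."""
    islink = bool(mode & 0o20000)   # bit that separates 120000 (symlink)
                                    # from 100xxx (regular file)
    isexec = bool(mode & 0o100)     # owner-execute bit, e.g. 100755
    return islink, isexec

# e.g. decode_mode(int('120000', 8)) -> (True, False)
#      decode_mode(int('100755', 8)) -> (False, True)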
312 def readgitpatch(lr):
312 def readgitpatch(lr):
313 """extract git-style metadata about patches from <patchname>"""
313 """extract git-style metadata about patches from <patchname>"""
314
314
315 # Filter patch for git information
315 # Filter patch for git information
316 gp = None
316 gp = None
317 gitpatches = []
317 gitpatches = []
318 for line in lr:
318 for line in lr:
319 line = line.rstrip(' \r\n')
319 line = line.rstrip(' \r\n')
320 if line.startswith('diff --git a/'):
320 if line.startswith('diff --git a/'):
321 m = gitre.match(line)
321 m = gitre.match(line)
322 if m:
322 if m:
323 if gp:
323 if gp:
324 gitpatches.append(gp)
324 gitpatches.append(gp)
325 dst = m.group(2)
325 dst = m.group(2)
326 gp = patchmeta(dst)
326 gp = patchmeta(dst)
327 elif gp:
327 elif gp:
328 if line.startswith('--- '):
328 if line.startswith('--- '):
329 gitpatches.append(gp)
329 gitpatches.append(gp)
330 gp = None
330 gp = None
331 continue
331 continue
332 if line.startswith('rename from '):
332 if line.startswith('rename from '):
333 gp.op = 'RENAME'
333 gp.op = 'RENAME'
334 gp.oldpath = line[12:]
334 gp.oldpath = line[12:]
335 elif line.startswith('rename to '):
335 elif line.startswith('rename to '):
336 gp.path = line[10:]
336 gp.path = line[10:]
337 elif line.startswith('copy from '):
337 elif line.startswith('copy from '):
338 gp.op = 'COPY'
338 gp.op = 'COPY'
339 gp.oldpath = line[10:]
339 gp.oldpath = line[10:]
340 elif line.startswith('copy to '):
340 elif line.startswith('copy to '):
341 gp.path = line[8:]
341 gp.path = line[8:]
342 elif line.startswith('deleted file'):
342 elif line.startswith('deleted file'):
343 gp.op = 'DELETE'
343 gp.op = 'DELETE'
344 elif line.startswith('new file mode '):
344 elif line.startswith('new file mode '):
345 gp.op = 'ADD'
345 gp.op = 'ADD'
346 gp.setmode(int(line[-6:], 8))
346 gp.setmode(int(line[-6:], 8))
347 elif line.startswith('new mode '):
347 elif line.startswith('new mode '):
348 gp.setmode(int(line[-6:], 8))
348 gp.setmode(int(line[-6:], 8))
349 elif line.startswith('GIT binary patch'):
349 elif line.startswith('GIT binary patch'):
350 gp.binary = True
350 gp.binary = True
351 if gp:
351 if gp:
352 gitpatches.append(gp)
352 gitpatches.append(gp)
353
353
354 return gitpatches
354 return gitpatches
355
355
356 class linereader(object):
356 class linereader(object):
357 # simple class to allow pushing lines back into the input stream
357 # simple class to allow pushing lines back into the input stream
358 def __init__(self, fp):
358 def __init__(self, fp):
359 self.fp = fp
359 self.fp = fp
360 self.buf = []
360 self.buf = []
361
361
362 def push(self, line):
362 def push(self, line):
363 if line is not None:
363 if line is not None:
364 self.buf.append(line)
364 self.buf.append(line)
365
365
366 def readline(self):
366 def readline(self):
367 if self.buf:
367 if self.buf:
368 l = self.buf[0]
368 l = self.buf[0]
369 del self.buf[0]
369 del self.buf[0]
370 return l
370 return l
371 return self.fp.readline()
371 return self.fp.readline()
372
372
373 def __iter__(self):
373 def __iter__(self):
374 while True:
374 while True:
375 l = self.readline()
375 l = self.readline()
376 if not l:
376 if not l:
377 break
377 break
378 yield l
378 yield l
379
379
380 class abstractbackend(object):
380 class abstractbackend(object):
381 def __init__(self, ui):
381 def __init__(self, ui):
382 self.ui = ui
382 self.ui = ui
383
383
384 def getfile(self, fname):
384 def getfile(self, fname):
385 """Return target file data and flags as a (data, (islink,
385 """Return target file data and flags as a (data, (islink,
386 isexec)) tuple. Data is None if file is missing/deleted.
386 isexec)) tuple. Data is None if file is missing/deleted.
387 """
387 """
388 raise NotImplementedError
388 raise NotImplementedError
389
389
390 def setfile(self, fname, data, mode, copysource):
390 def setfile(self, fname, data, mode, copysource):
391 """Write data to target file fname and set its mode. mode is a
391 """Write data to target file fname and set its mode. mode is a
392 (islink, isexec) tuple. If data is None, the file content should
392 (islink, isexec) tuple. If data is None, the file content should
393 be left unchanged. If the file is modified after being copied,
393 be left unchanged. If the file is modified after being copied,
394 copysource is set to the original file name.
394 copysource is set to the original file name.
395 """
395 """
396 raise NotImplementedError
396 raise NotImplementedError
397
397
398 def unlink(self, fname):
398 def unlink(self, fname):
399 """Unlink target file."""
399 """Unlink target file."""
400 raise NotImplementedError
400 raise NotImplementedError
401
401
402 def writerej(self, fname, failed, total, lines):
402 def writerej(self, fname, failed, total, lines):
403 """Write rejected lines for fname. total is the number of hunks
403 """Write rejected lines for fname. total is the number of hunks
404 which failed to apply and total the total number of hunks for this
404 which failed to apply and total the total number of hunks for this
405 files.
405 files.
406 """
406 """
407 pass
407 pass
408
408
409 def exists(self, fname):
409 def exists(self, fname):
410 raise NotImplementedError
410 raise NotImplementedError
411
411
412 class fsbackend(abstractbackend):
412 class fsbackend(abstractbackend):
413 def __init__(self, ui, basedir):
413 def __init__(self, ui, basedir):
414 super(fsbackend, self).__init__(ui)
414 super(fsbackend, self).__init__(ui)
415 self.opener = scmutil.opener(basedir)
415 self.opener = scmutil.opener(basedir)
416
416
417 def _join(self, f):
417 def _join(self, f):
418 return os.path.join(self.opener.base, f)
418 return os.path.join(self.opener.base, f)
419
419
420 def getfile(self, fname):
420 def getfile(self, fname):
421 if self.opener.islink(fname):
421 if self.opener.islink(fname):
422 return (self.opener.readlink(fname), (True, False))
422 return (self.opener.readlink(fname), (True, False))
423
423
424 isexec = False
424 isexec = False
425 try:
425 try:
426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
427 except OSError, e:
427 except OSError, e:
428 if e.errno != errno.ENOENT:
428 if e.errno != errno.ENOENT:
429 raise
429 raise
430 try:
430 try:
431 return (self.opener.read(fname), (False, isexec))
431 return (self.opener.read(fname), (False, isexec))
432 except IOError, e:
432 except IOError, e:
433 if e.errno != errno.ENOENT:
433 if e.errno != errno.ENOENT:
434 raise
434 raise
435 return None, None
435 return None, None
436
436
437 def setfile(self, fname, data, mode, copysource):
437 def setfile(self, fname, data, mode, copysource):
438 islink, isexec = mode
438 islink, isexec = mode
439 if data is None:
439 if data is None:
440 self.opener.setflags(fname, islink, isexec)
440 self.opener.setflags(fname, islink, isexec)
441 return
441 return
442 if islink:
442 if islink:
443 self.opener.symlink(data, fname)
443 self.opener.symlink(data, fname)
444 else:
444 else:
445 self.opener.write(fname, data)
445 self.opener.write(fname, data)
446 if isexec:
446 if isexec:
447 self.opener.setflags(fname, False, True)
447 self.opener.setflags(fname, False, True)
448
448
449 def unlink(self, fname):
449 def unlink(self, fname):
450 self.opener.unlinkpath(fname, ignoremissing=True)
450 self.opener.unlinkpath(fname, ignoremissing=True)
451
451
452 def writerej(self, fname, failed, total, lines):
452 def writerej(self, fname, failed, total, lines):
453 fname = fname + ".rej"
453 fname = fname + ".rej"
454 self.ui.warn(
454 self.ui.warn(
455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
456 (failed, total, fname))
456 (failed, total, fname))
457 fp = self.opener(fname, 'w')
457 fp = self.opener(fname, 'w')
458 fp.writelines(lines)
458 fp.writelines(lines)
459 fp.close()
459 fp.close()
460
460
461 def exists(self, fname):
461 def exists(self, fname):
462 return self.opener.lexists(fname)
462 return self.opener.lexists(fname)
463
463
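# fsbackend.getfile() returns the file data together with an (islink, isexec)
# flag pair, reading the link target instead of the contents for symlinks and
# treating a missing file as (None, None).  A stdlib-only sketch of the same
# contract (slightly simplified error handling):
import os, stat

def getfile_sketch(path):
    """Return (data, (islink, isexec)), or (None, None) if path is missing."""
    try:
        st = os.lstat(path)
    except OSError:
        return None, None
    if stat.S_ISLNK(st.st_mode):
        return os.readlink(path), (True, False)
    isexec = bool(st.st_mode & stat.S_IEXEC)
    with open(path, 'rb') as fp:
        return fp.read(), (False, isexec)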
464 class workingbackend(fsbackend):
464 class workingbackend(fsbackend):
465 def __init__(self, ui, repo, similarity):
465 def __init__(self, ui, repo, similarity):
466 super(workingbackend, self).__init__(ui, repo.root)
466 super(workingbackend, self).__init__(ui, repo.root)
467 self.repo = repo
467 self.repo = repo
468 self.similarity = similarity
468 self.similarity = similarity
469 self.removed = set()
469 self.removed = set()
470 self.changed = set()
470 self.changed = set()
471 self.copied = []
471 self.copied = []
472
472
473 def _checkknown(self, fname):
473 def _checkknown(self, fname):
474 if self.repo.dirstate[fname] == '?' and self.exists(fname):
474 if self.repo.dirstate[fname] == '?' and self.exists(fname):
475 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
475 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
476
476
477 def setfile(self, fname, data, mode, copysource):
477 def setfile(self, fname, data, mode, copysource):
478 self._checkknown(fname)
478 self._checkknown(fname)
479 super(workingbackend, self).setfile(fname, data, mode, copysource)
479 super(workingbackend, self).setfile(fname, data, mode, copysource)
480 if copysource is not None:
480 if copysource is not None:
481 self.copied.append((copysource, fname))
481 self.copied.append((copysource, fname))
482 self.changed.add(fname)
482 self.changed.add(fname)
483
483
484 def unlink(self, fname):
484 def unlink(self, fname):
485 self._checkknown(fname)
485 self._checkknown(fname)
486 super(workingbackend, self).unlink(fname)
486 super(workingbackend, self).unlink(fname)
487 self.removed.add(fname)
487 self.removed.add(fname)
488 self.changed.add(fname)
488 self.changed.add(fname)
489
489
490 def close(self):
490 def close(self):
491 wctx = self.repo[None]
491 wctx = self.repo[None]
492 changed = set(self.changed)
492 changed = set(self.changed)
493 for src, dst in self.copied:
493 for src, dst in self.copied:
494 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
494 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
495 if self.removed:
495 if self.removed:
496 wctx.forget(sorted(self.removed))
496 wctx.forget(sorted(self.removed))
497 for f in self.removed:
497 for f in self.removed:
498 if f not in self.repo.dirstate:
498 if f not in self.repo.dirstate:
499 # File was deleted and no longer belongs to the
499 # File was deleted and no longer belongs to the
500 # dirstate, it was probably marked added then
500 # dirstate, it was probably marked added then
501 # deleted, and should not be considered by
501 # deleted, and should not be considered by
502 # marktouched().
502 # marktouched().
503 changed.discard(f)
503 changed.discard(f)
504 if changed:
504 if changed:
505 scmutil.marktouched(self.repo, changed, self.similarity)
505 scmutil.marktouched(self.repo, changed, self.similarity)
506 return sorted(self.changed)
506 return sorted(self.changed)
507
507
508 class filestore(object):
508 class filestore(object):
509 def __init__(self, maxsize=None):
509 def __init__(self, maxsize=None):
510 self.opener = None
510 self.opener = None
511 self.files = {}
511 self.files = {}
512 self.created = 0
512 self.created = 0
513 self.maxsize = maxsize
513 self.maxsize = maxsize
514 if self.maxsize is None:
514 if self.maxsize is None:
515 self.maxsize = 4*(2**20)
515 self.maxsize = 4*(2**20)
516 self.size = 0
516 self.size = 0
517 self.data = {}
517 self.data = {}
518
518
519 def setfile(self, fname, data, mode, copied=None):
519 def setfile(self, fname, data, mode, copied=None):
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
521 self.data[fname] = (data, mode, copied)
521 self.data[fname] = (data, mode, copied)
522 self.size += len(data)
522 self.size += len(data)
523 else:
523 else:
524 if self.opener is None:
524 if self.opener is None:
525 root = tempfile.mkdtemp(prefix='hg-patch-')
525 root = tempfile.mkdtemp(prefix='hg-patch-')
526 self.opener = scmutil.opener(root)
526 self.opener = scmutil.opener(root)
527 # Avoid filename issues with these simple names
527 # Avoid filename issues with these simple names
528 fn = str(self.created)
528 fn = str(self.created)
529 self.opener.write(fn, data)
529 self.opener.write(fn, data)
530 self.created += 1
530 self.created += 1
531 self.files[fname] = (fn, mode, copied)
531 self.files[fname] = (fn, mode, copied)
532
532
533 def getfile(self, fname):
533 def getfile(self, fname):
534 if fname in self.data:
534 if fname in self.data:
535 return self.data[fname]
535 return self.data[fname]
536 if not self.opener or fname not in self.files:
536 if not self.opener or fname not in self.files:
537 return None, None, None
537 return None, None, None
538 fn, mode, copied = self.files[fname]
538 fn, mode, copied = self.files[fname]
539 return self.opener.read(fn), mode, copied
539 return self.opener.read(fn), mode, copied
540
540
541 def close(self):
541 def close(self):
542 if self.opener:
542 if self.opener:
543 shutil.rmtree(self.opener.base)
543 shutil.rmtree(self.opener.base)
544
544
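# filestore keeps patched file contents in memory until a size budget (4 MiB
# by default) is exceeded, then spills further blobs to files in a temporary
# directory.  A stripped-down sketch of that spill-over store using only the
# standard library (hypothetical class, no mode/copy tracking):
import os, shutil, tempfile

class SpillStore(object):
    """In-memory blob store that spills to a temp dir past `maxsize` bytes."""
    def __init__(self, maxsize=4 * (2 ** 20)):
        self.maxsize = maxsize
        self.size = 0
        self.data = {}          # name -> bytes kept in memory
        self.files = {}         # name -> path of spilled blob
        self.tmpdir = None

    def setfile(self, name, data):
        if self.size + len(data) <= self.maxsize:
            self.data[name] = data
            self.size += len(data)
            return
        if self.tmpdir is None:
            self.tmpdir = tempfile.mkdtemp(prefix='spill-')
        path = os.path.join(self.tmpdir, str(len(self.files)))
        with open(path, 'wb') as fp:
            fp.write(data)
        self.files[name] = path

    def getfile(self, name):
        if name in self.data:
            return self.data[name]
        if name in self.files:
            with open(self.files[name], 'rb') as fp:
                return fp.read()
        return None

    def close(self):
        if self.tmpdir:
            shutil.rmtree(self.tmpdir)
            self.tmpdir = None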
545 class repobackend(abstractbackend):
545 class repobackend(abstractbackend):
546 def __init__(self, ui, repo, ctx, store):
546 def __init__(self, ui, repo, ctx, store):
547 super(repobackend, self).__init__(ui)
547 super(repobackend, self).__init__(ui)
548 self.repo = repo
548 self.repo = repo
549 self.ctx = ctx
549 self.ctx = ctx
550 self.store = store
550 self.store = store
551 self.changed = set()
551 self.changed = set()
552 self.removed = set()
552 self.removed = set()
553 self.copied = {}
553 self.copied = {}
554
554
555 def _checkknown(self, fname):
555 def _checkknown(self, fname):
556 if fname not in self.ctx:
556 if fname not in self.ctx:
557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
558
558
559 def getfile(self, fname):
559 def getfile(self, fname):
560 try:
560 try:
561 fctx = self.ctx[fname]
561 fctx = self.ctx[fname]
562 except error.LookupError:
562 except error.LookupError:
563 return None, None
563 return None, None
564 flags = fctx.flags()
564 flags = fctx.flags()
565 return fctx.data(), ('l' in flags, 'x' in flags)
565 return fctx.data(), ('l' in flags, 'x' in flags)
566
566
567 def setfile(self, fname, data, mode, copysource):
567 def setfile(self, fname, data, mode, copysource):
568 if copysource:
568 if copysource:
569 self._checkknown(copysource)
569 self._checkknown(copysource)
570 if data is None:
570 if data is None:
571 data = self.ctx[fname].data()
571 data = self.ctx[fname].data()
572 self.store.setfile(fname, data, mode, copysource)
572 self.store.setfile(fname, data, mode, copysource)
573 self.changed.add(fname)
573 self.changed.add(fname)
574 if copysource:
574 if copysource:
575 self.copied[fname] = copysource
575 self.copied[fname] = copysource
576
576
577 def unlink(self, fname):
577 def unlink(self, fname):
578 self._checkknown(fname)
578 self._checkknown(fname)
579 self.removed.add(fname)
579 self.removed.add(fname)
580
580
581 def exists(self, fname):
581 def exists(self, fname):
582 return fname in self.ctx
582 return fname in self.ctx
583
583
584 def close(self):
584 def close(self):
585 return self.changed | self.removed
585 return self.changed | self.removed
586
586
587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
591
591
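# unidesc and contextdesc pull the start/length pairs out of unified
# ('@@ -s,l +s,l @@') and context ('*** s,l ****' / '--- s,l ----') hunk
# headers; a missing length means 1.  A short usage example against the
# unified pattern (re-declared here so the snippet stands alone):
import re

_unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')

def hunkrange(header):
    """Return (oldstart, oldlen, newstart, newlen) for a unified hunk line."""
    m = _unidesc.match(header)
    if not m:
        raise ValueError('not a unified hunk header: %r' % header)
    oldstart, oldlen, newstart, newlen = m.groups()
    return (int(oldstart), int(oldlen or 1),
            int(newstart), int(newlen or 1))

# hunkrange('@@ -1,5 +1,7 @@')  -> (1, 5, 1, 7)
# hunkrange('@@ -10 +10,2 @@')  -> (10, 1, 10, 2)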
592 class patchfile(object):
592 class patchfile(object):
593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
594 self.fname = gp.path
594 self.fname = gp.path
595 self.eolmode = eolmode
595 self.eolmode = eolmode
596 self.eol = None
596 self.eol = None
597 self.backend = backend
597 self.backend = backend
598 self.ui = ui
598 self.ui = ui
599 self.lines = []
599 self.lines = []
600 self.exists = False
600 self.exists = False
601 self.missing = True
601 self.missing = True
602 self.mode = gp.mode
602 self.mode = gp.mode
603 self.copysource = gp.oldpath
603 self.copysource = gp.oldpath
604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
605 self.remove = gp.op == 'DELETE'
605 self.remove = gp.op == 'DELETE'
606 if self.copysource is None:
606 if self.copysource is None:
607 data, mode = backend.getfile(self.fname)
607 data, mode = backend.getfile(self.fname)
608 else:
608 else:
609 data, mode = store.getfile(self.copysource)[:2]
609 data, mode = store.getfile(self.copysource)[:2]
610 if data is not None:
610 if data is not None:
611 self.exists = self.copysource is None or backend.exists(self.fname)
611 self.exists = self.copysource is None or backend.exists(self.fname)
612 self.missing = False
612 self.missing = False
613 if data:
613 if data:
614 self.lines = mdiff.splitnewlines(data)
614 self.lines = mdiff.splitnewlines(data)
615 if self.mode is None:
615 if self.mode is None:
616 self.mode = mode
616 self.mode = mode
617 if self.lines:
617 if self.lines:
618 # Normalize line endings
618 # Normalize line endings
619 if self.lines[0].endswith('\r\n'):
619 if self.lines[0].endswith('\r\n'):
620 self.eol = '\r\n'
620 self.eol = '\r\n'
621 elif self.lines[0].endswith('\n'):
621 elif self.lines[0].endswith('\n'):
622 self.eol = '\n'
622 self.eol = '\n'
623 if eolmode != 'strict':
623 if eolmode != 'strict':
624 nlines = []
624 nlines = []
625 for l in self.lines:
625 for l in self.lines:
626 if l.endswith('\r\n'):
626 if l.endswith('\r\n'):
627 l = l[:-2] + '\n'
627 l = l[:-2] + '\n'
628 nlines.append(l)
628 nlines.append(l)
629 self.lines = nlines
629 self.lines = nlines
630 else:
630 else:
631 if self.create:
631 if self.create:
632 self.missing = False
632 self.missing = False
633 if self.mode is None:
633 if self.mode is None:
634 self.mode = (False, False)
634 self.mode = (False, False)
635 if self.missing:
635 if self.missing:
636 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
636 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
637
637
638 self.hash = {}
638 self.hash = {}
639 self.dirty = 0
639 self.dirty = 0
640 self.offset = 0
640 self.offset = 0
641 self.skew = 0
641 self.skew = 0
642 self.rej = []
642 self.rej = []
643 self.fileprinted = False
643 self.fileprinted = False
644 self.printfile(False)
644 self.printfile(False)
645 self.hunks = 0
645 self.hunks = 0
646
646
647 def writelines(self, fname, lines, mode):
647 def writelines(self, fname, lines, mode):
648 if self.eolmode == 'auto':
648 if self.eolmode == 'auto':
649 eol = self.eol
649 eol = self.eol
650 elif self.eolmode == 'crlf':
650 elif self.eolmode == 'crlf':
651 eol = '\r\n'
651 eol = '\r\n'
652 else:
652 else:
653 eol = '\n'
653 eol = '\n'
654
654
655 if self.eolmode != 'strict' and eol and eol != '\n':
655 if self.eolmode != 'strict' and eol and eol != '\n':
656 rawlines = []
656 rawlines = []
657 for l in lines:
657 for l in lines:
658 if l and l[-1] == '\n':
658 if l and l[-1] == '\n':
659 l = l[:-1] + eol
659 l = l[:-1] + eol
660 rawlines.append(l)
660 rawlines.append(l)
661 lines = rawlines
661 lines = rawlines
662
662
663 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
663 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
664
664
665 def printfile(self, warn):
665 def printfile(self, warn):
666 if self.fileprinted:
666 if self.fileprinted:
667 return
667 return
668 if warn or self.ui.verbose:
668 if warn or self.ui.verbose:
669 self.fileprinted = True
669 self.fileprinted = True
670 s = _("patching file %s\n") % self.fname
670 s = _("patching file %s\n") % self.fname
671 if warn:
671 if warn:
672 self.ui.warn(s)
672 self.ui.warn(s)
673 else:
673 else:
674 self.ui.note(s)
674 self.ui.note(s)
675
675
676
676
677 def findlines(self, l, linenum):
677 def findlines(self, l, linenum):
678 # looks through the hash and finds candidate lines. The
678 # looks through the hash and finds candidate lines. The
679 # result is a list of line numbers sorted based on distance
679 # result is a list of line numbers sorted based on distance
680 # from linenum
680 # from linenum
681
681
682 cand = self.hash.get(l, [])
682 cand = self.hash.get(l, [])
683 if len(cand) > 1:
683 if len(cand) > 1:
684 # resort our list of potentials forward then back.
684 # resort our list of potentials forward then back.
685 cand.sort(key=lambda x: abs(x - linenum))
685 cand.sort(key=lambda x: abs(x - linenum))
686 return cand
686 return cand
687
687
688 def write_rej(self):
688 def write_rej(self):
689 # our rejects are a little different from patch(1). This always
689 # our rejects are a little different from patch(1). This always
690 # creates rejects in the same form as the original patch. A file
690 # creates rejects in the same form as the original patch. A file
691 # header is inserted so that you can run the reject through patch again
691 # header is inserted so that you can run the reject through patch again
692 # without having to type the filename.
692 # without having to type the filename.
693 if not self.rej:
693 if not self.rej:
694 return
694 return
695 base = os.path.basename(self.fname)
695 base = os.path.basename(self.fname)
696 lines = ["--- %s\n+++ %s\n" % (base, base)]
696 lines = ["--- %s\n+++ %s\n" % (base, base)]
697 for x in self.rej:
697 for x in self.rej:
698 for l in x.hunk:
698 for l in x.hunk:
699 lines.append(l)
699 lines.append(l)
700 if l[-1] != '\n':
700 if l[-1] != '\n':
701 lines.append("\n\ No newline at end of file\n")
701 lines.append("\n\ No newline at end of file\n")
702 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
702 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
703
703
704 def apply(self, h):
704 def apply(self, h):
705 if not h.complete():
705 if not h.complete():
706 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
706 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
707 (h.number, h.desc, len(h.a), h.lena, len(h.b),
707 (h.number, h.desc, len(h.a), h.lena, len(h.b),
708 h.lenb))
708 h.lenb))
709
709
710 self.hunks += 1
710 self.hunks += 1
711
711
712 if self.missing:
712 if self.missing:
713 self.rej.append(h)
713 self.rej.append(h)
714 return -1
714 return -1
715
715
716 if self.exists and self.create:
716 if self.exists and self.create:
717 if self.copysource:
717 if self.copysource:
718 self.ui.warn(_("cannot create %s: destination already "
718 self.ui.warn(_("cannot create %s: destination already "
719 "exists\n") % self.fname)
719 "exists\n") % self.fname)
720 else:
720 else:
721 self.ui.warn(_("file %s already exists\n") % self.fname)
721 self.ui.warn(_("file %s already exists\n") % self.fname)
722 self.rej.append(h)
722 self.rej.append(h)
723 return -1
723 return -1
724
724
725 if isinstance(h, binhunk):
725 if isinstance(h, binhunk):
726 if self.remove:
726 if self.remove:
727 self.backend.unlink(self.fname)
727 self.backend.unlink(self.fname)
728 else:
728 else:
729 l = h.new(self.lines)
729 l = h.new(self.lines)
730 self.lines[:] = l
730 self.lines[:] = l
731 self.offset += len(l)
731 self.offset += len(l)
732 self.dirty = True
732 self.dirty = True
733 return 0
733 return 0
734
734
735 horig = h
735 horig = h
736 if (self.eolmode in ('crlf', 'lf')
736 if (self.eolmode in ('crlf', 'lf')
737 or self.eolmode == 'auto' and self.eol):
737 or self.eolmode == 'auto' and self.eol):
738 # If new eols are going to be normalized, then normalize
738 # If new eols are going to be normalized, then normalize
739 # hunk data before patching. Otherwise, preserve input
739 # hunk data before patching. Otherwise, preserve input
740 # line-endings.
740 # line-endings.
741 h = h.getnormalized()
741 h = h.getnormalized()
742
742
743 # fast case first, no offsets, no fuzz
743 # fast case first, no offsets, no fuzz
744 old, oldstart, new, newstart = h.fuzzit(0, False)
744 old, oldstart, new, newstart = h.fuzzit(0, False)
745 oldstart += self.offset
745 oldstart += self.offset
746 orig_start = oldstart
746 orig_start = oldstart
747 # if there's skew we want to emit the "(offset %d lines)" even
747 # if there's skew we want to emit the "(offset %d lines)" even
748 # when the hunk cleanly applies at start + skew, so skip the
748 # when the hunk cleanly applies at start + skew, so skip the
749 # fast case code
749 # fast case code
750 if (self.skew == 0 and
750 if (self.skew == 0 and
751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
752 if self.remove:
752 if self.remove:
753 self.backend.unlink(self.fname)
753 self.backend.unlink(self.fname)
754 else:
754 else:
755 self.lines[oldstart:oldstart + len(old)] = new
755 self.lines[oldstart:oldstart + len(old)] = new
756 self.offset += len(new) - len(old)
756 self.offset += len(new) - len(old)
757 self.dirty = True
757 self.dirty = True
758 return 0
758 return 0
759
759
760 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
760 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
761 self.hash = {}
761 self.hash = {}
762 for x, s in enumerate(self.lines):
762 for x, s in enumerate(self.lines):
763 self.hash.setdefault(s, []).append(x)
763 self.hash.setdefault(s, []).append(x)
764
764
765 for fuzzlen in xrange(3):
765 for fuzzlen in xrange(3):
766 for toponly in [True, False]:
766 for toponly in [True, False]:
767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
768 oldstart = oldstart + self.offset + self.skew
768 oldstart = oldstart + self.offset + self.skew
769 oldstart = min(oldstart, len(self.lines))
769 oldstart = min(oldstart, len(self.lines))
770 if old:
770 if old:
771 cand = self.findlines(old[0][1:], oldstart)
771 cand = self.findlines(old[0][1:], oldstart)
772 else:
772 else:
773 # Only adding lines with no or fuzzed context, just
773 # Only adding lines with no or fuzzed context, just
774 # take the skew in account
774 # take the skew in account
775 cand = [oldstart]
775 cand = [oldstart]
776
776
777 for l in cand:
777 for l in cand:
778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
779 self.lines[l : l + len(old)] = new
779 self.lines[l : l + len(old)] = new
780 self.offset += len(new) - len(old)
780 self.offset += len(new) - len(old)
781 self.skew = l - orig_start
781 self.skew = l - orig_start
782 self.dirty = True
782 self.dirty = True
783 offset = l - orig_start - fuzzlen
783 offset = l - orig_start - fuzzlen
784 if fuzzlen:
784 if fuzzlen:
785 msg = _("Hunk #%d succeeded at %d "
785 msg = _("Hunk #%d succeeded at %d "
786 "with fuzz %d "
786 "with fuzz %d "
787 "(offset %d lines).\n")
787 "(offset %d lines).\n")
788 self.printfile(True)
788 self.printfile(True)
789 self.ui.warn(msg %
789 self.ui.warn(msg %
790 (h.number, l + 1, fuzzlen, offset))
790 (h.number, l + 1, fuzzlen, offset))
791 else:
791 else:
792 msg = _("Hunk #%d succeeded at %d "
792 msg = _("Hunk #%d succeeded at %d "
793 "(offset %d lines).\n")
793 "(offset %d lines).\n")
794 self.ui.note(msg % (h.number, l + 1, offset))
794 self.ui.note(msg % (h.number, l + 1, offset))
795 return fuzzlen
795 return fuzzlen
796 self.printfile(True)
796 self.printfile(True)
797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
798 self.rej.append(horig)
798 self.rej.append(horig)
799 return -1
799 return -1
800
800
801 def close(self):
801 def close(self):
802 if self.dirty:
802 if self.dirty:
803 self.writelines(self.fname, self.lines, self.mode)
803 self.writelines(self.fname, self.lines, self.mode)
804 self.write_rej()
804 self.write_rej()
805 return len(self.rej)
805 return len(self.rej)
806
806
807 class header(object):
807 class header(object):
808 """patch header
808 """patch header
809 """
809 """
810 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
810 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
811 diff_re = re.compile('diff -r .* (.*)$')
811 diff_re = re.compile('diff -r .* (.*)$')
812 allhunks_re = re.compile('(?:index|deleted file) ')
812 allhunks_re = re.compile('(?:index|deleted file) ')
813 pretty_re = re.compile('(?:new file|deleted file) ')
813 pretty_re = re.compile('(?:new file|deleted file) ')
814 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
814 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
815
815
816 def __init__(self, header):
816 def __init__(self, header):
817 self.header = header
817 self.header = header
818 self.hunks = []
818 self.hunks = []
819
819
820 def binary(self):
820 def binary(self):
821 return util.any(h.startswith('index ') for h in self.header)
821 return util.any(h.startswith('index ') for h in self.header)
822
822
823 def pretty(self, fp):
823 def pretty(self, fp):
824 for h in self.header:
824 for h in self.header:
825 if h.startswith('index '):
825 if h.startswith('index '):
826 fp.write(_('this modifies a binary file (all or nothing)\n'))
826 fp.write(_('this modifies a binary file (all or nothing)\n'))
827 break
827 break
828 if self.pretty_re.match(h):
828 if self.pretty_re.match(h):
829 fp.write(h)
829 fp.write(h)
830 if self.binary():
830 if self.binary():
831 fp.write(_('this is a binary file\n'))
831 fp.write(_('this is a binary file\n'))
832 break
832 break
833 if h.startswith('---'):
833 if h.startswith('---'):
834 fp.write(_('%d hunks, %d lines changed\n') %
834 fp.write(_('%d hunks, %d lines changed\n') %
835 (len(self.hunks),
835 (len(self.hunks),
836 sum([max(h.added, h.removed) for h in self.hunks])))
836 sum([max(h.added, h.removed) for h in self.hunks])))
837 break
837 break
838 fp.write(h)
838 fp.write(h)
839
839
840 def write(self, fp):
840 def write(self, fp):
841 fp.write(''.join(self.header))
841 fp.write(''.join(self.header))
842
842
843 def allhunks(self):
843 def allhunks(self):
844 return util.any(self.allhunks_re.match(h) for h in self.header)
844 return util.any(self.allhunks_re.match(h) for h in self.header)
845
845
846 def files(self):
846 def files(self):
847 match = self.diffgit_re.match(self.header[0])
847 match = self.diffgit_re.match(self.header[0])
848 if match:
848 if match:
849 fromfile, tofile = match.groups()
849 fromfile, tofile = match.groups()
850 if fromfile == tofile:
850 if fromfile == tofile:
851 return [fromfile]
851 return [fromfile]
852 return [fromfile, tofile]
852 return [fromfile, tofile]
853 else:
853 else:
854 return self.diff_re.match(self.header[0]).groups()
854 return self.diff_re.match(self.header[0]).groups()
855
855
856 def filename(self):
856 def filename(self):
857 return self.files()[-1]
857 return self.files()[-1]
858
858
859 def __repr__(self):
859 def __repr__(self):
860 return '<header %s>' % (' '.join(map(repr, self.files())))
860 return '<header %s>' % (' '.join(map(repr, self.files())))
861
861
862 def special(self):
862 def special(self):
863 return util.any(self.special_re.match(h) for h in self.header)
863 return util.any(self.special_re.match(h) for h in self.header)
864
864
865 class recordhunk(object):
865 class recordhunk(object):
866 """patch hunk
866 """patch hunk
867
867
868 XXX shouldn't we merge this with the other hunk class?
868 XXX shouldn't we merge this with the other hunk class?
869 """
869 """
870 maxcontext = 3
870 maxcontext = 3
871
871
872 def __init__(self, header, fromline, toline, proc, before, hunk, after):
872 def __init__(self, header, fromline, toline, proc, before, hunk, after):
873 def trimcontext(number, lines):
873 def trimcontext(number, lines):
874 delta = len(lines) - self.maxcontext
874 delta = len(lines) - self.maxcontext
875 if False and delta > 0:
875 if False and delta > 0:
876 return number + delta, lines[:self.maxcontext]
876 return number + delta, lines[:self.maxcontext]
877 return number, lines
877 return number, lines
878
878
879 self.header = header
879 self.header = header
880 self.fromline, self.before = trimcontext(fromline, before)
880 self.fromline, self.before = trimcontext(fromline, before)
881 self.toline, self.after = trimcontext(toline, after)
881 self.toline, self.after = trimcontext(toline, after)
882 self.proc = proc
882 self.proc = proc
883 self.hunk = hunk
883 self.hunk = hunk
884 self.added, self.removed = self.countchanges(self.hunk)
884 self.added, self.removed = self.countchanges(self.hunk)
885
885
886 def countchanges(self, hunk):
886 def countchanges(self, hunk):
887 """hunk -> (n+,n-)"""
887 """hunk -> (n+,n-)"""
888 add = len([h for h in hunk if h[0] == '+'])
888 add = len([h for h in hunk if h[0] == '+'])
889 rem = len([h for h in hunk if h[0] == '-'])
889 rem = len([h for h in hunk if h[0] == '-'])
890 return add, rem
890 return add, rem
891
891
892 def write(self, fp):
892 def write(self, fp):
893 delta = len(self.before) + len(self.after)
893 delta = len(self.before) + len(self.after)
894 if self.after and self.after[-1] == '\\ No newline at end of file\n':
894 if self.after and self.after[-1] == '\\ No newline at end of file\n':
895 delta -= 1
895 delta -= 1
896 fromlen = delta + self.removed
896 fromlen = delta + self.removed
897 tolen = delta + self.added
897 tolen = delta + self.added
898 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
898 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
899 (self.fromline, fromlen, self.toline, tolen,
899 (self.fromline, fromlen, self.toline, tolen,
900 self.proc and (' ' + self.proc)))
900 self.proc and (' ' + self.proc)))
901 fp.write(''.join(self.before + self.hunk + self.after))
901 fp.write(''.join(self.before + self.hunk + self.after))
902
902
903 pretty = write
903 pretty = write
904
904
905 def filename(self):
905 def filename(self):
906 return self.header.filename()
906 return self.header.filename()
907
907
908 def __repr__(self):
908 def __repr__(self):
909 return '<hunk %r@%d>' % (self.filename(), self.fromline)
909 return '<hunk %r@%d>' % (self.filename(), self.fromline)
910
910
911 def filterpatch(ui, headers):
912 """Interactively filter patch chunks into applied-only chunks"""
913
914 def prompt(skipfile, skipall, query, chunk):
915 """prompt query, and process base inputs
916
917 - y/n for the rest of file
918 - y/n for the rest
919 - ? (help)
920 - q (quit)
921
922 Return True/False and possibly updated skipfile and skipall.
923 """
924 newpatches = None
925 if skipall is not None:
926 return skipall, skipfile, skipall, newpatches
927 if skipfile is not None:
928 return skipfile, skipfile, skipall, newpatches
929 while True:
930 resps = _('[Ynesfdaq?]'
931 '$$ &Yes, record this change'
932 '$$ &No, skip this change'
933 '$$ &Edit this change manually'
934 '$$ &Skip remaining changes to this file'
935 '$$ Record remaining changes to this &file'
936 '$$ &Done, skip remaining changes and files'
937 '$$ Record &all changes to all remaining files'
938 '$$ &Quit, recording no changes'
939 '$$ &? (display help)')
940 r = ui.promptchoice("%s %s" % (query, resps))
941 ui.write("\n")
942 if r == 8: # ?
943 for c, t in ui.extractchoices(resps)[1]:
944 ui.write('%s - %s\n' % (c, t.lower()))
945 continue
946 elif r == 0: # yes
947 ret = True
948 elif r == 1: # no
949 ret = False
950 elif r == 2: # Edit patch
951 if chunk is None:
952 ui.write(_('cannot edit patch for whole file'))
953 ui.write("\n")
954 continue
955 if chunk.header.binary():
956 ui.write(_('cannot edit patch for binary file'))
957 ui.write("\n")
958 continue
959 # Patch comment based on the Git one (based on comment at end of
960 # http://mercurial.selenic.com/wiki/RecordExtension)
961 phelp = '---' + _("""
962 To remove '-' lines, make them ' ' lines (context).
963 To remove '+' lines, delete them.
964 Lines starting with # will be removed from the patch.
965
966 If the patch applies cleanly, the edited hunk will immediately be
967 added to the record list. If it does not apply cleanly, a rejects
968 file will be generated: you can use that when you try again. If
969 all lines of the hunk are removed, then the edit is aborted and
970 the hunk is left unchanged.
971 """)
972 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
973 suffix=".diff", text=True)
974 ncpatchfp = None
975 try:
976 # Write the initial patch
977 f = os.fdopen(patchfd, "w")
978 chunk.header.write(f)
979 chunk.write(f)
980 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
981 f.close()
982 # Start the editor and wait for it to complete
983 editor = ui.geteditor()
984 ui.system("%s \"%s\"" % (editor, patchfn),
985 environ={'HGUSER': ui.username()},
986 onerr=util.Abort, errprefix=_("edit failed"))
987 # Remove comment lines
988 patchfp = open(patchfn)
989 ncpatchfp = cStringIO.StringIO()
990 for line in patchfp:
991 if not line.startswith('#'):
992 ncpatchfp.write(line)
993 patchfp.close()
994 ncpatchfp.seek(0)
995 newpatches = parsepatch(ncpatchfp)
996 finally:
997 os.unlink(patchfn)
998 del ncpatchfp
999 # Signal that the chunk shouldn't be applied as-is, but
1000 # provide the new patch to be used instead.
1001 ret = False
1002 elif r == 3: # Skip
1003 ret = skipfile = False
1004 elif r == 4: # file (Record remaining)
1005 ret = skipfile = True
1006 elif r == 5: # done, skip remaining
1007 ret = skipall = False
1008 elif r == 6: # all
1009 ret = skipall = True
1010 elif r == 7: # quit
1011 raise util.Abort(_('user quit'))
1012 return ret, skipfile, skipall, newpatches
1013
1014 seen = set()
1015 applied = {} # 'filename' -> [] of chunks
1016 skipfile, skipall = None, None
1017 pos, total = 1, sum(len(h.hunks) for h in headers)
1018 for h in headers:
1019 pos += len(h.hunks)
1020 skipfile = None
1021 fixoffset = 0
1022 hdr = ''.join(h.header)
1023 if hdr in seen:
1024 continue
1025 seen.add(hdr)
1026 if skipall is None:
1027 h.pretty(ui)
1028 msg = (_('examine changes to %s?') %
1029 _(' and ').join("'%s'" % f for f in h.files()))
1030 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1031 if not r:
1032 continue
1033 applied[h.filename()] = [h]
1034 if h.allhunks():
1035 applied[h.filename()] += h.hunks
1036 continue
1037 for i, chunk in enumerate(h.hunks):
1038 if skipfile is None and skipall is None:
1039 chunk.pretty(ui)
1040 if total == 1:
1041 msg = _("record this change to '%s'?") % chunk.filename()
1042 else:
1043 idx = pos - len(h.hunks) + i
1044 msg = _("record change %d/%d to '%s'?") % (idx, total,
1045 chunk.filename())
1046 r, skipfile, skipall, newpatches = prompt(skipfile,
1047 skipall, msg, chunk)
1048 if r:
1049 if fixoffset:
1050 chunk = copy.copy(chunk)
1051 chunk.toline += fixoffset
1052 applied[chunk.filename()].append(chunk)
1053 elif newpatches is not None:
1054 for newpatch in newpatches:
1055 for newhunk in newpatch.hunks:
1056 if fixoffset:
1057 newhunk.toline += fixoffset
1058 applied[newhunk.filename()].append(newhunk)
1059 else:
1060 fixoffset += chunk.removed - chunk.added
1061 return sum([h for h in applied.itervalues()
1062 if h[0].special() or len(h) > 1], [])
911 class hunk(object):
1063 class hunk(object):
912 def __init__(self, desc, num, lr, context):
1064 def __init__(self, desc, num, lr, context):
913 self.number = num
1065 self.number = num
914 self.desc = desc
1066 self.desc = desc
915 self.hunk = [desc]
1067 self.hunk = [desc]
916 self.a = []
1068 self.a = []
917 self.b = []
1069 self.b = []
918 self.starta = self.lena = None
1070 self.starta = self.lena = None
919 self.startb = self.lenb = None
1071 self.startb = self.lenb = None
920 if lr is not None:
1072 if lr is not None:
921 if context:
1073 if context:
922 self.read_context_hunk(lr)
1074 self.read_context_hunk(lr)
923 else:
1075 else:
924 self.read_unified_hunk(lr)
1076 self.read_unified_hunk(lr)
925
1077
926 def getnormalized(self):
1078 def getnormalized(self):
927 """Return a copy with line endings normalized to LF."""
1079 """Return a copy with line endings normalized to LF."""
928
1080
929 def normalize(lines):
1081 def normalize(lines):
930 nlines = []
1082 nlines = []
931 for line in lines:
1083 for line in lines:
932 if line.endswith('\r\n'):
1084 if line.endswith('\r\n'):
933 line = line[:-2] + '\n'
1085 line = line[:-2] + '\n'
934 nlines.append(line)
1086 nlines.append(line)
935 return nlines
1087 return nlines
936
1088
937 # Dummy object, it is rebuilt manually
1089 # Dummy object, it is rebuilt manually
938 nh = hunk(self.desc, self.number, None, None)
1090 nh = hunk(self.desc, self.number, None, None)
939 nh.number = self.number
1091 nh.number = self.number
940 nh.desc = self.desc
1092 nh.desc = self.desc
941 nh.hunk = self.hunk
1093 nh.hunk = self.hunk
942 nh.a = normalize(self.a)
1094 nh.a = normalize(self.a)
943 nh.b = normalize(self.b)
1095 nh.b = normalize(self.b)
944 nh.starta = self.starta
1096 nh.starta = self.starta
945 nh.startb = self.startb
1097 nh.startb = self.startb
946 nh.lena = self.lena
1098 nh.lena = self.lena
947 nh.lenb = self.lenb
1099 nh.lenb = self.lenb
948 return nh
1100 return nh
949
1101
950 def read_unified_hunk(self, lr):
1102 def read_unified_hunk(self, lr):
951 m = unidesc.match(self.desc)
1103 m = unidesc.match(self.desc)
952 if not m:
1104 if not m:
953 raise PatchError(_("bad hunk #%d") % self.number)
1105 raise PatchError(_("bad hunk #%d") % self.number)
954 self.starta, self.lena, self.startb, self.lenb = m.groups()
1106 self.starta, self.lena, self.startb, self.lenb = m.groups()
955 if self.lena is None:
1107 if self.lena is None:
956 self.lena = 1
1108 self.lena = 1
957 else:
1109 else:
958 self.lena = int(self.lena)
1110 self.lena = int(self.lena)
959 if self.lenb is None:
1111 if self.lenb is None:
960 self.lenb = 1
1112 self.lenb = 1
961 else:
1113 else:
962 self.lenb = int(self.lenb)
1114 self.lenb = int(self.lenb)
963 self.starta = int(self.starta)
1115 self.starta = int(self.starta)
964 self.startb = int(self.startb)
1116 self.startb = int(self.startb)
965 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1117 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
966 self.b)
1118 self.b)
967 # if we hit eof before finishing out the hunk, the last line will
1119 # if we hit eof before finishing out the hunk, the last line will
968 # be zero length. Lets try to fix it up.
1120 # be zero length. Lets try to fix it up.
969 while len(self.hunk[-1]) == 0:
1121 while len(self.hunk[-1]) == 0:
970 del self.hunk[-1]
1122 del self.hunk[-1]
971 del self.a[-1]
1123 del self.a[-1]
972 del self.b[-1]
1124 del self.b[-1]
973 self.lena -= 1
1125 self.lena -= 1
974 self.lenb -= 1
1126 self.lenb -= 1
975 self._fixnewline(lr)
1127 self._fixnewline(lr)
976
1128
977 def read_context_hunk(self, lr):
1129 def read_context_hunk(self, lr):
978 self.desc = lr.readline()
1130 self.desc = lr.readline()
979 m = contextdesc.match(self.desc)
1131 m = contextdesc.match(self.desc)
980 if not m:
1132 if not m:
981 raise PatchError(_("bad hunk #%d") % self.number)
1133 raise PatchError(_("bad hunk #%d") % self.number)
982 self.starta, aend = m.groups()
1134 self.starta, aend = m.groups()
983 self.starta = int(self.starta)
1135 self.starta = int(self.starta)
984 if aend is None:
1136 if aend is None:
985 aend = self.starta
1137 aend = self.starta
986 self.lena = int(aend) - self.starta
1138 self.lena = int(aend) - self.starta
987 if self.starta:
1139 if self.starta:
988 self.lena += 1
1140 self.lena += 1
989 for x in xrange(self.lena):
1141 for x in xrange(self.lena):
990 l = lr.readline()
1142 l = lr.readline()
991 if l.startswith('---'):
1143 if l.startswith('---'):
992 # lines addition, old block is empty
1144 # lines addition, old block is empty
993 lr.push(l)
1145 lr.push(l)
994 break
1146 break
995 s = l[2:]
1147 s = l[2:]
996 if l.startswith('- ') or l.startswith('! '):
1148 if l.startswith('- ') or l.startswith('! '):
997 u = '-' + s
1149 u = '-' + s
998 elif l.startswith(' '):
1150 elif l.startswith(' '):
999 u = ' ' + s
1151 u = ' ' + s
1000 else:
1152 else:
1001 raise PatchError(_("bad hunk #%d old text line %d") %
1153 raise PatchError(_("bad hunk #%d old text line %d") %
1002 (self.number, x))
1154 (self.number, x))
1003 self.a.append(u)
1155 self.a.append(u)
1004 self.hunk.append(u)
1156 self.hunk.append(u)
1005
1157
1006 l = lr.readline()
1158 l = lr.readline()
1007 if l.startswith('\ '):
1159 if l.startswith('\ '):
1008 s = self.a[-1][:-1]
1160 s = self.a[-1][:-1]
1009 self.a[-1] = s
1161 self.a[-1] = s
1010 self.hunk[-1] = s
1162 self.hunk[-1] = s
1011 l = lr.readline()
1163 l = lr.readline()
1012 m = contextdesc.match(l)
1164 m = contextdesc.match(l)
1013 if not m:
1165 if not m:
1014 raise PatchError(_("bad hunk #%d") % self.number)
1166 raise PatchError(_("bad hunk #%d") % self.number)
1015 self.startb, bend = m.groups()
1167 self.startb, bend = m.groups()
1016 self.startb = int(self.startb)
1168 self.startb = int(self.startb)
1017 if bend is None:
1169 if bend is None:
1018 bend = self.startb
1170 bend = self.startb
1019 self.lenb = int(bend) - self.startb
1171 self.lenb = int(bend) - self.startb
1020 if self.startb:
1172 if self.startb:
1021 self.lenb += 1
1173 self.lenb += 1
1022 hunki = 1
1174 hunki = 1
1023 for x in xrange(self.lenb):
1175 for x in xrange(self.lenb):
1024 l = lr.readline()
1176 l = lr.readline()
1025 if l.startswith('\ '):
1177 if l.startswith('\ '):
1026 # XXX: the only way to hit this is with an invalid line range.
1178 # XXX: the only way to hit this is with an invalid line range.
1027 # The no-eol marker is not counted in the line range, but I
1179 # The no-eol marker is not counted in the line range, but I
1028 # guess there are diff(1) out there which behave differently.
1180 # guess there are diff(1) out there which behave differently.
1029 s = self.b[-1][:-1]
1181 s = self.b[-1][:-1]
1030 self.b[-1] = s
1182 self.b[-1] = s
1031 self.hunk[hunki - 1] = s
1183 self.hunk[hunki - 1] = s
1032 continue
1184 continue
1033 if not l:
1185 if not l:
1034 # line deletions, new block is empty and we hit EOF
1186 # line deletions, new block is empty and we hit EOF
1035 lr.push(l)
1187 lr.push(l)
1036 break
1188 break
1037 s = l[2:]
1189 s = l[2:]
1038 if l.startswith('+ ') or l.startswith('! '):
1190 if l.startswith('+ ') or l.startswith('! '):
1039 u = '+' + s
1191 u = '+' + s
1040 elif l.startswith(' '):
1192 elif l.startswith(' '):
1041 u = ' ' + s
1193 u = ' ' + s
1042 elif len(self.b) == 0:
1194 elif len(self.b) == 0:
1043 # line deletions, new block is empty
1195 # line deletions, new block is empty
1044 lr.push(l)
1196 lr.push(l)
1045 break
1197 break
1046 else:
1198 else:
1047 raise PatchError(_("bad hunk #%d old text line %d") %
1199 raise PatchError(_("bad hunk #%d old text line %d") %
1048 (self.number, x))
1200 (self.number, x))
1049 self.b.append(s)
1201 self.b.append(s)
1050 while True:
1202 while True:
1051 if hunki >= len(self.hunk):
1203 if hunki >= len(self.hunk):
1052 h = ""
1204 h = ""
1053 else:
1205 else:
1054 h = self.hunk[hunki]
1206 h = self.hunk[hunki]
1055 hunki += 1
1207 hunki += 1
1056 if h == u:
1208 if h == u:
1057 break
1209 break
1058 elif h.startswith('-'):
1210 elif h.startswith('-'):
1059 continue
1211 continue
1060 else:
1212 else:
1061 self.hunk.insert(hunki - 1, u)
1213 self.hunk.insert(hunki - 1, u)
1062 break
1214 break
1063
1215
1064 if not self.a:
1216 if not self.a:
1065 # this happens when lines were only added to the hunk
1217 # this happens when lines were only added to the hunk
1066 for x in self.hunk:
1218 for x in self.hunk:
1067 if x.startswith('-') or x.startswith(' '):
1219 if x.startswith('-') or x.startswith(' '):
1068 self.a.append(x)
1220 self.a.append(x)
1069 if not self.b:
1221 if not self.b:
1070 # this happens when lines were only deleted from the hunk
1222 # this happens when lines were only deleted from the hunk
1071 for x in self.hunk:
1223 for x in self.hunk:
1072 if x.startswith('+') or x.startswith(' '):
1224 if x.startswith('+') or x.startswith(' '):
1073 self.b.append(x[1:])
1225 self.b.append(x[1:])
1074 # @@ -start,len +start,len @@
1226 # @@ -start,len +start,len @@
1075 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1227 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1076 self.startb, self.lenb)
1228 self.startb, self.lenb)
1077 self.hunk[0] = self.desc
1229 self.hunk[0] = self.desc
1078 self._fixnewline(lr)
1230 self._fixnewline(lr)
1079
1231
1080 def _fixnewline(self, lr):
1232 def _fixnewline(self, lr):
1081 l = lr.readline()
1233 l = lr.readline()
1082 if l.startswith('\ '):
1234 if l.startswith('\ '):
1083 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1235 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1084 else:
1236 else:
1085 lr.push(l)
1237 lr.push(l)
1086
1238
1087 def complete(self):
1239 def complete(self):
1088 return len(self.a) == self.lena and len(self.b) == self.lenb
1240 return len(self.a) == self.lena and len(self.b) == self.lenb
1089
1241
1090 def _fuzzit(self, old, new, fuzz, toponly):
1242 def _fuzzit(self, old, new, fuzz, toponly):
1091 # this removes context lines from the top and bottom of list 'l'. It
1243 # this removes context lines from the top and bottom of list 'l'. It
1092 # checks the hunk to make sure only context lines are removed, and then
1244 # checks the hunk to make sure only context lines are removed, and then
1093 # returns a new shortened list of lines.
1245 # returns a new shortened list of lines.
1094 fuzz = min(fuzz, len(old))
1246 fuzz = min(fuzz, len(old))
1095 if fuzz:
1247 if fuzz:
1096 top = 0
1248 top = 0
1097 bot = 0
1249 bot = 0
1098 hlen = len(self.hunk)
1250 hlen = len(self.hunk)
1099 for x in xrange(hlen - 1):
1251 for x in xrange(hlen - 1):
1100 # the hunk starts with the @@ line, so use x+1
1252 # the hunk starts with the @@ line, so use x+1
1101 if self.hunk[x + 1][0] == ' ':
1253 if self.hunk[x + 1][0] == ' ':
1102 top += 1
1254 top += 1
1103 else:
1255 else:
1104 break
1256 break
1105 if not toponly:
1257 if not toponly:
1106 for x in xrange(hlen - 1):
1258 for x in xrange(hlen - 1):
1107 if self.hunk[hlen - bot - 1][0] == ' ':
1259 if self.hunk[hlen - bot - 1][0] == ' ':
1108 bot += 1
1260 bot += 1
1109 else:
1261 else:
1110 break
1262 break
1111
1263
1112 bot = min(fuzz, bot)
1264 bot = min(fuzz, bot)
1113 top = min(fuzz, top)
1265 top = min(fuzz, top)
1114 return old[top:len(old) - bot], new[top:len(new) - bot], top
1266 return old[top:len(old) - bot], new[top:len(new) - bot], top
1115 return old, new, 0
1267 return old, new, 0
1116
1268
1117 def fuzzit(self, fuzz, toponly):
1269 def fuzzit(self, fuzz, toponly):
1118 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1270 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1119 oldstart = self.starta + top
1271 oldstart = self.starta + top
1120 newstart = self.startb + top
1272 newstart = self.startb + top
1121 # zero length hunk ranges already have their start decremented
1273 # zero length hunk ranges already have their start decremented
1122 if self.lena and oldstart > 0:
1274 if self.lena and oldstart > 0:
1123 oldstart -= 1
1275 oldstart -= 1
1124 if self.lenb and newstart > 0:
1276 if self.lenb and newstart > 0:
1125 newstart -= 1
1277 newstart -= 1
1126 return old, oldstart, new, newstart
1278 return old, oldstart, new, newstart
1127
1279
1128 class binhunk(object):
1280 class binhunk(object):
1129 'A binary patch file.'
1281 'A binary patch file.'
1130 def __init__(self, lr, fname):
1282 def __init__(self, lr, fname):
1131 self.text = None
1283 self.text = None
1132 self.delta = False
1284 self.delta = False
1133 self.hunk = ['GIT binary patch\n']
1285 self.hunk = ['GIT binary patch\n']
1134 self._fname = fname
1286 self._fname = fname
1135 self._read(lr)
1287 self._read(lr)
1136
1288
1137 def complete(self):
1289 def complete(self):
1138 return self.text is not None
1290 return self.text is not None
1139
1291
1140 def new(self, lines):
1292 def new(self, lines):
1141 if self.delta:
1293 if self.delta:
1142 return [applybindelta(self.text, ''.join(lines))]
1294 return [applybindelta(self.text, ''.join(lines))]
1143 return [self.text]
1295 return [self.text]
1144
1296
1145 def _read(self, lr):
1297 def _read(self, lr):
1146 def getline(lr, hunk):
1298 def getline(lr, hunk):
1147 l = lr.readline()
1299 l = lr.readline()
1148 hunk.append(l)
1300 hunk.append(l)
1149 return l.rstrip('\r\n')
1301 return l.rstrip('\r\n')
1150
1302
1151 size = 0
1303 size = 0
1152 while True:
1304 while True:
1153 line = getline(lr, self.hunk)
1305 line = getline(lr, self.hunk)
1154 if not line:
1306 if not line:
1155 raise PatchError(_('could not extract "%s" binary data')
1307 raise PatchError(_('could not extract "%s" binary data')
1156 % self._fname)
1308 % self._fname)
1157 if line.startswith('literal '):
1309 if line.startswith('literal '):
1158 size = int(line[8:].rstrip())
1310 size = int(line[8:].rstrip())
1159 break
1311 break
1160 if line.startswith('delta '):
1312 if line.startswith('delta '):
1161 size = int(line[6:].rstrip())
1313 size = int(line[6:].rstrip())
1162 self.delta = True
1314 self.delta = True
1163 break
1315 break
1164 dec = []
1316 dec = []
1165 line = getline(lr, self.hunk)
1317 line = getline(lr, self.hunk)
1166 while len(line) > 1:
1318 while len(line) > 1:
1167 l = line[0]
1319 l = line[0]
1168 if l <= 'Z' and l >= 'A':
1320 if l <= 'Z' and l >= 'A':
1169 l = ord(l) - ord('A') + 1
1321 l = ord(l) - ord('A') + 1
1170 else:
1322 else:
1171 l = ord(l) - ord('a') + 27
1323 l = ord(l) - ord('a') + 27
1172 try:
1324 try:
1173 dec.append(base85.b85decode(line[1:])[:l])
1325 dec.append(base85.b85decode(line[1:])[:l])
1174 except ValueError, e:
1326 except ValueError, e:
1175 raise PatchError(_('could not decode "%s" binary patch: %s')
1327 raise PatchError(_('could not decode "%s" binary patch: %s')
1176 % (self._fname, str(e)))
1328 % (self._fname, str(e)))
1177 line = getline(lr, self.hunk)
1329 line = getline(lr, self.hunk)
1178 text = zlib.decompress(''.join(dec))
1330 text = zlib.decompress(''.join(dec))
1179 if len(text) != size:
1331 if len(text) != size:
1180 raise PatchError(_('"%s" length is %d bytes, should be %d')
1332 raise PatchError(_('"%s" length is %d bytes, should be %d')
1181 % (self._fname, len(text), size))
1333 % (self._fname, len(text), size))
1182 self.text = text
1334 self.text = text
1183
1335
1184 def parsefilename(str):
1336 def parsefilename(str):
1185 # --- filename \t|space stuff
1337 # --- filename \t|space stuff
1186 s = str[4:].rstrip('\r\n')
1338 s = str[4:].rstrip('\r\n')
1187 i = s.find('\t')
1339 i = s.find('\t')
1188 if i < 0:
1340 if i < 0:
1189 i = s.find(' ')
1341 i = s.find(' ')
1190 if i < 0:
1342 if i < 0:
1191 return s
1343 return s
1192 return s[:i]
1344 return s[:i]
1193
1345
1194 def parsepatch(fp):
1346 def parsepatch(fp):
1195 """patch -> [] of headers -> [] of hunks """
1347 """patch -> [] of headers -> [] of hunks """
1196 class parser(object):
1348 class parser(object):
1197 """patch parsing state machine"""
1349 """patch parsing state machine"""
1198 def __init__(self):
1350 def __init__(self):
1199 self.fromline = 0
1351 self.fromline = 0
1200 self.toline = 0
1352 self.toline = 0
1201 self.proc = ''
1353 self.proc = ''
1202 self.header = None
1354 self.header = None
1203 self.context = []
1355 self.context = []
1204 self.before = []
1356 self.before = []
1205 self.hunk = []
1357 self.hunk = []
1206 self.headers = []
1358 self.headers = []
1207
1359
1208 def addrange(self, limits):
1360 def addrange(self, limits):
1209 fromstart, fromend, tostart, toend, proc = limits
1361 fromstart, fromend, tostart, toend, proc = limits
1210 self.fromline = int(fromstart)
1362 self.fromline = int(fromstart)
1211 self.toline = int(tostart)
1363 self.toline = int(tostart)
1212 self.proc = proc
1364 self.proc = proc
1213
1365
1214 def addcontext(self, context):
1366 def addcontext(self, context):
1215 if self.hunk:
1367 if self.hunk:
1216 h = recordhunk(self.header, self.fromline, self.toline,
1368 h = recordhunk(self.header, self.fromline, self.toline,
1217 self.proc, self.before, self.hunk, context)
1369 self.proc, self.before, self.hunk, context)
1218 self.header.hunks.append(h)
1370 self.header.hunks.append(h)
1219 self.fromline += len(self.before) + h.removed
1371 self.fromline += len(self.before) + h.removed
1220 self.toline += len(self.before) + h.added
1372 self.toline += len(self.before) + h.added
1221 self.before = []
1373 self.before = []
1222 self.hunk = []
1374 self.hunk = []
1223 self.proc = ''
1375 self.proc = ''
1224 self.context = context
1376 self.context = context
1225
1377
1226 def addhunk(self, hunk):
1378 def addhunk(self, hunk):
1227 if self.context:
1379 if self.context:
1228 self.before = self.context
1380 self.before = self.context
1229 self.context = []
1381 self.context = []
1230 self.hunk = hunk
1382 self.hunk = hunk
1231
1383
1232 def newfile(self, hdr):
1384 def newfile(self, hdr):
1233 self.addcontext([])
1385 self.addcontext([])
1234 h = header(hdr)
1386 h = header(hdr)
1235 self.headers.append(h)
1387 self.headers.append(h)
1236 self.header = h
1388 self.header = h
1237
1389
1238 def addother(self, line):
1390 def addother(self, line):
1239 pass # 'other' lines are ignored
1391 pass # 'other' lines are ignored
1240
1392
1241 def finished(self):
1393 def finished(self):
1242 self.addcontext([])
1394 self.addcontext([])
1243 return self.headers
1395 return self.headers
1244
1396
1245 transitions = {
1397 transitions = {
1246 'file': {'context': addcontext,
1398 'file': {'context': addcontext,
1247 'file': newfile,
1399 'file': newfile,
1248 'hunk': addhunk,
1400 'hunk': addhunk,
1249 'range': addrange},
1401 'range': addrange},
1250 'context': {'file': newfile,
1402 'context': {'file': newfile,
1251 'hunk': addhunk,
1403 'hunk': addhunk,
1252 'range': addrange,
1404 'range': addrange,
1253 'other': addother},
1405 'other': addother},
1254 'hunk': {'context': addcontext,
1406 'hunk': {'context': addcontext,
1255 'file': newfile,
1407 'file': newfile,
1256 'range': addrange},
1408 'range': addrange},
1257 'range': {'context': addcontext,
1409 'range': {'context': addcontext,
1258 'hunk': addhunk},
1410 'hunk': addhunk},
1259 'other': {'other': addother},
1411 'other': {'other': addother},
1260 }
1412 }
1261
1413
1262 p = parser()
1414 p = parser()
1263
1415
1264 state = 'context'
1416 state = 'context'
1265 for newstate, data in scanpatch(fp):
1417 for newstate, data in scanpatch(fp):
1266 try:
1418 try:
1267 p.transitions[state][newstate](p, data)
1419 p.transitions[state][newstate](p, data)
1268 except KeyError:
1420 except KeyError:
1269 raise PatchError('unhandled transition: %s -> %s' %
1421 raise PatchError('unhandled transition: %s -> %s' %
1270 (state, newstate))
1422 (state, newstate))
1271 state = newstate
1423 state = newstate
1272 return p.finished()
1424 return p.finished()
1273
1425
1274 def pathtransform(path, strip, prefix):
1426 def pathtransform(path, strip, prefix):
1275 '''turn a path from a patch into a path suitable for the repository
1427 '''turn a path from a patch into a path suitable for the repository
1276
1428
1277 prefix, if not empty, is expected to be normalized with a / at the end.
1429 prefix, if not empty, is expected to be normalized with a / at the end.
1278
1430
1279 Returns (stripped components, path in repository).
1431 Returns (stripped components, path in repository).
1280
1432
1281 >>> pathtransform('a/b/c', 0, '')
1433 >>> pathtransform('a/b/c', 0, '')
1282 ('', 'a/b/c')
1434 ('', 'a/b/c')
1283 >>> pathtransform(' a/b/c ', 0, '')
1435 >>> pathtransform(' a/b/c ', 0, '')
1284 ('', ' a/b/c')
1436 ('', ' a/b/c')
1285 >>> pathtransform(' a/b/c ', 2, '')
1437 >>> pathtransform(' a/b/c ', 2, '')
1286 ('a/b/', 'c')
1438 ('a/b/', 'c')
1287 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1439 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1288 ('a//b/', 'd/e/c')
1440 ('a//b/', 'd/e/c')
1289 >>> pathtransform('a/b/c', 3, '')
1441 >>> pathtransform('a/b/c', 3, '')
1290 Traceback (most recent call last):
1442 Traceback (most recent call last):
1291 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1443 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1292 '''
1444 '''
1293 pathlen = len(path)
1445 pathlen = len(path)
1294 i = 0
1446 i = 0
1295 if strip == 0:
1447 if strip == 0:
1296 return '', path.rstrip()
1448 return '', path.rstrip()
1297 count = strip
1449 count = strip
1298 while count > 0:
1450 while count > 0:
1299 i = path.find('/', i)
1451 i = path.find('/', i)
1300 if i == -1:
1452 if i == -1:
1301 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1453 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1302 (count, strip, path))
1454 (count, strip, path))
1303 i += 1
1455 i += 1
1304 # consume '//' in the path
1456 # consume '//' in the path
1305 while i < pathlen - 1 and path[i] == '/':
1457 while i < pathlen - 1 and path[i] == '/':
1306 i += 1
1458 i += 1
1307 count -= 1
1459 count -= 1
1308 return path[:i].lstrip(), prefix + path[i:].rstrip()
1460 return path[:i].lstrip(), prefix + path[i:].rstrip()
1309
1461
1310 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1462 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1311 nulla = afile_orig == "/dev/null"
1463 nulla = afile_orig == "/dev/null"
1312 nullb = bfile_orig == "/dev/null"
1464 nullb = bfile_orig == "/dev/null"
1313 create = nulla and hunk.starta == 0 and hunk.lena == 0
1465 create = nulla and hunk.starta == 0 and hunk.lena == 0
1314 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1466 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1315 abase, afile = pathtransform(afile_orig, strip, prefix)
1467 abase, afile = pathtransform(afile_orig, strip, prefix)
1316 gooda = not nulla and backend.exists(afile)
1468 gooda = not nulla and backend.exists(afile)
1317 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1469 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1318 if afile == bfile:
1470 if afile == bfile:
1319 goodb = gooda
1471 goodb = gooda
1320 else:
1472 else:
1321 goodb = not nullb and backend.exists(bfile)
1473 goodb = not nullb and backend.exists(bfile)
1322 missing = not goodb and not gooda and not create
1474 missing = not goodb and not gooda and not create
1323
1475
1324 # some diff programs apparently produce patches where the afile is
1476 # some diff programs apparently produce patches where the afile is
1325 # not /dev/null, but afile starts with bfile
1477 # not /dev/null, but afile starts with bfile
1326 abasedir = afile[:afile.rfind('/') + 1]
1478 abasedir = afile[:afile.rfind('/') + 1]
1327 bbasedir = bfile[:bfile.rfind('/') + 1]
1479 bbasedir = bfile[:bfile.rfind('/') + 1]
1328 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1480 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1329 and hunk.starta == 0 and hunk.lena == 0):
1481 and hunk.starta == 0 and hunk.lena == 0):
1330 create = True
1482 create = True
1331 missing = False
1483 missing = False
1332
1484
1333 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1485 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1334 # diff is between a file and its backup. In this case, the original
1486 # diff is between a file and its backup. In this case, the original
1335 # file should be patched (see original mpatch code).
1487 # file should be patched (see original mpatch code).
1336 isbackup = (abase == bbase and bfile.startswith(afile))
1488 isbackup = (abase == bbase and bfile.startswith(afile))
1337 fname = None
1489 fname = None
1338 if not missing:
1490 if not missing:
1339 if gooda and goodb:
1491 if gooda and goodb:
1340 fname = isbackup and afile or bfile
1492 fname = isbackup and afile or bfile
1341 elif gooda:
1493 elif gooda:
1342 fname = afile
1494 fname = afile
1343
1495
1344 if not fname:
1496 if not fname:
1345 if not nullb:
1497 if not nullb:
1346 fname = isbackup and afile or bfile
1498 fname = isbackup and afile or bfile
1347 elif not nulla:
1499 elif not nulla:
1348 fname = afile
1500 fname = afile
1349 else:
1501 else:
1350 raise PatchError(_("undefined source and destination files"))
1502 raise PatchError(_("undefined source and destination files"))
1351
1503
1352 gp = patchmeta(fname)
1504 gp = patchmeta(fname)
1353 if create:
1505 if create:
1354 gp.op = 'ADD'
1506 gp.op = 'ADD'
1355 elif remove:
1507 elif remove:
1356 gp.op = 'DELETE'
1508 gp.op = 'DELETE'
1357 return gp
1509 return gp
1358
1510
1359 def scanpatch(fp):
1511 def scanpatch(fp):
1360 """like patch.iterhunks, but yield different events
1512 """like patch.iterhunks, but yield different events
1361
1513
1362 - ('file', [header_lines + fromfile + tofile])
1514 - ('file', [header_lines + fromfile + tofile])
1363 - ('context', [context_lines])
1515 - ('context', [context_lines])
1364 - ('hunk', [hunk_lines])
1516 - ('hunk', [hunk_lines])
1365 - ('range', (-start,len, +start,len, proc))
1517 - ('range', (-start,len, +start,len, proc))
1366 """
1518 """
1367 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1519 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1368 lr = linereader(fp)
1520 lr = linereader(fp)
1369
1521
1370 def scanwhile(first, p):
1522 def scanwhile(first, p):
1371 """scan lr while predicate holds"""
1523 """scan lr while predicate holds"""
1372 lines = [first]
1524 lines = [first]
1373 while True:
1525 while True:
1374 line = lr.readline()
1526 line = lr.readline()
1375 if not line:
1527 if not line:
1376 break
1528 break
1377 if p(line):
1529 if p(line):
1378 lines.append(line)
1530 lines.append(line)
1379 else:
1531 else:
1380 lr.push(line)
1532 lr.push(line)
1381 break
1533 break
1382 return lines
1534 return lines
1383
1535
1384 while True:
1536 while True:
1385 line = lr.readline()
1537 line = lr.readline()
1386 if not line:
1538 if not line:
1387 break
1539 break
1388 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1540 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1389 def notheader(line):
1541 def notheader(line):
1390 s = line.split(None, 1)
1542 s = line.split(None, 1)
1391 return not s or s[0] not in ('---', 'diff')
1543 return not s or s[0] not in ('---', 'diff')
1392 header = scanwhile(line, notheader)
1544 header = scanwhile(line, notheader)
1393 fromfile = lr.readline()
1545 fromfile = lr.readline()
1394 if fromfile.startswith('---'):
1546 if fromfile.startswith('---'):
1395 tofile = lr.readline()
1547 tofile = lr.readline()
1396 header += [fromfile, tofile]
1548 header += [fromfile, tofile]
1397 else:
1549 else:
1398 lr.push(fromfile)
1550 lr.push(fromfile)
1399 yield 'file', header
1551 yield 'file', header
1400 elif line[0] == ' ':
1552 elif line[0] == ' ':
1401 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1553 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1402 elif line[0] in '-+':
1554 elif line[0] in '-+':
1403 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1555 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1404 else:
1556 else:
1405 m = lines_re.match(line)
1557 m = lines_re.match(line)
1406 if m:
1558 if m:
1407 yield 'range', m.groups()
1559 yield 'range', m.groups()
1408 else:
1560 else:
1409 yield 'other', line
1561 yield 'other', line
1410
1562
1411 def scangitpatch(lr, firstline):
1563 def scangitpatch(lr, firstline):
1412 """
1564 """
1413 Git patches can emit:
1565 Git patches can emit:
1414 - rename a to b
1566 - rename a to b
1415 - change b
1567 - change b
1416 - copy a to c
1568 - copy a to c
1417 - change c
1569 - change c
1418
1570
1419 We cannot apply this sequence as-is, the renamed 'a' could not be
1571 We cannot apply this sequence as-is, the renamed 'a' could not be
1420 found for it would have been renamed already. And we cannot copy
1572 found for it would have been renamed already. And we cannot copy
1421 from 'b' instead because 'b' would have been changed already. So
1573 from 'b' instead because 'b' would have been changed already. So
1422 we scan the git patch for copy and rename commands so we can
1574 we scan the git patch for copy and rename commands so we can
1423 perform the copies ahead of time.
1575 perform the copies ahead of time.
1424 """
1576 """
1425 pos = 0
1577 pos = 0
1426 try:
1578 try:
1427 pos = lr.fp.tell()
1579 pos = lr.fp.tell()
1428 fp = lr.fp
1580 fp = lr.fp
1429 except IOError:
1581 except IOError:
1430 fp = cStringIO.StringIO(lr.fp.read())
1582 fp = cStringIO.StringIO(lr.fp.read())
1431 gitlr = linereader(fp)
1583 gitlr = linereader(fp)
1432 gitlr.push(firstline)
1584 gitlr.push(firstline)
1433 gitpatches = readgitpatch(gitlr)
1585 gitpatches = readgitpatch(gitlr)
1434 fp.seek(pos)
1586 fp.seek(pos)
1435 return gitpatches
1587 return gitpatches
1436
1588
1437 def iterhunks(fp):
1589 def iterhunks(fp):
1438 """Read a patch and yield the following events:
1590 """Read a patch and yield the following events:
1439 - ("file", afile, bfile, firsthunk): select a new target file.
1591 - ("file", afile, bfile, firsthunk): select a new target file.
1440 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1592 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1441 "file" event.
1593 "file" event.
1442 - ("git", gitchanges): current diff is in git format, gitchanges
1594 - ("git", gitchanges): current diff is in git format, gitchanges
1443 maps filenames to gitpatch records. Unique event.
1595 maps filenames to gitpatch records. Unique event.
1444 """
1596 """
1445 afile = ""
1597 afile = ""
1446 bfile = ""
1598 bfile = ""
1447 state = None
1599 state = None
1448 hunknum = 0
1600 hunknum = 0
1449 emitfile = newfile = False
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())

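# Illustrative aside, not part of patch.py: a heavily simplified sketch of the
# state machine above, covering only unified diffs, to show the shape of the
# ('file', ...) / ('hunk', ...) event stream that applydiff() consumes further
# down.  The helper name _iterunified is hypothetical.
def _iterunified(lines):
    afile = bfile = None
    for i, line in enumerate(lines):
        if line.startswith('--- ') and i + 1 < len(lines) \
           and lines[i + 1].startswith('+++ '):
            afile, bfile = line[4:].rstrip(), lines[i + 1][4:].rstrip()
            yield 'file', (afile, bfile)
        elif line.startswith('@@') and afile is not None:
            yield 'hunk', line.rstrip()

# for event in _iterunified(['--- a/x', '+++ b/x', '@@ -1 +1 @@', '-old', '+new']):
#     print(event)
# -> ('file', ('a/x', 'b/x')) followed by ('hunk', '@@ -1 +1 @@')
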
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(binchunk):
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out

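# Illustrative aside, not part of patch.py: the two headers that deltahead()
# skips above are the source and result lengths of the git delta, encoded as
# little-endian base-128 varints (7 payload bits per byte, high bit set on
# every byte except the last).  A stand-alone decoder, with hypothetical names:
def _decodevarint(data, pos=0):
    """Return (value, next_pos) for the varint starting at data[pos]."""
    value = shift = 0
    while True:
        byte = ord(data[pos:pos + 1])
        pos += 1
        value |= (byte & 0x7f) << shift
        shift += 7
        if not byte & 0x80:
            return value, pos

# _decodevarint(b'\x91\x02') == (273, 2)   # 0x11 | (0x02 << 7)
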
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)

def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):

    if prefix:
        # clean up double slashes, lack of trailing slashes, etc
        prefix = util.normpath(prefix) + '/'
    def pstrip(p):
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    # FIXME: failing getfile has never been handled here
                    assert data is not None
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError, inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err

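# Illustrative aside, not part of patch.py: what the strip/prefix handling at
# the top of _applydiff() amounts to for a simple path.  'strip' counts leading
# path components to drop; the 'a/'/'b/' prefix is already gone by the time
# pstrip() sees a git metadata path, hence the 'strip - 1'.  A minimal sketch
# that ignores the corner cases pathtransform() deals with; names are
# hypothetical.
def _simplestrip(path, count, prefix=''):
    return prefix + '/'.join(path.split('/')[count:])

# _simplestrip('src/module/file.c', 1)        -> 'module/file.c'
# _simplestrip('module/file.c', 0, 'vendor/') -> 'vendor/module/file.c'
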
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz

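# Illustrative aside, not part of patch.py: roughly what the popen() call above
# assembles for an external patcher.  With patcher='patch', a repository root
# of /repo, strip=1 and patchname='fix.diff' (all hypothetical values, and
# approximating util.shellquote with plain single quotes), the spawned command
# is approximately:
#
#     patch -d '/repo' -p1 < 'fix.diff'
#
# whose stdout is then scanned line by line for "patching file", "with fuzz",
# "saving rejects to file" and "FAILED" markers.
patcher, cwd, strip, patchname = 'patch', '/repo', 1, 'fix.diff'
args = ["-d '%s'" % cwd]
print('%s %s -p%d < %s' % (patcher, ' '.join(args), strip, "'%s'" % patchname))
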
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0

def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)

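# Illustrative aside, not part of patch.py: a minimal sketch of what the
# 'crlf'/'lf' eol modes described in patch()'s docstring mean for the text
# written back ('strict' leaves bytes untouched).  The real handling is done
# elsewhere in this module (see the eolmodes check in patchbackend() above);
# the helper below is hypothetical and glosses over mixed line endings.
def _normalizeeol(text, eolmode):
    if eolmode == 'strict':
        return text
    eol = '\r\n' if eolmode == 'crlf' else '\n'
    return ''.join(l.rstrip('\r\n') + eol for l in text.splitlines(True))

# _normalizeeol('a\r\nb\n', 'lf')   -> 'a\nb\n'
# _normalizeeol('a\nb\n', 'crlf')   -> 'a\r\nb\r\n'
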
def changedfiles(ui, repo, patchpath, strip=1):
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()

class GitDiffRequired(Exception):
    pass

def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

diffopts = diffallopts

def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        if opts:
            v = opts.get(key)
            if v:
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')
    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary')
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)

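# Illustrative aside, not part of patch.py: the lookup order implemented by
# get() in difffeatureopts() above, stripped of the ui plumbing.  Explicit
# command-line opts win, then (for options that opt into it) plain mode
# (HGPLAIN) forces a fixed value, and only then is the [diff] config section
# consulted.  The helper and its arguments are hypothetical.
def _resolveopt(key, opts, plain, config, forceplain=None):
    if opts and opts.get(key):
        return opts[key]
    if forceplain is not None and plain:
        return forceplain
    return config.get(key)

# _resolveopt('git', {'git': True}, plain=False, config={'git': False}) -> True
# _resolveopt('noprefix', {}, plain=True, config={'noprefix': True},
#             forceplain=False) -> False
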
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwargs) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        cache = {}
        order = util.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    hexfunc = repo.ui.debugflag and hex or short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2)

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)

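# Illustrative aside, not part of patch.py: the bounded-cache pattern used by
# lrugetfilectx() above (a dict of cached filelogs plus a deque tracking
# recency, capped at around 20 entries), shown as a generic memoizer.  The
# helper is hypothetical and caches return values rather than filelogs.
import collections

def _makelru(fn, maxsize=20):
    cache, order = {}, collections.deque()
    def wrapped(key):
        if key in cache:
            order.remove(key)
        else:
            if len(cache) >= maxsize:
                del cache[order.popleft()]
            cache[key] = fn(key)
        order.append(key)
        return cache[key]
    return wrapped

# squares = _makelru(lambda n: n * n, maxsize=3)
# squares(2), squares(3), squares(2)   # 4, 9, 4 - key 2 is now most recent
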
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')

def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)

def _filepairs(ctx1, modified, added, removed, copy, opts):
    '''generates tuples (f1, f2, copyop), where f1 is the name of the file
    before and f2 is the name after. For added files, f1 will be None,
    and for removed files, f2 will be None. copyop may be set to None, 'copy'
    or 'rename' (the latter two only if opts.git is set).'''
    gone = set()

    copyto = dict([(v, k) for k, v in copy.items()])

    addedset, removedset = set(added), set(removed)
    # Fix up added, since merged-in additions appear as
    # modifications during merges
    for f in modified:
        if f not in ctx1:
            addedset.add(f)

    for f in sorted(modified + added + removed):
        copyop = None
        f1, f2 = f, f
        if f in addedset:
            f1 = None
            if f in copy:
                if opts.git:
                    f1 = copy[f]
                    if f1 in removedset and f1 not in gone:
                        copyop = 'rename'
                        gone.add(f1)
                    else:
                        copyop = 'copy'
        elif f in removedset:
            f2 = None
            if opts.git:
                # have we already reported a copy above?
                if (f in copyto and copyto[f] in addedset
                    and copy[copyto[f]] == f):
                    continue
        yield f1, f2, copyop

def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):

    def gitindex(text):
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    for f1, f2, copyop in _filepairs(
            ctx1, modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = posixpath.join(prefix, f1 or f2)
        path2 = posixpath.join(prefix, f2 or f1)
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text

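# Illustrative aside, not part of patch.py: gitindex() inside trydiff() above
# reproduces git's blob hashing - SHA-1 over a "blob <length>\0" header
# followed by the raw content.  A stand-alone equivalent using hashlib only
# (the helper name is hypothetical):
import hashlib

def _gitblobid(data):
    h = hashlib.sha1(('blob %d\0' % len(data)).encode('ascii'))
    h.update(data)
    return h.hexdigest()

# _gitblobid(b'') == 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'
# (the well-known id of the empty blob, same as `git hash-object /dev/null`)
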
def diffstatsum(stats):
    maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
    for f, a, r, b in stats:
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary

def diffstatdata(lines):
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results

def diffstat(lines, width=80, git=False):
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)

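# Illustrative aside, not part of patch.py: the scaling rule used by scale()
# in diffstat() above, on concrete numbers.  When the graph has to be
# compressed, a nonzero change count is never rounded down to an empty bar.
def _scale(i, graphwidth, maxtotal):
    if maxtotal <= graphwidth:
        return i
    return max(i * graphwidth // maxtotal, int(bool(i)))

# _scale(150, 60, 300) == 30   # proportional compression
# _scale(2, 60, 300) == 1      # 2*60//300 == 0, bumped up to one '+'/'-'
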
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(r'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(r'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')