record: move filterpatch from record to patch...
Laurent Charignon
r24269:9a745ced default
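
This changeset moves the interactive filterpatch helper (together with the copy import it needs) out of the record extension's record.py and into mercurial's patch.py, making it reachable through the patch module for other callers. The record extension keeps its behaviour and simply calls the relocated function. A minimal sketch of the call-site change, as it appears in the first file's diff below:

    # before: record.py used its own local helper
    chunks = filterpatch(ui, patch.parsepatch(fp))

    # after: the helper is reached through the patch module
    chunks = patch.filterpatch(ui, patch.parsepatch(fp))
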
@@ -1,433 +1,281
1 1 # record.py
2 2 #
3 3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''commands to interactively select changes for commit/qrefresh'''
9 9
10 10 from mercurial.i18n import _
11 11 from mercurial import cmdutil, commands, extensions, hg, patch
12 12 from mercurial import util
13 import copy, cStringIO, errno, os, shutil, tempfile
13 import cStringIO, errno, os, shutil, tempfile
14 14
15 15 cmdtable = {}
16 16 command = cmdutil.command(cmdtable)
17 17 testedwith = 'internal'
18 18
19 def filterpatch(ui, headers):
20 """Interactively filter patch chunks into applied-only chunks"""
21
22 def prompt(skipfile, skipall, query, chunk):
23 """prompt query, and process base inputs
24
25 - y/n for the rest of file
26 - y/n for the rest
27 - ? (help)
28 - q (quit)
29
30 Return True/False and possibly updated skipfile and skipall.
31 """
32 newpatches = None
33 if skipall is not None:
34 return skipall, skipfile, skipall, newpatches
35 if skipfile is not None:
36 return skipfile, skipfile, skipall, newpatches
37 while True:
38 resps = _('[Ynesfdaq?]'
39 '$$ &Yes, record this change'
40 '$$ &No, skip this change'
41 '$$ &Edit this change manually'
42 '$$ &Skip remaining changes to this file'
43 '$$ Record remaining changes to this &file'
44 '$$ &Done, skip remaining changes and files'
45 '$$ Record &all changes to all remaining files'
46 '$$ &Quit, recording no changes'
47 '$$ &? (display help)')
48 r = ui.promptchoice("%s %s" % (query, resps))
49 ui.write("\n")
50 if r == 8: # ?
51 for c, t in ui.extractchoices(resps)[1]:
52 ui.write('%s - %s\n' % (c, t.lower()))
53 continue
54 elif r == 0: # yes
55 ret = True
56 elif r == 1: # no
57 ret = False
58 elif r == 2: # Edit patch
59 if chunk is None:
60 ui.write(_('cannot edit patch for whole file'))
61 ui.write("\n")
62 continue
63 if chunk.header.binary():
64 ui.write(_('cannot edit patch for binary file'))
65 ui.write("\n")
66 continue
67 # Patch comment based on the Git one (based on comment at end of
68 # http://mercurial.selenic.com/wiki/RecordExtension)
69 phelp = '---' + _("""
70 To remove '-' lines, make them ' ' lines (context).
71 To remove '+' lines, delete them.
72 Lines starting with # will be removed from the patch.
73
74 If the patch applies cleanly, the edited hunk will immediately be
75 added to the record list. If it does not apply cleanly, a rejects
76 file will be generated: you can use that when you try again. If
77 all lines of the hunk are removed, then the edit is aborted and
78 the hunk is left unchanged.
79 """)
80 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
81 suffix=".diff", text=True)
82 ncpatchfp = None
83 try:
84 # Write the initial patch
85 f = os.fdopen(patchfd, "w")
86 chunk.header.write(f)
87 chunk.write(f)
88 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
89 f.close()
90 # Start the editor and wait for it to complete
91 editor = ui.geteditor()
92 ui.system("%s \"%s\"" % (editor, patchfn),
93 environ={'HGUSER': ui.username()},
94 onerr=util.Abort, errprefix=_("edit failed"))
95 # Remove comment lines
96 patchfp = open(patchfn)
97 ncpatchfp = cStringIO.StringIO()
98 for line in patchfp:
99 if not line.startswith('#'):
100 ncpatchfp.write(line)
101 patchfp.close()
102 ncpatchfp.seek(0)
103 newpatches = patch.parsepatch(ncpatchfp)
104 finally:
105 os.unlink(patchfn)
106 del ncpatchfp
107 # Signal that the chunk shouldn't be applied as-is, but
108 # provide the new patch to be used instead.
109 ret = False
110 elif r == 3: # Skip
111 ret = skipfile = False
112 elif r == 4: # file (Record remaining)
113 ret = skipfile = True
114 elif r == 5: # done, skip remaining
115 ret = skipall = False
116 elif r == 6: # all
117 ret = skipall = True
118 elif r == 7: # quit
119 raise util.Abort(_('user quit'))
120 return ret, skipfile, skipall, newpatches
121
122 seen = set()
123 applied = {} # 'filename' -> [] of chunks
124 skipfile, skipall = None, None
125 pos, total = 1, sum(len(h.hunks) for h in headers)
126 for h in headers:
127 pos += len(h.hunks)
128 skipfile = None
129 fixoffset = 0
130 hdr = ''.join(h.header)
131 if hdr in seen:
132 continue
133 seen.add(hdr)
134 if skipall is None:
135 h.pretty(ui)
136 msg = (_('examine changes to %s?') %
137 _(' and ').join("'%s'" % f for f in h.files()))
138 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
139 if not r:
140 continue
141 applied[h.filename()] = [h]
142 if h.allhunks():
143 applied[h.filename()] += h.hunks
144 continue
145 for i, chunk in enumerate(h.hunks):
146 if skipfile is None and skipall is None:
147 chunk.pretty(ui)
148 if total == 1:
149 msg = _("record this change to '%s'?") % chunk.filename()
150 else:
151 idx = pos - len(h.hunks) + i
152 msg = _("record change %d/%d to '%s'?") % (idx, total,
153 chunk.filename())
154 r, skipfile, skipall, newpatches = prompt(skipfile,
155 skipall, msg, chunk)
156 if r:
157 if fixoffset:
158 chunk = copy.copy(chunk)
159 chunk.toline += fixoffset
160 applied[chunk.filename()].append(chunk)
161 elif newpatches is not None:
162 for newpatch in newpatches:
163 for newhunk in newpatch.hunks:
164 if fixoffset:
165 newhunk.toline += fixoffset
166 applied[newhunk.filename()].append(newhunk)
167 else:
168 fixoffset += chunk.removed - chunk.added
169 return sum([h for h in applied.itervalues()
170 if h[0].special() or len(h) > 1], [])
171 19
172 20 @command("record",
173 21 # same options as commit + white space diff options
174 22 commands.table['^commit|ci'][1][:] + commands.diffwsopts,
175 23 _('hg record [OPTION]... [FILE]...'))
176 24 def record(ui, repo, *pats, **opts):
177 25 '''interactively select changes to commit
178 26
179 27 If a list of files is omitted, all changes reported by :hg:`status`
180 28 will be candidates for recording.
181 29
182 30 See :hg:`help dates` for a list of formats valid for -d/--date.
183 31
184 32 You will be prompted for whether to record changes to each
185 33 modified file, and for files with multiple changes, for each
186 34 change to use. For each query, the following responses are
187 35 possible::
188 36
189 37 y - record this change
190 38 n - skip this change
191 39 e - edit this change manually
192 40
193 41 s - skip remaining changes to this file
194 42 f - record remaining changes to this file
195 43
196 44 d - done, skip remaining changes and files
197 45 a - record all changes to all remaining files
198 46 q - quit, recording no changes
199 47
200 48 ? - display help
201 49
202 50 This command is not available when committing a merge.'''
203 51
204 52 dorecord(ui, repo, commands.commit, 'commit', False, *pats, **opts)
205 53
206 54 def qrefresh(origfn, ui, repo, *pats, **opts):
207 55 if not opts['interactive']:
208 56 return origfn(ui, repo, *pats, **opts)
209 57
210 58 mq = extensions.find('mq')
211 59
212 60 def committomq(ui, repo, *pats, **opts):
213 61 # At this point the working copy contains only changes that
214 62 # were accepted. All other changes were reverted.
215 63 # We can't pass *pats here since qrefresh will undo all other
216 64 # changed files in the patch that aren't in pats.
217 65 mq.refresh(ui, repo, **opts)
218 66
219 67 # backup all changed files
220 68 dorecord(ui, repo, committomq, 'qrefresh', True, *pats, **opts)
221 69
222 70 # This command registration is replaced during uisetup().
223 71 @command('qrecord',
224 72 [],
225 73 _('hg qrecord [OPTION]... PATCH [FILE]...'),
226 74 inferrepo=True)
227 75 def qrecord(ui, repo, patch, *pats, **opts):
228 76 '''interactively record a new patch
229 77
230 78 See :hg:`help qnew` & :hg:`help record` for more information and
231 79 usage.
232 80 '''
233 81
234 82 try:
235 83 mq = extensions.find('mq')
236 84 except KeyError:
237 85 raise util.Abort(_("'mq' extension not loaded"))
238 86
239 87 repo.mq.checkpatchname(patch)
240 88
241 89 def committomq(ui, repo, *pats, **opts):
242 90 opts['checkname'] = False
243 91 mq.new(ui, repo, patch, *pats, **opts)
244 92
245 93 dorecord(ui, repo, committomq, 'qnew', False, *pats, **opts)
246 94
247 95 def qnew(origfn, ui, repo, patch, *args, **opts):
248 96 if opts['interactive']:
249 97 return qrecord(ui, repo, patch, *args, **opts)
250 98 return origfn(ui, repo, patch, *args, **opts)
251 99
252 100 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, *pats, **opts):
253 101 if not ui.interactive():
254 102 raise util.Abort(_('running non-interactively, use %s instead') %
255 103 cmdsuggest)
256 104
257 105 # make sure username is set before going interactive
258 106 if not opts.get('user'):
259 107 ui.username() # raise exception, username not provided
260 108
261 109 def recordfunc(ui, repo, message, match, opts):
262 110 """This is the generic record driver.
263 111
264 112 Its job is to interactively filter local changes, and
265 113 accordingly prepare the working directory into a state in which the
266 114 job can be delegated to a non-interactive commit command such as
267 115 'commit' or 'qrefresh'.
268 116
269 117 After the actual job is done by the non-interactive command, the
270 118 working directory is restored to its original state.
271 119
272 120 In the end we'll record interesting changes, and everything else
273 121 will be left in place, so the user can continue working.
274 122 """
275 123
276 124 cmdutil.checkunfinished(repo, commit=True)
277 125 merge = len(repo[None].parents()) > 1
278 126 if merge:
279 127 raise util.Abort(_('cannot partially commit a merge '
280 128 '(use "hg commit" instead)'))
281 129
282 130 status = repo.status(match=match)
283 131 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
284 132 diffopts.nodates = True
285 133 diffopts.git = True
286 134 originalchunks = patch.diff(repo, changes=status, opts=diffopts)
287 135 fp = cStringIO.StringIO()
288 136 fp.write(''.join(originalchunks))
289 137 fp.seek(0)
290 138
291 139 # 1. filter patch, so we have an intending-to-apply subset of it
292 140 try:
293 chunks = filterpatch(ui, patch.parsepatch(fp))
141 chunks = patch.filterpatch(ui, patch.parsepatch(fp))
294 142 except patch.PatchError, err:
295 143 raise util.Abort(_('error parsing patch: %s') % err)
296 144
297 145 del fp
298 146
299 147 contenders = set()
300 148 for h in chunks:
301 149 try:
302 150 contenders.update(set(h.files()))
303 151 except AttributeError:
304 152 pass
305 153
306 154 changed = status.modified + status.added + status.removed
307 155 newfiles = [f for f in changed if f in contenders]
308 156 if not newfiles:
309 157 ui.status(_('no changes to record\n'))
310 158 return 0
311 159
312 160 newandmodifiedfiles = set()
313 161 for h in chunks:
314 162 ishunk = isinstance(h, patch.recordhunk)
315 163 isnew = h.filename() in status.added
316 164 if ishunk and isnew and not h in originalchunks:
317 165 newandmodifiedfiles.add(h.filename())
318 166
319 167 modified = set(status.modified)
320 168
321 169 # 2. backup changed files, so we can restore them in the end
322 170
323 171 if backupall:
324 172 tobackup = changed
325 173 else:
326 174 tobackup = [f for f in newfiles
327 175 if f in modified or f in newandmodifiedfiles]
328 176
329 177 backups = {}
330 178 if tobackup:
331 179 backupdir = repo.join('record-backups')
332 180 try:
333 181 os.mkdir(backupdir)
334 182 except OSError, err:
335 183 if err.errno != errno.EEXIST:
336 184 raise
337 185 try:
338 186 # backup continues
339 187 for f in tobackup:
340 188 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
341 189 dir=backupdir)
342 190 os.close(fd)
343 191 ui.debug('backup %r as %r\n' % (f, tmpname))
344 192 util.copyfile(repo.wjoin(f), tmpname)
345 193 shutil.copystat(repo.wjoin(f), tmpname)
346 194 backups[f] = tmpname
347 195
348 196 fp = cStringIO.StringIO()
349 197 for c in chunks:
350 198 fname = c.filename()
351 199 if fname in backups or fname in newandmodifiedfiles:
352 200 c.write(fp)
353 201 dopatch = fp.tell()
354 202 fp.seek(0)
355 203
356 204 [os.unlink(c) for c in newandmodifiedfiles]
357 205
358 206 # 3a. apply filtered patch to clean repo (clean)
359 207 if backups:
360 208 hg.revert(repo, repo.dirstate.p1(),
361 209 lambda key: key in backups)
362 210
363 211 # 3b. (apply)
364 212 if dopatch:
365 213 try:
366 214 ui.debug('applying patch\n')
367 215 ui.debug(fp.getvalue())
368 216 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
369 217 except patch.PatchError, err:
370 218 raise util.Abort(str(err))
371 219 del fp
372 220
373 221 # 4. We prepared working directory according to filtered
374 222 # patch. Now is the time to delegate the job to
375 223 # commit/qrefresh or the like!
376 224
377 225 # Make all of the pathnames absolute.
378 226 newfiles = [repo.wjoin(nf) for nf in newfiles]
379 227 commitfunc(ui, repo, *newfiles, **opts)
380 228
381 229 return 0
382 230 finally:
383 231 # 5. finally restore backed-up files
384 232 try:
385 233 for realname, tmpname in backups.iteritems():
386 234 ui.debug('restoring %r to %r\n' % (tmpname, realname))
387 235 util.copyfile(tmpname, repo.wjoin(realname))
388 236 # Our calls to copystat() here and above are a
389 237 # hack to trick any editors that have f open into
390 238 # thinking that we haven't modified them.
391 239 #
392 240 # Also note that this is racy, as an editor could
393 241 # notice the file's mtime before we've finished
394 242 # writing it.
395 243 shutil.copystat(tmpname, repo.wjoin(realname))
396 244 os.unlink(tmpname)
397 245 if tobackup:
398 246 os.rmdir(backupdir)
399 247 except OSError:
400 248 pass
401 249
402 250 # wrap ui.write so diff output can be labeled/colorized
403 251 def wrapwrite(orig, *args, **kw):
404 252 label = kw.pop('label', '')
405 253 for chunk, l in patch.difflabel(lambda: args):
406 254 orig(chunk, label=label + l)
407 255 oldwrite = ui.write
408 256 extensions.wrapfunction(ui, 'write', wrapwrite)
409 257 try:
410 258 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
411 259 finally:
412 260 ui.write = oldwrite
413 261
414 262 def uisetup(ui):
415 263 try:
416 264 mq = extensions.find('mq')
417 265 except KeyError:
418 266 return
419 267
420 268 cmdtable["qrecord"] = \
421 269 (qrecord,
422 270 # same options as qnew, but copy them so we don't get
423 271 # -i/--interactive for qrecord and add white space diff options
424 272 mq.cmdtable['^qnew'][1][:] + commands.diffwsopts,
425 273 _('hg qrecord [OPTION]... PATCH [FILE]...'))
426 274
427 275 _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch"))
428 276 _wrapcmd('qrefresh', mq.cmdtable, qrefresh,
429 277 _("interactively select changes to refresh"))
430 278
431 279 def _wrapcmd(cmd, table, wrapfn, msg):
432 280 entry = extensions.wrapcommand(table, cmd, wrapfn)
433 281 entry[1].append(('i', 'interactive', None, msg))
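
The second half of the diff adds filterpatch to patch.py (along with the copy import it relies on). Once it lives there, any caller that can produce parsed patch headers can drive the same interactive chunk selection that record uses. Below is a minimal sketch of such a caller, using only names that appear in this diff; the wrapper function interactivechunks itself is hypothetical and mirrors what recordfunc() does above:

    import cStringIO
    from mercurial import patch

    def interactivechunks(ui, repo, match, opts):
        # hypothetical wrapper: diff the working directory, parse the result,
        # and let the user pick hunks interactively
        status = repo.status(match=match)
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        fp = cStringIO.StringIO()
        fp.write(''.join(patch.diff(repo, changes=status, opts=diffopts)))
        fp.seek(0)
        # returns only the headers/hunks the user chose to apply
        return patch.filterpatch(ui, patch.parsepatch(fp))
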
@@ -1,2226 +1,2378
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 import cStringIO, email, os, errno, re, posixpath
9 import cStringIO, email, os, errno, re, posixpath, copy
10 10 import tempfile, zlib, shutil
11 11 # On python2.4 you have to import these by name or they fail to
12 12 # load. This was not a problem on Python 2.7.
13 13 import email.Generator
14 14 import email.Parser
15 15
16 16 from i18n import _
17 17 from node import hex, short
18 18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19 19
20 20 gitre = re.compile('diff --git a/(.*) b/(.*)')
21 21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
22 22
23 23 class PatchError(Exception):
24 24 pass
25 25
26 26
27 27 # public functions
28 28
29 29 def split(stream):
30 30 '''return an iterator of individual patches from a stream'''
31 31 def isheader(line, inheader):
32 32 if inheader and line[0] in (' ', '\t'):
33 33 # continuation
34 34 return True
35 35 if line[0] in (' ', '-', '+'):
36 36 # diff line - don't check for header pattern in there
37 37 return False
38 38 l = line.split(': ', 1)
39 39 return len(l) == 2 and ' ' not in l[0]
40 40
41 41 def chunk(lines):
42 42 return cStringIO.StringIO(''.join(lines))
43 43
44 44 def hgsplit(stream, cur):
45 45 inheader = True
46 46
47 47 for line in stream:
48 48 if not line.strip():
49 49 inheader = False
50 50 if not inheader and line.startswith('# HG changeset patch'):
51 51 yield chunk(cur)
52 52 cur = []
53 53 inheader = True
54 54
55 55 cur.append(line)
56 56
57 57 if cur:
58 58 yield chunk(cur)
59 59
60 60 def mboxsplit(stream, cur):
61 61 for line in stream:
62 62 if line.startswith('From '):
63 63 for c in split(chunk(cur[1:])):
64 64 yield c
65 65 cur = []
66 66
67 67 cur.append(line)
68 68
69 69 if cur:
70 70 for c in split(chunk(cur[1:])):
71 71 yield c
72 72
73 73 def mimesplit(stream, cur):
74 74 def msgfp(m):
75 75 fp = cStringIO.StringIO()
76 76 g = email.Generator.Generator(fp, mangle_from_=False)
77 77 g.flatten(m)
78 78 fp.seek(0)
79 79 return fp
80 80
81 81 for line in stream:
82 82 cur.append(line)
83 83 c = chunk(cur)
84 84
85 85 m = email.Parser.Parser().parse(c)
86 86 if not m.is_multipart():
87 87 yield msgfp(m)
88 88 else:
89 89 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
90 90 for part in m.walk():
91 91 ct = part.get_content_type()
92 92 if ct not in ok_types:
93 93 continue
94 94 yield msgfp(part)
95 95
96 96 def headersplit(stream, cur):
97 97 inheader = False
98 98
99 99 for line in stream:
100 100 if not inheader and isheader(line, inheader):
101 101 yield chunk(cur)
102 102 cur = []
103 103 inheader = True
104 104 if inheader and not isheader(line, inheader):
105 105 inheader = False
106 106
107 107 cur.append(line)
108 108
109 109 if cur:
110 110 yield chunk(cur)
111 111
112 112 def remainder(cur):
113 113 yield chunk(cur)
114 114
115 115 class fiter(object):
116 116 def __init__(self, fp):
117 117 self.fp = fp
118 118
119 119 def __iter__(self):
120 120 return self
121 121
122 122 def next(self):
123 123 l = self.fp.readline()
124 124 if not l:
125 125 raise StopIteration
126 126 return l
127 127
128 128 inheader = False
129 129 cur = []
130 130
131 131 mimeheaders = ['content-type']
132 132
133 133 if not util.safehasattr(stream, 'next'):
134 134 # http responses, for example, have readline but not next
135 135 stream = fiter(stream)
136 136
137 137 for line in stream:
138 138 cur.append(line)
139 139 if line.startswith('# HG changeset patch'):
140 140 return hgsplit(stream, cur)
141 141 elif line.startswith('From '):
142 142 return mboxsplit(stream, cur)
143 143 elif isheader(line, inheader):
144 144 inheader = True
145 145 if line.split(':', 1)[0].lower() in mimeheaders:
146 146 # let email parser handle this
147 147 return mimesplit(stream, cur)
148 148 elif line.startswith('--- ') and inheader:
149 149 # No evil headers seen by diff start, split by hand
150 150 return headersplit(stream, cur)
151 151 # Not enough info, keep reading
152 152
153 153 # if we are here, we have a very plain patch
154 154 return remainder(cur)
155 155
156 156 def extract(ui, fileobj):
157 157 '''extract patch from data read from fileobj.
158 158
159 159 patch can be a normal patch or contained in an email message.
160 160
161 161 return tuple (filename, message, user, date, branch, node, p1, p2).
162 162 Any item in the returned tuple can be None. If filename is None,
163 163 fileobj did not contain a patch. Caller must unlink filename when done.'''
164 164
165 165 # attempt to detect the start of a patch
166 166 # (this heuristic is borrowed from quilt)
167 167 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
168 168 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
169 169 r'---[ \t].*?^\+\+\+[ \t]|'
170 170 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
171 171
172 172 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
173 173 tmpfp = os.fdopen(fd, 'w')
174 174 try:
175 175 msg = email.Parser.Parser().parse(fileobj)
176 176
177 177 subject = msg['Subject']
178 178 user = msg['From']
179 179 if not subject and not user:
180 180 # Not an email, restore parsed headers if any
181 181 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
182 182
183 183 # should try to parse msg['Date']
184 184 date = None
185 185 nodeid = None
186 186 branch = None
187 187 parents = []
188 188
189 189 if subject:
190 190 if subject.startswith('[PATCH'):
191 191 pend = subject.find(']')
192 192 if pend >= 0:
193 193 subject = subject[pend + 1:].lstrip()
194 194 subject = re.sub(r'\n[ \t]+', ' ', subject)
195 195 ui.debug('Subject: %s\n' % subject)
196 196 if user:
197 197 ui.debug('From: %s\n' % user)
198 198 diffs_seen = 0
199 199 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
200 200 message = ''
201 201 for part in msg.walk():
202 202 content_type = part.get_content_type()
203 203 ui.debug('Content-Type: %s\n' % content_type)
204 204 if content_type not in ok_types:
205 205 continue
206 206 payload = part.get_payload(decode=True)
207 207 m = diffre.search(payload)
208 208 if m:
209 209 hgpatch = False
210 210 hgpatchheader = False
211 211 ignoretext = False
212 212
213 213 ui.debug('found patch at byte %d\n' % m.start(0))
214 214 diffs_seen += 1
215 215 cfp = cStringIO.StringIO()
216 216 for line in payload[:m.start(0)].splitlines():
217 217 if line.startswith('# HG changeset patch') and not hgpatch:
218 218 ui.debug('patch generated by hg export\n')
219 219 hgpatch = True
220 220 hgpatchheader = True
221 221 # drop earlier commit message content
222 222 cfp.seek(0)
223 223 cfp.truncate()
224 224 subject = None
225 225 elif hgpatchheader:
226 226 if line.startswith('# User '):
227 227 user = line[7:]
228 228 ui.debug('From: %s\n' % user)
229 229 elif line.startswith("# Date "):
230 230 date = line[7:]
231 231 elif line.startswith("# Branch "):
232 232 branch = line[9:]
233 233 elif line.startswith("# Node ID "):
234 234 nodeid = line[10:]
235 235 elif line.startswith("# Parent "):
236 236 parents.append(line[9:].lstrip())
237 237 elif not line.startswith("# "):
238 238 hgpatchheader = False
239 239 elif line == '---':
240 240 ignoretext = True
241 241 if not hgpatchheader and not ignoretext:
242 242 cfp.write(line)
243 243 cfp.write('\n')
244 244 message = cfp.getvalue()
245 245 if tmpfp:
246 246 tmpfp.write(payload)
247 247 if not payload.endswith('\n'):
248 248 tmpfp.write('\n')
249 249 elif not diffs_seen and message and content_type == 'text/plain':
250 250 message += '\n' + payload
251 251 except: # re-raises
252 252 tmpfp.close()
253 253 os.unlink(tmpname)
254 254 raise
255 255
256 256 if subject and not message.startswith(subject):
257 257 message = '%s\n%s' % (subject, message)
258 258 tmpfp.close()
259 259 if not diffs_seen:
260 260 os.unlink(tmpname)
261 261 return None, message, user, date, branch, None, None, None
262 262 p1 = parents and parents.pop(0) or None
263 263 p2 = parents and parents.pop(0) or None
264 264 return tmpname, message, user, date, branch, nodeid, p1, p2
265 265
266 266 class patchmeta(object):
267 267 """Patched file metadata
268 268
269 269 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
270 270 or COPY. 'path' is patched file path. 'oldpath' is set to the
271 271 origin file when 'op' is either COPY or RENAME, None otherwise. If
272 272 file mode is changed, 'mode' is a tuple (islink, isexec) where
273 273 'islink' is True if the file is a symlink and 'isexec' is True if
274 274 the file is executable. Otherwise, 'mode' is None.
275 275 """
276 276 def __init__(self, path):
277 277 self.path = path
278 278 self.oldpath = None
279 279 self.mode = None
280 280 self.op = 'MODIFY'
281 281 self.binary = False
282 282
283 283 def setmode(self, mode):
284 284 islink = mode & 020000
285 285 isexec = mode & 0100
286 286 self.mode = (islink, isexec)
287 287
288 288 def copy(self):
289 289 other = patchmeta(self.path)
290 290 other.oldpath = self.oldpath
291 291 other.mode = self.mode
292 292 other.op = self.op
293 293 other.binary = self.binary
294 294 return other
295 295
296 296 def _ispatchinga(self, afile):
297 297 if afile == '/dev/null':
298 298 return self.op == 'ADD'
299 299 return afile == 'a/' + (self.oldpath or self.path)
300 300
301 301 def _ispatchingb(self, bfile):
302 302 if bfile == '/dev/null':
303 303 return self.op == 'DELETE'
304 304 return bfile == 'b/' + self.path
305 305
306 306 def ispatching(self, afile, bfile):
307 307 return self._ispatchinga(afile) and self._ispatchingb(bfile)
308 308
309 309 def __repr__(self):
310 310 return "<patchmeta %s %r>" % (self.op, self.path)
311 311
312 312 def readgitpatch(lr):
313 313 """extract git-style metadata about patches from <patchname>"""
314 314
315 315 # Filter patch for git information
316 316 gp = None
317 317 gitpatches = []
318 318 for line in lr:
319 319 line = line.rstrip(' \r\n')
320 320 if line.startswith('diff --git a/'):
321 321 m = gitre.match(line)
322 322 if m:
323 323 if gp:
324 324 gitpatches.append(gp)
325 325 dst = m.group(2)
326 326 gp = patchmeta(dst)
327 327 elif gp:
328 328 if line.startswith('--- '):
329 329 gitpatches.append(gp)
330 330 gp = None
331 331 continue
332 332 if line.startswith('rename from '):
333 333 gp.op = 'RENAME'
334 334 gp.oldpath = line[12:]
335 335 elif line.startswith('rename to '):
336 336 gp.path = line[10:]
337 337 elif line.startswith('copy from '):
338 338 gp.op = 'COPY'
339 339 gp.oldpath = line[10:]
340 340 elif line.startswith('copy to '):
341 341 gp.path = line[8:]
342 342 elif line.startswith('deleted file'):
343 343 gp.op = 'DELETE'
344 344 elif line.startswith('new file mode '):
345 345 gp.op = 'ADD'
346 346 gp.setmode(int(line[-6:], 8))
347 347 elif line.startswith('new mode '):
348 348 gp.setmode(int(line[-6:], 8))
349 349 elif line.startswith('GIT binary patch'):
350 350 gp.binary = True
351 351 if gp:
352 352 gitpatches.append(gp)
353 353
354 354 return gitpatches
355 355
356 356 class linereader(object):
357 357 # simple class to allow pushing lines back into the input stream
358 358 def __init__(self, fp):
359 359 self.fp = fp
360 360 self.buf = []
361 361
362 362 def push(self, line):
363 363 if line is not None:
364 364 self.buf.append(line)
365 365
366 366 def readline(self):
367 367 if self.buf:
368 368 l = self.buf[0]
369 369 del self.buf[0]
370 370 return l
371 371 return self.fp.readline()
372 372
373 373 def __iter__(self):
374 374 while True:
375 375 l = self.readline()
376 376 if not l:
377 377 break
378 378 yield l
379 379
380 380 class abstractbackend(object):
381 381 def __init__(self, ui):
382 382 self.ui = ui
383 383
384 384 def getfile(self, fname):
385 385 """Return target file data and flags as a (data, (islink,
386 386 isexec)) tuple. Data is None if file is missing/deleted.
387 387 """
388 388 raise NotImplementedError
389 389
390 390 def setfile(self, fname, data, mode, copysource):
391 391 """Write data to target file fname and set its mode. mode is a
392 392 (islink, isexec) tuple. If data is None, the file content should
393 393 be left unchanged. If the file is modified after being copied,
394 394 copysource is set to the original file name.
395 395 """
396 396 raise NotImplementedError
397 397
398 398 def unlink(self, fname):
399 399 """Unlink target file."""
400 400 raise NotImplementedError
401 401
402 402 def writerej(self, fname, failed, total, lines):
403 403 """Write rejected lines for fname. failed is the number of hunks
404 404 which failed to apply and total is the total number of hunks for
405 405 this file.
406 406 """
407 407 pass
408 408
409 409 def exists(self, fname):
410 410 raise NotImplementedError
411 411
412 412 class fsbackend(abstractbackend):
413 413 def __init__(self, ui, basedir):
414 414 super(fsbackend, self).__init__(ui)
415 415 self.opener = scmutil.opener(basedir)
416 416
417 417 def _join(self, f):
418 418 return os.path.join(self.opener.base, f)
419 419
420 420 def getfile(self, fname):
421 421 if self.opener.islink(fname):
422 422 return (self.opener.readlink(fname), (True, False))
423 423
424 424 isexec = False
425 425 try:
426 426 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
427 427 except OSError, e:
428 428 if e.errno != errno.ENOENT:
429 429 raise
430 430 try:
431 431 return (self.opener.read(fname), (False, isexec))
432 432 except IOError, e:
433 433 if e.errno != errno.ENOENT:
434 434 raise
435 435 return None, None
436 436
437 437 def setfile(self, fname, data, mode, copysource):
438 438 islink, isexec = mode
439 439 if data is None:
440 440 self.opener.setflags(fname, islink, isexec)
441 441 return
442 442 if islink:
443 443 self.opener.symlink(data, fname)
444 444 else:
445 445 self.opener.write(fname, data)
446 446 if isexec:
447 447 self.opener.setflags(fname, False, True)
448 448
449 449 def unlink(self, fname):
450 450 self.opener.unlinkpath(fname, ignoremissing=True)
451 451
452 452 def writerej(self, fname, failed, total, lines):
453 453 fname = fname + ".rej"
454 454 self.ui.warn(
455 455 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
456 456 (failed, total, fname))
457 457 fp = self.opener(fname, 'w')
458 458 fp.writelines(lines)
459 459 fp.close()
460 460
461 461 def exists(self, fname):
462 462 return self.opener.lexists(fname)
463 463
464 464 class workingbackend(fsbackend):
465 465 def __init__(self, ui, repo, similarity):
466 466 super(workingbackend, self).__init__(ui, repo.root)
467 467 self.repo = repo
468 468 self.similarity = similarity
469 469 self.removed = set()
470 470 self.changed = set()
471 471 self.copied = []
472 472
473 473 def _checkknown(self, fname):
474 474 if self.repo.dirstate[fname] == '?' and self.exists(fname):
475 475 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
476 476
477 477 def setfile(self, fname, data, mode, copysource):
478 478 self._checkknown(fname)
479 479 super(workingbackend, self).setfile(fname, data, mode, copysource)
480 480 if copysource is not None:
481 481 self.copied.append((copysource, fname))
482 482 self.changed.add(fname)
483 483
484 484 def unlink(self, fname):
485 485 self._checkknown(fname)
486 486 super(workingbackend, self).unlink(fname)
487 487 self.removed.add(fname)
488 488 self.changed.add(fname)
489 489
490 490 def close(self):
491 491 wctx = self.repo[None]
492 492 changed = set(self.changed)
493 493 for src, dst in self.copied:
494 494 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
495 495 if self.removed:
496 496 wctx.forget(sorted(self.removed))
497 497 for f in self.removed:
498 498 if f not in self.repo.dirstate:
499 499 # File was deleted and no longer belongs to the
500 500 # dirstate, it was probably marked added then
501 501 # deleted, and should not be considered by
502 502 # marktouched().
503 503 changed.discard(f)
504 504 if changed:
505 505 scmutil.marktouched(self.repo, changed, self.similarity)
506 506 return sorted(self.changed)
507 507
508 508 class filestore(object):
509 509 def __init__(self, maxsize=None):
510 510 self.opener = None
511 511 self.files = {}
512 512 self.created = 0
513 513 self.maxsize = maxsize
514 514 if self.maxsize is None:
515 515 self.maxsize = 4*(2**20)
516 516 self.size = 0
517 517 self.data = {}
518 518
519 519 def setfile(self, fname, data, mode, copied=None):
520 520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
521 521 self.data[fname] = (data, mode, copied)
522 522 self.size += len(data)
523 523 else:
524 524 if self.opener is None:
525 525 root = tempfile.mkdtemp(prefix='hg-patch-')
526 526 self.opener = scmutil.opener(root)
527 527 # Avoid filename issues with these simple names
528 528 fn = str(self.created)
529 529 self.opener.write(fn, data)
530 530 self.created += 1
531 531 self.files[fname] = (fn, mode, copied)
532 532
533 533 def getfile(self, fname):
534 534 if fname in self.data:
535 535 return self.data[fname]
536 536 if not self.opener or fname not in self.files:
537 537 return None, None, None
538 538 fn, mode, copied = self.files[fname]
539 539 return self.opener.read(fn), mode, copied
540 540
541 541 def close(self):
542 542 if self.opener:
543 543 shutil.rmtree(self.opener.base)
544 544
545 545 class repobackend(abstractbackend):
546 546 def __init__(self, ui, repo, ctx, store):
547 547 super(repobackend, self).__init__(ui)
548 548 self.repo = repo
549 549 self.ctx = ctx
550 550 self.store = store
551 551 self.changed = set()
552 552 self.removed = set()
553 553 self.copied = {}
554 554
555 555 def _checkknown(self, fname):
556 556 if fname not in self.ctx:
557 557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
558 558
559 559 def getfile(self, fname):
560 560 try:
561 561 fctx = self.ctx[fname]
562 562 except error.LookupError:
563 563 return None, None
564 564 flags = fctx.flags()
565 565 return fctx.data(), ('l' in flags, 'x' in flags)
566 566
567 567 def setfile(self, fname, data, mode, copysource):
568 568 if copysource:
569 569 self._checkknown(copysource)
570 570 if data is None:
571 571 data = self.ctx[fname].data()
572 572 self.store.setfile(fname, data, mode, copysource)
573 573 self.changed.add(fname)
574 574 if copysource:
575 575 self.copied[fname] = copysource
576 576
577 577 def unlink(self, fname):
578 578 self._checkknown(fname)
579 579 self.removed.add(fname)
580 580
581 581 def exists(self, fname):
582 582 return fname in self.ctx
583 583
584 584 def close(self):
585 585 return self.changed | self.removed
586 586
587 587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
588 588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
589 589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
590 590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
591 591
592 592 class patchfile(object):
593 593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
594 594 self.fname = gp.path
595 595 self.eolmode = eolmode
596 596 self.eol = None
597 597 self.backend = backend
598 598 self.ui = ui
599 599 self.lines = []
600 600 self.exists = False
601 601 self.missing = True
602 602 self.mode = gp.mode
603 603 self.copysource = gp.oldpath
604 604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
605 605 self.remove = gp.op == 'DELETE'
606 606 if self.copysource is None:
607 607 data, mode = backend.getfile(self.fname)
608 608 else:
609 609 data, mode = store.getfile(self.copysource)[:2]
610 610 if data is not None:
611 611 self.exists = self.copysource is None or backend.exists(self.fname)
612 612 self.missing = False
613 613 if data:
614 614 self.lines = mdiff.splitnewlines(data)
615 615 if self.mode is None:
616 616 self.mode = mode
617 617 if self.lines:
618 618 # Normalize line endings
619 619 if self.lines[0].endswith('\r\n'):
620 620 self.eol = '\r\n'
621 621 elif self.lines[0].endswith('\n'):
622 622 self.eol = '\n'
623 623 if eolmode != 'strict':
624 624 nlines = []
625 625 for l in self.lines:
626 626 if l.endswith('\r\n'):
627 627 l = l[:-2] + '\n'
628 628 nlines.append(l)
629 629 self.lines = nlines
630 630 else:
631 631 if self.create:
632 632 self.missing = False
633 633 if self.mode is None:
634 634 self.mode = (False, False)
635 635 if self.missing:
636 636 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
637 637
638 638 self.hash = {}
639 639 self.dirty = 0
640 640 self.offset = 0
641 641 self.skew = 0
642 642 self.rej = []
643 643 self.fileprinted = False
644 644 self.printfile(False)
645 645 self.hunks = 0
646 646
647 647 def writelines(self, fname, lines, mode):
648 648 if self.eolmode == 'auto':
649 649 eol = self.eol
650 650 elif self.eolmode == 'crlf':
651 651 eol = '\r\n'
652 652 else:
653 653 eol = '\n'
654 654
655 655 if self.eolmode != 'strict' and eol and eol != '\n':
656 656 rawlines = []
657 657 for l in lines:
658 658 if l and l[-1] == '\n':
659 659 l = l[:-1] + eol
660 660 rawlines.append(l)
661 661 lines = rawlines
662 662
663 663 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
664 664
665 665 def printfile(self, warn):
666 666 if self.fileprinted:
667 667 return
668 668 if warn or self.ui.verbose:
669 669 self.fileprinted = True
670 670 s = _("patching file %s\n") % self.fname
671 671 if warn:
672 672 self.ui.warn(s)
673 673 else:
674 674 self.ui.note(s)
675 675
676 676
677 677 def findlines(self, l, linenum):
678 678 # looks through the hash and finds candidate lines. The
679 679 # result is a list of line numbers sorted based on distance
680 680 # from linenum
681 681
682 682 cand = self.hash.get(l, [])
683 683 if len(cand) > 1:
684 684 # resort our list of potentials forward then back.
685 685 cand.sort(key=lambda x: abs(x - linenum))
686 686 return cand
687 687
688 688 def write_rej(self):
689 689 # our rejects are a little different from patch(1). This always
690 690 # creates rejects in the same form as the original patch. A file
691 691 # header is inserted so that you can run the reject through patch again
692 692 # without having to type the filename.
693 693 if not self.rej:
694 694 return
695 695 base = os.path.basename(self.fname)
696 696 lines = ["--- %s\n+++ %s\n" % (base, base)]
697 697 for x in self.rej:
698 698 for l in x.hunk:
699 699 lines.append(l)
700 700 if l[-1] != '\n':
701 701 lines.append("\n\ No newline at end of file\n")
702 702 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
703 703
704 704 def apply(self, h):
705 705 if not h.complete():
706 706 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
707 707 (h.number, h.desc, len(h.a), h.lena, len(h.b),
708 708 h.lenb))
709 709
710 710 self.hunks += 1
711 711
712 712 if self.missing:
713 713 self.rej.append(h)
714 714 return -1
715 715
716 716 if self.exists and self.create:
717 717 if self.copysource:
718 718 self.ui.warn(_("cannot create %s: destination already "
719 719 "exists\n") % self.fname)
720 720 else:
721 721 self.ui.warn(_("file %s already exists\n") % self.fname)
722 722 self.rej.append(h)
723 723 return -1
724 724
725 725 if isinstance(h, binhunk):
726 726 if self.remove:
727 727 self.backend.unlink(self.fname)
728 728 else:
729 729 l = h.new(self.lines)
730 730 self.lines[:] = l
731 731 self.offset += len(l)
732 732 self.dirty = True
733 733 return 0
734 734
735 735 horig = h
736 736 if (self.eolmode in ('crlf', 'lf')
737 737 or self.eolmode == 'auto' and self.eol):
738 738 # If new eols are going to be normalized, then normalize
739 739 # hunk data before patching. Otherwise, preserve input
740 740 # line-endings.
741 741 h = h.getnormalized()
742 742
743 743 # fast case first, no offsets, no fuzz
744 744 old, oldstart, new, newstart = h.fuzzit(0, False)
745 745 oldstart += self.offset
746 746 orig_start = oldstart
747 747 # if there's skew we want to emit the "(offset %d lines)" even
748 748 # when the hunk cleanly applies at start + skew, so skip the
749 749 # fast case code
750 750 if (self.skew == 0 and
751 751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
752 752 if self.remove:
753 753 self.backend.unlink(self.fname)
754 754 else:
755 755 self.lines[oldstart:oldstart + len(old)] = new
756 756 self.offset += len(new) - len(old)
757 757 self.dirty = True
758 758 return 0
759 759
760 760 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
761 761 self.hash = {}
762 762 for x, s in enumerate(self.lines):
763 763 self.hash.setdefault(s, []).append(x)
764 764
765 765 for fuzzlen in xrange(3):
766 766 for toponly in [True, False]:
767 767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
768 768 oldstart = oldstart + self.offset + self.skew
769 769 oldstart = min(oldstart, len(self.lines))
770 770 if old:
771 771 cand = self.findlines(old[0][1:], oldstart)
772 772 else:
773 773 # Only adding lines with no or fuzzed context, just
774 774 # take the skew into account
775 775 cand = [oldstart]
776 776
777 777 for l in cand:
778 778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
779 779 self.lines[l : l + len(old)] = new
780 780 self.offset += len(new) - len(old)
781 781 self.skew = l - orig_start
782 782 self.dirty = True
783 783 offset = l - orig_start - fuzzlen
784 784 if fuzzlen:
785 785 msg = _("Hunk #%d succeeded at %d "
786 786 "with fuzz %d "
787 787 "(offset %d lines).\n")
788 788 self.printfile(True)
789 789 self.ui.warn(msg %
790 790 (h.number, l + 1, fuzzlen, offset))
791 791 else:
792 792 msg = _("Hunk #%d succeeded at %d "
793 793 "(offset %d lines).\n")
794 794 self.ui.note(msg % (h.number, l + 1, offset))
795 795 return fuzzlen
796 796 self.printfile(True)
797 797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
798 798 self.rej.append(horig)
799 799 return -1
800 800
801 801 def close(self):
802 802 if self.dirty:
803 803 self.writelines(self.fname, self.lines, self.mode)
804 804 self.write_rej()
805 805 return len(self.rej)
806 806
807 807 class header(object):
808 808 """patch header
809 809 """
810 810 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
811 811 diff_re = re.compile('diff -r .* (.*)$')
812 812 allhunks_re = re.compile('(?:index|deleted file) ')
813 813 pretty_re = re.compile('(?:new file|deleted file) ')
814 814 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
815 815
816 816 def __init__(self, header):
817 817 self.header = header
818 818 self.hunks = []
819 819
820 820 def binary(self):
821 821 return util.any(h.startswith('index ') for h in self.header)
822 822
823 823 def pretty(self, fp):
824 824 for h in self.header:
825 825 if h.startswith('index '):
826 826 fp.write(_('this modifies a binary file (all or nothing)\n'))
827 827 break
828 828 if self.pretty_re.match(h):
829 829 fp.write(h)
830 830 if self.binary():
831 831 fp.write(_('this is a binary file\n'))
832 832 break
833 833 if h.startswith('---'):
834 834 fp.write(_('%d hunks, %d lines changed\n') %
835 835 (len(self.hunks),
836 836 sum([max(h.added, h.removed) for h in self.hunks])))
837 837 break
838 838 fp.write(h)
839 839
840 840 def write(self, fp):
841 841 fp.write(''.join(self.header))
842 842
843 843 def allhunks(self):
844 844 return util.any(self.allhunks_re.match(h) for h in self.header)
845 845
846 846 def files(self):
847 847 match = self.diffgit_re.match(self.header[0])
848 848 if match:
849 849 fromfile, tofile = match.groups()
850 850 if fromfile == tofile:
851 851 return [fromfile]
852 852 return [fromfile, tofile]
853 853 else:
854 854 return self.diff_re.match(self.header[0]).groups()
855 855
856 856 def filename(self):
857 857 return self.files()[-1]
858 858
859 859 def __repr__(self):
860 860 return '<header %s>' % (' '.join(map(repr, self.files())))
861 861
862 862 def special(self):
863 863 return util.any(self.special_re.match(h) for h in self.header)
864 864
865 865 class recordhunk(object):
866 866 """patch hunk
867 867
868 868 XXX shouldn't we merge this with the other hunk class?
869 869 """
870 870 maxcontext = 3
871 871
872 872 def __init__(self, header, fromline, toline, proc, before, hunk, after):
873 873 def trimcontext(number, lines):
874 874 delta = len(lines) - self.maxcontext
875 875 if False and delta > 0:
876 876 return number + delta, lines[:self.maxcontext]
877 877 return number, lines
878 878
879 879 self.header = header
880 880 self.fromline, self.before = trimcontext(fromline, before)
881 881 self.toline, self.after = trimcontext(toline, after)
882 882 self.proc = proc
883 883 self.hunk = hunk
884 884 self.added, self.removed = self.countchanges(self.hunk)
885 885
886 886 def countchanges(self, hunk):
887 887 """hunk -> (n+,n-)"""
888 888 add = len([h for h in hunk if h[0] == '+'])
889 889 rem = len([h for h in hunk if h[0] == '-'])
890 890 return add, rem
891 891
892 892 def write(self, fp):
893 893 delta = len(self.before) + len(self.after)
894 894 if self.after and self.after[-1] == '\\ No newline at end of file\n':
895 895 delta -= 1
896 896 fromlen = delta + self.removed
897 897 tolen = delta + self.added
898 898 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
899 899 (self.fromline, fromlen, self.toline, tolen,
900 900 self.proc and (' ' + self.proc)))
901 901 fp.write(''.join(self.before + self.hunk + self.after))
902 902
903 903 pretty = write
904 904
905 905 def filename(self):
906 906 return self.header.filename()
907 907
908 908 def __repr__(self):
909 909 return '<hunk %r@%d>' % (self.filename(), self.fromline)
910 910
911 def filterpatch(ui, headers):
912 """Interactively filter patch chunks into applied-only chunks"""
913
914 def prompt(skipfile, skipall, query, chunk):
915 """prompt query, and process base inputs
916
917 - y/n for the rest of file
918 - y/n for the rest
919 - ? (help)
920 - q (quit)
921
922 Return True/False and possibly updated skipfile and skipall.
923 """
924 newpatches = None
925 if skipall is not None:
926 return skipall, skipfile, skipall, newpatches
927 if skipfile is not None:
928 return skipfile, skipfile, skipall, newpatches
929 while True:
930 resps = _('[Ynesfdaq?]'
931 '$$ &Yes, record this change'
932 '$$ &No, skip this change'
933 '$$ &Edit this change manually'
934 '$$ &Skip remaining changes to this file'
935 '$$ Record remaining changes to this &file'
936 '$$ &Done, skip remaining changes and files'
937 '$$ Record &all changes to all remaining files'
938 '$$ &Quit, recording no changes'
939 '$$ &? (display help)')
940 r = ui.promptchoice("%s %s" % (query, resps))
941 ui.write("\n")
942 if r == 8: # ?
943 for c, t in ui.extractchoices(resps)[1]:
944 ui.write('%s - %s\n' % (c, t.lower()))
945 continue
946 elif r == 0: # yes
947 ret = True
948 elif r == 1: # no
949 ret = False
950 elif r == 2: # Edit patch
951 if chunk is None:
952 ui.write(_('cannot edit patch for whole file'))
953 ui.write("\n")
954 continue
955 if chunk.header.binary():
956 ui.write(_('cannot edit patch for binary file'))
957 ui.write("\n")
958 continue
959 # Patch comment based on the Git one (based on comment at end of
960 # http://mercurial.selenic.com/wiki/RecordExtension)
961 phelp = '---' + _("""
962 To remove '-' lines, make them ' ' lines (context).
963 To remove '+' lines, delete them.
964 Lines starting with # will be removed from the patch.
965
966 If the patch applies cleanly, the edited hunk will immediately be
967 added to the record list. If it does not apply cleanly, a rejects
968 file will be generated: you can use that when you try again. If
969 all lines of the hunk are removed, then the edit is aborted and
970 the hunk is left unchanged.
971 """)
972 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
973 suffix=".diff", text=True)
974 ncpatchfp = None
975 try:
976 # Write the initial patch
977 f = os.fdopen(patchfd, "w")
978 chunk.header.write(f)
979 chunk.write(f)
980 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
981 f.close()
982 # Start the editor and wait for it to complete
983 editor = ui.geteditor()
984 ui.system("%s \"%s\"" % (editor, patchfn),
985 environ={'HGUSER': ui.username()},
986 onerr=util.Abort, errprefix=_("edit failed"))
987 # Remove comment lines
988 patchfp = open(patchfn)
989 ncpatchfp = cStringIO.StringIO()
990 for line in patchfp:
991 if not line.startswith('#'):
992 ncpatchfp.write(line)
993 patchfp.close()
994 ncpatchfp.seek(0)
995 newpatches = parsepatch(ncpatchfp)
996 finally:
997 os.unlink(patchfn)
998 del ncpatchfp
999 # Signal that the chunk shouldn't be applied as-is, but
1000 # provide the new patch to be used instead.
1001 ret = False
1002 elif r == 3: # Skip
1003 ret = skipfile = False
1004 elif r == 4: # file (Record remaining)
1005 ret = skipfile = True
1006 elif r == 5: # done, skip remaining
1007 ret = skipall = False
1008 elif r == 6: # all
1009 ret = skipall = True
1010 elif r == 7: # quit
1011 raise util.Abort(_('user quit'))
1012 return ret, skipfile, skipall, newpatches
1013
1014 seen = set()
1015 applied = {} # 'filename' -> [] of chunks
1016 skipfile, skipall = None, None
1017 pos, total = 1, sum(len(h.hunks) for h in headers)
1018 for h in headers:
1019 pos += len(h.hunks)
1020 skipfile = None
1021 fixoffset = 0
1022 hdr = ''.join(h.header)
1023 if hdr in seen:
1024 continue
1025 seen.add(hdr)
1026 if skipall is None:
1027 h.pretty(ui)
1028 msg = (_('examine changes to %s?') %
1029 _(' and ').join("'%s'" % f for f in h.files()))
1030 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1031 if not r:
1032 continue
1033 applied[h.filename()] = [h]
1034 if h.allhunks():
1035 applied[h.filename()] += h.hunks
1036 continue
1037 for i, chunk in enumerate(h.hunks):
1038 if skipfile is None and skipall is None:
1039 chunk.pretty(ui)
1040 if total == 1:
1041 msg = _("record this change to '%s'?") % chunk.filename()
1042 else:
1043 idx = pos - len(h.hunks) + i
1044 msg = _("record change %d/%d to '%s'?") % (idx, total,
1045 chunk.filename())
1046 r, skipfile, skipall, newpatches = prompt(skipfile,
1047 skipall, msg, chunk)
1048 if r:
1049 if fixoffset:
1050 chunk = copy.copy(chunk)
1051 chunk.toline += fixoffset
1052 applied[chunk.filename()].append(chunk)
1053 elif newpatches is not None:
1054 for newpatch in newpatches:
1055 for newhunk in newpatch.hunks:
1056 if fixoffset:
1057 newhunk.toline += fixoffset
1058 applied[newhunk.filename()].append(newhunk)
1059 else:
1060 fixoffset += chunk.removed - chunk.added
1061 return sum([h for h in applied.itervalues()
1062 if h[0].special() or len(h) > 1], [])
911 1063 class hunk(object):
912 1064 def __init__(self, desc, num, lr, context):
913 1065 self.number = num
914 1066 self.desc = desc
915 1067 self.hunk = [desc]
916 1068 self.a = []
917 1069 self.b = []
918 1070 self.starta = self.lena = None
919 1071 self.startb = self.lenb = None
920 1072 if lr is not None:
921 1073 if context:
922 1074 self.read_context_hunk(lr)
923 1075 else:
924 1076 self.read_unified_hunk(lr)
925 1077
926 1078 def getnormalized(self):
927 1079 """Return a copy with line endings normalized to LF."""
928 1080
929 1081 def normalize(lines):
930 1082 nlines = []
931 1083 for line in lines:
932 1084 if line.endswith('\r\n'):
933 1085 line = line[:-2] + '\n'
934 1086 nlines.append(line)
935 1087 return nlines
936 1088
937 1089 # Dummy object, it is rebuilt manually
938 1090 nh = hunk(self.desc, self.number, None, None)
939 1091 nh.number = self.number
940 1092 nh.desc = self.desc
941 1093 nh.hunk = self.hunk
942 1094 nh.a = normalize(self.a)
943 1095 nh.b = normalize(self.b)
944 1096 nh.starta = self.starta
945 1097 nh.startb = self.startb
946 1098 nh.lena = self.lena
947 1099 nh.lenb = self.lenb
948 1100 return nh
949 1101
950 1102 def read_unified_hunk(self, lr):
951 1103 m = unidesc.match(self.desc)
952 1104 if not m:
953 1105 raise PatchError(_("bad hunk #%d") % self.number)
954 1106 self.starta, self.lena, self.startb, self.lenb = m.groups()
955 1107 if self.lena is None:
956 1108 self.lena = 1
957 1109 else:
958 1110 self.lena = int(self.lena)
959 1111 if self.lenb is None:
960 1112 self.lenb = 1
961 1113 else:
962 1114 self.lenb = int(self.lenb)
963 1115 self.starta = int(self.starta)
964 1116 self.startb = int(self.startb)
965 1117 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
966 1118 self.b)
967 1119 # if we hit EOF before finishing out the hunk, the last line will
968 1120 # be zero length. Let's try to fix it up.
969 1121 while len(self.hunk[-1]) == 0:
970 1122 del self.hunk[-1]
971 1123 del self.a[-1]
972 1124 del self.b[-1]
973 1125 self.lena -= 1
974 1126 self.lenb -= 1
975 1127 self._fixnewline(lr)
976 1128
977 1129 def read_context_hunk(self, lr):
978 1130 self.desc = lr.readline()
979 1131 m = contextdesc.match(self.desc)
980 1132 if not m:
981 1133 raise PatchError(_("bad hunk #%d") % self.number)
982 1134 self.starta, aend = m.groups()
983 1135 self.starta = int(self.starta)
984 1136 if aend is None:
985 1137 aend = self.starta
986 1138 self.lena = int(aend) - self.starta
987 1139 if self.starta:
988 1140 self.lena += 1
989 1141 for x in xrange(self.lena):
990 1142 l = lr.readline()
991 1143 if l.startswith('---'):
992 1144 # lines addition, old block is empty
993 1145 lr.push(l)
994 1146 break
995 1147 s = l[2:]
996 1148 if l.startswith('- ') or l.startswith('! '):
997 1149 u = '-' + s
998 1150 elif l.startswith(' '):
999 1151 u = ' ' + s
1000 1152 else:
1001 1153 raise PatchError(_("bad hunk #%d old text line %d") %
1002 1154 (self.number, x))
1003 1155 self.a.append(u)
1004 1156 self.hunk.append(u)
1005 1157
1006 1158 l = lr.readline()
1007 1159 if l.startswith('\ '):
1008 1160 s = self.a[-1][:-1]
1009 1161 self.a[-1] = s
1010 1162 self.hunk[-1] = s
1011 1163 l = lr.readline()
1012 1164 m = contextdesc.match(l)
1013 1165 if not m:
1014 1166 raise PatchError(_("bad hunk #%d") % self.number)
1015 1167 self.startb, bend = m.groups()
1016 1168 self.startb = int(self.startb)
1017 1169 if bend is None:
1018 1170 bend = self.startb
1019 1171 self.lenb = int(bend) - self.startb
1020 1172 if self.startb:
1021 1173 self.lenb += 1
1022 1174 hunki = 1
1023 1175 for x in xrange(self.lenb):
1024 1176 l = lr.readline()
1025 1177 if l.startswith('\ '):
1026 1178 # XXX: the only way to hit this is with an invalid line range.
1027 1179 # The no-eol marker is not counted in the line range, but I
1028 1180 # guess there are diff(1) implementations out there which behave differently.
1029 1181 s = self.b[-1][:-1]
1030 1182 self.b[-1] = s
1031 1183 self.hunk[hunki - 1] = s
1032 1184 continue
1033 1185 if not l:
1034 1186 # line deletions, new block is empty and we hit EOF
1035 1187 lr.push(l)
1036 1188 break
1037 1189 s = l[2:]
1038 1190 if l.startswith('+ ') or l.startswith('! '):
1039 1191 u = '+' + s
1040 1192 elif l.startswith(' '):
1041 1193 u = ' ' + s
1042 1194 elif len(self.b) == 0:
1043 1195 # line deletions, new block is empty
1044 1196 lr.push(l)
1045 1197 break
1046 1198 else:
1047 1199 raise PatchError(_("bad hunk #%d old text line %d") %
1048 1200 (self.number, x))
1049 1201 self.b.append(s)
1050 1202 while True:
1051 1203 if hunki >= len(self.hunk):
1052 1204 h = ""
1053 1205 else:
1054 1206 h = self.hunk[hunki]
1055 1207 hunki += 1
1056 1208 if h == u:
1057 1209 break
1058 1210 elif h.startswith('-'):
1059 1211 continue
1060 1212 else:
1061 1213 self.hunk.insert(hunki - 1, u)
1062 1214 break
1063 1215
1064 1216 if not self.a:
1065 1217 # this happens when lines were only added to the hunk
1066 1218 for x in self.hunk:
1067 1219 if x.startswith('-') or x.startswith(' '):
1068 1220 self.a.append(x)
1069 1221 if not self.b:
1070 1222 # this happens when lines were only deleted from the hunk
1071 1223 for x in self.hunk:
1072 1224 if x.startswith('+') or x.startswith(' '):
1073 1225 self.b.append(x[1:])
1074 1226 # @@ -start,len +start,len @@
1075 1227 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1076 1228 self.startb, self.lenb)
1077 1229 self.hunk[0] = self.desc
1078 1230 self._fixnewline(lr)
1079 1231
1080 1232 def _fixnewline(self, lr):
1081 1233 l = lr.readline()
1082 1234 if l.startswith('\ '):
1083 1235 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1084 1236 else:
1085 1237 lr.push(l)
1086 1238
1087 1239 def complete(self):
1088 1240 return len(self.a) == self.lena and len(self.b) == self.lenb
1089 1241
1090 1242 def _fuzzit(self, old, new, fuzz, toponly):
1091 1243 # this removes context lines from the top and bottom of list 'l'. It
1092 1244 # checks the hunk to make sure only context lines are removed, and then
1093 1245 # returns a new shortened list of lines.
1094 1246 fuzz = min(fuzz, len(old))
1095 1247 if fuzz:
1096 1248 top = 0
1097 1249 bot = 0
1098 1250 hlen = len(self.hunk)
1099 1251 for x in xrange(hlen - 1):
1100 1252 # the hunk starts with the @@ line, so use x+1
1101 1253 if self.hunk[x + 1][0] == ' ':
1102 1254 top += 1
1103 1255 else:
1104 1256 break
1105 1257 if not toponly:
1106 1258 for x in xrange(hlen - 1):
1107 1259 if self.hunk[hlen - bot - 1][0] == ' ':
1108 1260 bot += 1
1109 1261 else:
1110 1262 break
1111 1263
1112 1264 bot = min(fuzz, bot)
1113 1265 top = min(fuzz, top)
1114 1266 return old[top:len(old) - bot], new[top:len(new) - bot], top
1115 1267 return old, new, 0
1116 1268
1117 1269 def fuzzit(self, fuzz, toponly):
1118 1270 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1119 1271 oldstart = self.starta + top
1120 1272 newstart = self.startb + top
1121 1273 # zero length hunk ranges already have their start decremented
1122 1274 if self.lena and oldstart > 0:
1123 1275 oldstart -= 1
1124 1276 if self.lenb and newstart > 0:
1125 1277 newstart -= 1
1126 1278 return old, oldstart, new, newstart
1127 1279
1128 1280 class binhunk(object):
1129 1281 'A binary patch file.'
1130 1282 def __init__(self, lr, fname):
1131 1283 self.text = None
1132 1284 self.delta = False
1133 1285 self.hunk = ['GIT binary patch\n']
1134 1286 self._fname = fname
1135 1287 self._read(lr)
1136 1288
1137 1289 def complete(self):
1138 1290 return self.text is not None
1139 1291
1140 1292 def new(self, lines):
1141 1293 if self.delta:
1142 1294 return [applybindelta(self.text, ''.join(lines))]
1143 1295 return [self.text]
1144 1296
1145 1297 def _read(self, lr):
1146 1298 def getline(lr, hunk):
1147 1299 l = lr.readline()
1148 1300 hunk.append(l)
1149 1301 return l.rstrip('\r\n')
1150 1302
1151 1303 size = 0
1152 1304 while True:
1153 1305 line = getline(lr, self.hunk)
1154 1306 if not line:
1155 1307 raise PatchError(_('could not extract "%s" binary data')
1156 1308 % self._fname)
1157 1309 if line.startswith('literal '):
1158 1310 size = int(line[8:].rstrip())
1159 1311 break
1160 1312 if line.startswith('delta '):
1161 1313 size = int(line[6:].rstrip())
1162 1314 self.delta = True
1163 1315 break
1164 1316 dec = []
1165 1317 line = getline(lr, self.hunk)
1166 1318 while len(line) > 1:
1167 1319 l = line[0]
1168 1320 if l <= 'Z' and l >= 'A':
1169 1321 l = ord(l) - ord('A') + 1
1170 1322 else:
1171 1323 l = ord(l) - ord('a') + 27
1172 1324 try:
1173 1325 dec.append(base85.b85decode(line[1:])[:l])
1174 1326 except ValueError, e:
1175 1327 raise PatchError(_('could not decode "%s" binary patch: %s')
1176 1328 % (self._fname, str(e)))
1177 1329 line = getline(lr, self.hunk)
1178 1330 text = zlib.decompress(''.join(dec))
1179 1331 if len(text) != size:
1180 1332 raise PatchError(_('"%s" length is %d bytes, should be %d')
1181 1333 % (self._fname, len(text), size))
1182 1334 self.text = text
1183 1335
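# Illustrative sketch (not part of the original module): build a tiny
# 'literal' git binary hunk by hand and parse it back with binhunk. The
# payload and file name are made up; the encoding mirrors git's format:
# a size line, then base85 lines each prefixed with a length character
# ('A'-'Z' for 1-26 bytes, 'a'-'z' for 27-52), zlib-compressed overall.
def _example_binhunk():
    payload = 'hello binary world'
    compressed = zlib.compress(payload)
    lines = ['literal %d\n' % len(payload)]
    for i in xrange(0, len(compressed), 52):
        chunk = compressed[i:i + 52]
        if len(chunk) <= 26:
            lenchar = chr(ord('A') + len(chunk) - 1)
        else:
            lenchar = chr(ord('a') + len(chunk) - 27)
        lines.append(lenchar + base85.b85encode(chunk, True) + '\n')
    lines.append('\n')  # a short line terminates the binary hunk
    lr = linereader(cStringIO.StringIO(''.join(lines)))
    h = binhunk(lr, 'demo.bin')
    assert h.new([]) == [payload]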
1184 1336 def parsefilename(str):
1185 1337 # '--- filename', then a tab or space, then trailing metadata
1186 1338 s = str[4:].rstrip('\r\n')
1187 1339 i = s.find('\t')
1188 1340 if i < 0:
1189 1341 i = s.find(' ')
1190 1342 if i < 0:
1191 1343 return s
1192 1344 return s[:i]
1193 1345
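# Illustrative examples (not part of the original module): the filename is
# whatever follows the '--- '/'+++ ' prefix, cut at the first tab or space.
def _example_parsefilename():
    assert parsefilename('--- a/foo.c\tMon Jan 01 00:00:00 2024\n') == 'a/foo.c'
    assert parsefilename('+++ b/foo.c\n') == 'b/foo.c'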
1194 1346 def parsepatch(fp):
1195 1347 """parse a patch into a list of headers, each with its list of hunks"""
1196 1348 class parser(object):
1197 1349 """patch parsing state machine"""
1198 1350 def __init__(self):
1199 1351 self.fromline = 0
1200 1352 self.toline = 0
1201 1353 self.proc = ''
1202 1354 self.header = None
1203 1355 self.context = []
1204 1356 self.before = []
1205 1357 self.hunk = []
1206 1358 self.headers = []
1207 1359
1208 1360 def addrange(self, limits):
1209 1361 fromstart, fromend, tostart, toend, proc = limits
1210 1362 self.fromline = int(fromstart)
1211 1363 self.toline = int(tostart)
1212 1364 self.proc = proc
1213 1365
1214 1366 def addcontext(self, context):
1215 1367 if self.hunk:
1216 1368 h = recordhunk(self.header, self.fromline, self.toline,
1217 1369 self.proc, self.before, self.hunk, context)
1218 1370 self.header.hunks.append(h)
1219 1371 self.fromline += len(self.before) + h.removed
1220 1372 self.toline += len(self.before) + h.added
1221 1373 self.before = []
1222 1374 self.hunk = []
1223 1375 self.proc = ''
1224 1376 self.context = context
1225 1377
1226 1378 def addhunk(self, hunk):
1227 1379 if self.context:
1228 1380 self.before = self.context
1229 1381 self.context = []
1230 1382 self.hunk = hunk
1231 1383
1232 1384 def newfile(self, hdr):
1233 1385 self.addcontext([])
1234 1386 h = header(hdr)
1235 1387 self.headers.append(h)
1236 1388 self.header = h
1237 1389
1238 1390 def addother(self, line):
1239 1391 pass # 'other' lines are ignored
1240 1392
1241 1393 def finished(self):
1242 1394 self.addcontext([])
1243 1395 return self.headers
1244 1396
1245 1397 transitions = {
1246 1398 'file': {'context': addcontext,
1247 1399 'file': newfile,
1248 1400 'hunk': addhunk,
1249 1401 'range': addrange},
1250 1402 'context': {'file': newfile,
1251 1403 'hunk': addhunk,
1252 1404 'range': addrange,
1253 1405 'other': addother},
1254 1406 'hunk': {'context': addcontext,
1255 1407 'file': newfile,
1256 1408 'range': addrange},
1257 1409 'range': {'context': addcontext,
1258 1410 'hunk': addhunk},
1259 1411 'other': {'other': addother},
1260 1412 }
1261 1413
1262 1414 p = parser()
1263 1415
1264 1416 state = 'context'
1265 1417 for newstate, data in scanpatch(fp):
1266 1418 try:
1267 1419 p.transitions[state][newstate](p, data)
1268 1420 except KeyError:
1269 1421 raise PatchError('unhandled transition: %s -> %s' %
1270 1422 (state, newstate))
1271 1423 state = newstate
1272 1424 return p.finished()
1273 1425
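# Illustrative sketch (not part of the original module): parse a made-up
# unified diff into header objects, each carrying its recordhunks; the
# added/removed counters are the ones addcontext() relies on above.
def _example_parsepatch():
    text = ('diff --git a/hello.txt b/hello.txt\n'
            '--- a/hello.txt\n'
            '+++ b/hello.txt\n'
            '@@ -1,1 +1,2 @@\n'
            ' hello\n'
            '+world\n')
    headers = parsepatch(cStringIO.StringIO(text))
    assert len(headers) == 1 and len(headers[0].hunks) == 1
    h = headers[0].hunks[0]
    assert (h.added, h.removed) == (1, 0)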
1274 1426 def pathtransform(path, strip, prefix):
1275 1427 '''turn a path from a patch into a path suitable for the repository
1276 1428
1277 1429 prefix, if not empty, is expected to be normalized with a / at the end.
1278 1430
1279 1431 Returns (stripped components, path in repository).
1280 1432
1281 1433 >>> pathtransform('a/b/c', 0, '')
1282 1434 ('', 'a/b/c')
1283 1435 >>> pathtransform(' a/b/c ', 0, '')
1284 1436 ('', ' a/b/c')
1285 1437 >>> pathtransform(' a/b/c ', 2, '')
1286 1438 ('a/b/', 'c')
1287 1439 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1288 1440 ('a//b/', 'd/e/c')
1289 1441 >>> pathtransform('a/b/c', 3, '')
1290 1442 Traceback (most recent call last):
1291 1443 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1292 1444 '''
1293 1445 pathlen = len(path)
1294 1446 i = 0
1295 1447 if strip == 0:
1296 1448 return '', path.rstrip()
1297 1449 count = strip
1298 1450 while count > 0:
1299 1451 i = path.find('/', i)
1300 1452 if i == -1:
1301 1453 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1302 1454 (count, strip, path))
1303 1455 i += 1
1304 1456 # consume '//' in the path
1305 1457 while i < pathlen - 1 and path[i] == '/':
1306 1458 i += 1
1307 1459 count -= 1
1308 1460 return path[:i].lstrip(), prefix + path[i:].rstrip()
1309 1461
1310 1462 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1311 1463 nulla = afile_orig == "/dev/null"
1312 1464 nullb = bfile_orig == "/dev/null"
1313 1465 create = nulla and hunk.starta == 0 and hunk.lena == 0
1314 1466 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1315 1467 abase, afile = pathtransform(afile_orig, strip, prefix)
1316 1468 gooda = not nulla and backend.exists(afile)
1317 1469 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1318 1470 if afile == bfile:
1319 1471 goodb = gooda
1320 1472 else:
1321 1473 goodb = not nullb and backend.exists(bfile)
1322 1474 missing = not goodb and not gooda and not create
1323 1475
1324 1476 # some diff programs apparently produce patches where the afile is
1325 1477 # not /dev/null, but afile starts with bfile
1326 1478 abasedir = afile[:afile.rfind('/') + 1]
1327 1479 bbasedir = bfile[:bfile.rfind('/') + 1]
1328 1480 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1329 1481 and hunk.starta == 0 and hunk.lena == 0):
1330 1482 create = True
1331 1483 missing = False
1332 1484
1333 1485 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1334 1486 # diff is between a file and its backup. In this case, the original
1335 1487 # file should be patched (see original mpatch code).
1336 1488 isbackup = (abase == bbase and bfile.startswith(afile))
1337 1489 fname = None
1338 1490 if not missing:
1339 1491 if gooda and goodb:
1340 1492 fname = isbackup and afile or bfile
1341 1493 elif gooda:
1342 1494 fname = afile
1343 1495
1344 1496 if not fname:
1345 1497 if not nullb:
1346 1498 fname = isbackup and afile or bfile
1347 1499 elif not nulla:
1348 1500 fname = afile
1349 1501 else:
1350 1502 raise PatchError(_("undefined source and destination files"))
1351 1503
1352 1504 gp = patchmeta(fname)
1353 1505 if create:
1354 1506 gp.op = 'ADD'
1355 1507 elif remove:
1356 1508 gp.op = 'DELETE'
1357 1509 return gp
1358 1510
1359 1511 def scanpatch(fp):
1360 1512 """like patch.iterhunks, but yields different events
1361 1513
1362 1514 - ('file', [header_lines + fromfile + tofile])
1363 1515 - ('context', [context_lines])
1364 1516 - ('hunk', [hunk_lines])
1365 1517 - ('range', (-start,len, +start,len, proc))
1366 1518 """
1367 1519 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1368 1520 lr = linereader(fp)
1369 1521
1370 1522 def scanwhile(first, p):
1371 1523 """scan lr while predicate holds"""
1372 1524 lines = [first]
1373 1525 while True:
1374 1526 line = lr.readline()
1375 1527 if not line:
1376 1528 break
1377 1529 if p(line):
1378 1530 lines.append(line)
1379 1531 else:
1380 1532 lr.push(line)
1381 1533 break
1382 1534 return lines
1383 1535
1384 1536 while True:
1385 1537 line = lr.readline()
1386 1538 if not line:
1387 1539 break
1388 1540 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1389 1541 def notheader(line):
1390 1542 s = line.split(None, 1)
1391 1543 return not s or s[0] not in ('---', 'diff')
1392 1544 header = scanwhile(line, notheader)
1393 1545 fromfile = lr.readline()
1394 1546 if fromfile.startswith('---'):
1395 1547 tofile = lr.readline()
1396 1548 header += [fromfile, tofile]
1397 1549 else:
1398 1550 lr.push(fromfile)
1399 1551 yield 'file', header
1400 1552 elif line[0] == ' ':
1401 1553 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1402 1554 elif line[0] in '-+':
1403 1555 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1404 1556 else:
1405 1557 m = lines_re.match(line)
1406 1558 if m:
1407 1559 yield 'range', m.groups()
1408 1560 else:
1409 1561 yield 'other', line
1410 1562
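# Illustrative sketch (not part of the original module): the event stream
# scanpatch yields for a made-up two-line change.
def _example_scanpatch():
    text = ('diff -r 000000000000 hello.txt\n'
            '--- hello.txt\n'
            '+++ hello.txt\n'
            '@@ -1,1 +1,2 @@\n'
            ' hello\n'
            '+world\n')
    kinds = [kind for kind, data in scanpatch(cStringIO.StringIO(text))]
    assert kinds == ['file', 'range', 'context', 'hunk']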
1411 1563 def scangitpatch(lr, firstline):
1412 1564 """
1413 1565 Git patches can emit:
1414 1566 - rename a to b
1415 1567 - change b
1416 1568 - copy a to c
1417 1569 - change c
1418 1570
1419 1571 We cannot apply this sequence as-is: the renamed 'a' could not be
1420 1572 found because it would already have been renamed. And we cannot copy
1421 1573 from 'b' instead because 'b' would have been changed already. So
1422 1574 we scan the git patch for copy and rename commands so we can
1423 1575 perform the copies ahead of time.
1424 1576 """
1425 1577 pos = 0
1426 1578 try:
1427 1579 pos = lr.fp.tell()
1428 1580 fp = lr.fp
1429 1581 except IOError:
1430 1582 fp = cStringIO.StringIO(lr.fp.read())
1431 1583 gitlr = linereader(fp)
1432 1584 gitlr.push(firstline)
1433 1585 gitpatches = readgitpatch(gitlr)
1434 1586 fp.seek(pos)
1435 1587 return gitpatches
1436 1588
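# Illustrative sketch (not part of the original module): pre-scan a made-up
# git patch so its copy is known before any hunk is applied; the expected
# values assume readgitpatch's handling of 'copy from'/'copy to' lines.
def _example_scangitpatch():
    text = ('diff --git a/a.txt b/b.txt\n'
            'copy from a.txt\n'
            'copy to b.txt\n')
    lr = linereader(cStringIO.StringIO(text))
    firstline = lr.readline()
    gitpatches = scangitpatch(lr, firstline)
    assert [(g.op, g.oldpath, g.path) for g in gitpatches] == \
        [('COPY', 'a.txt', 'b.txt')]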
1437 1589 def iterhunks(fp):
1438 1590 """Read a patch and yield the following events:
1439 1591 - ("file", afile, bfile, firsthunk): select a new target file.
1440 1592 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1441 1593 "file" event.
1442 1594 - ("git", gitchanges): current diff is in git format, gitchanges
1443 1595 maps filenames to gitpatch records. Unique event.
1444 1596 """
1445 1597 afile = ""
1446 1598 bfile = ""
1447 1599 state = None
1448 1600 hunknum = 0
1449 1601 emitfile = newfile = False
1450 1602 gitpatches = None
1451 1603
1452 1604 # our states
1453 1605 BFILE = 1
1454 1606 context = None
1455 1607 lr = linereader(fp)
1456 1608
1457 1609 while True:
1458 1610 x = lr.readline()
1459 1611 if not x:
1460 1612 break
1461 1613 if state == BFILE and (
1462 1614 (not context and x[0] == '@')
1463 1615 or (context is not False and x.startswith('***************'))
1464 1616 or x.startswith('GIT binary patch')):
1465 1617 gp = None
1466 1618 if (gitpatches and
1467 1619 gitpatches[-1].ispatching(afile, bfile)):
1468 1620 gp = gitpatches.pop()
1469 1621 if x.startswith('GIT binary patch'):
1470 1622 h = binhunk(lr, gp.path)
1471 1623 else:
1472 1624 if context is None and x.startswith('***************'):
1473 1625 context = True
1474 1626 h = hunk(x, hunknum + 1, lr, context)
1475 1627 hunknum += 1
1476 1628 if emitfile:
1477 1629 emitfile = False
1478 1630 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1479 1631 yield 'hunk', h
1480 1632 elif x.startswith('diff --git a/'):
1481 1633 m = gitre.match(x.rstrip(' \r\n'))
1482 1634 if not m:
1483 1635 continue
1484 1636 if gitpatches is None:
1485 1637 # scan whole input for git metadata
1486 1638 gitpatches = scangitpatch(lr, x)
1487 1639 yield 'git', [g.copy() for g in gitpatches
1488 1640 if g.op in ('COPY', 'RENAME')]
1489 1641 gitpatches.reverse()
1490 1642 afile = 'a/' + m.group(1)
1491 1643 bfile = 'b/' + m.group(2)
1492 1644 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1493 1645 gp = gitpatches.pop()
1494 1646 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1495 1647 if not gitpatches:
1496 1648 raise PatchError(_('failed to synchronize metadata for "%s"')
1497 1649 % afile[2:])
1498 1650 gp = gitpatches[-1]
1499 1651 newfile = True
1500 1652 elif x.startswith('---'):
1501 1653 # check for a unified diff
1502 1654 l2 = lr.readline()
1503 1655 if not l2.startswith('+++'):
1504 1656 lr.push(l2)
1505 1657 continue
1506 1658 newfile = True
1507 1659 context = False
1508 1660 afile = parsefilename(x)
1509 1661 bfile = parsefilename(l2)
1510 1662 elif x.startswith('***'):
1511 1663 # check for a context diff
1512 1664 l2 = lr.readline()
1513 1665 if not l2.startswith('---'):
1514 1666 lr.push(l2)
1515 1667 continue
1516 1668 l3 = lr.readline()
1517 1669 lr.push(l3)
1518 1670 if not l3.startswith("***************"):
1519 1671 lr.push(l2)
1520 1672 continue
1521 1673 newfile = True
1522 1674 context = True
1523 1675 afile = parsefilename(x)
1524 1676 bfile = parsefilename(l2)
1525 1677
1526 1678 if newfile:
1527 1679 newfile = False
1528 1680 emitfile = True
1529 1681 state = BFILE
1530 1682 hunknum = 0
1531 1683
1532 1684 while gitpatches:
1533 1685 gp = gitpatches.pop()
1534 1686 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1535 1687
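# Illustrative sketch (not part of the original module): for a plain
# (non-git) unified diff, iterhunks yields one 'file' event followed by
# its 'hunk' events; a 'git' event would only appear for git-style input.
def _example_iterhunks():
    text = ('--- a/hello.txt\n'
            '+++ b/hello.txt\n'
            '@@ -1,1 +1,2 @@\n'
            ' hello\n'
            '+world\n')
    events = [state for state, values in iterhunks(cStringIO.StringIO(text))]
    assert events == ['file', 'hunk']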
1536 1688 def applybindelta(binchunk, data):
1537 1689 """Apply a binary delta hunk
1538 1690 The algorithm is the one used by git's patch-delta.c
1539 1691 """
1540 1692 def deltahead(binchunk):
1541 1693 i = 0
1542 1694 for c in binchunk:
1543 1695 i += 1
1544 1696 if not (ord(c) & 0x80):
1545 1697 return i
1546 1698 return i
1547 1699 out = ""
1548 1700 s = deltahead(binchunk)
1549 1701 binchunk = binchunk[s:]
1550 1702 s = deltahead(binchunk)
1551 1703 binchunk = binchunk[s:]
1552 1704 i = 0
1553 1705 while i < len(binchunk):
1554 1706 cmd = ord(binchunk[i])
1555 1707 i += 1
1556 1708 if (cmd & 0x80):
1557 1709 offset = 0
1558 1710 size = 0
1559 1711 if (cmd & 0x01):
1560 1712 offset = ord(binchunk[i])
1561 1713 i += 1
1562 1714 if (cmd & 0x02):
1563 1715 offset |= ord(binchunk[i]) << 8
1564 1716 i += 1
1565 1717 if (cmd & 0x04):
1566 1718 offset |= ord(binchunk[i]) << 16
1567 1719 i += 1
1568 1720 if (cmd & 0x08):
1569 1721 offset |= ord(binchunk[i]) << 24
1570 1722 i += 1
1571 1723 if (cmd & 0x10):
1572 1724 size = ord(binchunk[i])
1573 1725 i += 1
1574 1726 if (cmd & 0x20):
1575 1727 size |= ord(binchunk[i]) << 8
1576 1728 i += 1
1577 1729 if (cmd & 0x40):
1578 1730 size |= ord(binchunk[i]) << 16
1579 1731 i += 1
1580 1732 if size == 0:
1581 1733 size = 0x10000
1582 1734 offset_end = offset + size
1583 1735 out += data[offset:offset_end]
1584 1736 elif cmd != 0:
1585 1737 offset_end = i + cmd
1586 1738 out += binchunk[i:offset_end]
1587 1739 i += cmd
1588 1740 else:
1589 1741 raise PatchError(_('unexpected delta opcode 0'))
1590 1742 return out
1591 1743
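# Illustrative sketch (not part of the original module): a hand-built delta
# in git's patch-delta format. The header holds the source and target sizes
# as varints; 0x91 is a copy opcode (one offset byte, one size byte follow)
# and 0x04 inserts the next four literal bytes.
def _example_applybindelta():
    source = 'the quick brown fox'
    delta = '\x13\x09\x91\x04\x05\x04 fox'
    assert applybindelta(delta, source) == 'quick fox'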
1592 1744 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1593 1745 """Reads a patch from fp and tries to apply it.
1594 1746
1595 1747 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1596 1748 there was any fuzz.
1597 1749
1598 1750 If 'eolmode' is 'strict', the patch content and patched file are
1599 1751 read in binary mode. Otherwise, line endings are ignored when
1600 1752 patching and then normalized according to 'eolmode'.
1601 1753 """
1602 1754 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1603 1755 prefix=prefix, eolmode=eolmode)
1604 1756
1605 1757 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1606 1758 eolmode='strict'):
1607 1759
1608 1760 if prefix:
1609 1761 # clean up double slashes, lack of trailing slashes, etc
1610 1762 prefix = util.normpath(prefix) + '/'
1611 1763 def pstrip(p):
1612 1764 return pathtransform(p, strip - 1, prefix)[1]
1613 1765
1614 1766 rejects = 0
1615 1767 err = 0
1616 1768 current_file = None
1617 1769
1618 1770 for state, values in iterhunks(fp):
1619 1771 if state == 'hunk':
1620 1772 if not current_file:
1621 1773 continue
1622 1774 ret = current_file.apply(values)
1623 1775 if ret > 0:
1624 1776 err = 1
1625 1777 elif state == 'file':
1626 1778 if current_file:
1627 1779 rejects += current_file.close()
1628 1780 current_file = None
1629 1781 afile, bfile, first_hunk, gp = values
1630 1782 if gp:
1631 1783 gp.path = pstrip(gp.path)
1632 1784 if gp.oldpath:
1633 1785 gp.oldpath = pstrip(gp.oldpath)
1634 1786 else:
1635 1787 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1636 1788 prefix)
1637 1789 if gp.op == 'RENAME':
1638 1790 backend.unlink(gp.oldpath)
1639 1791 if not first_hunk:
1640 1792 if gp.op == 'DELETE':
1641 1793 backend.unlink(gp.path)
1642 1794 continue
1643 1795 data, mode = None, None
1644 1796 if gp.op in ('RENAME', 'COPY'):
1645 1797 data, mode = store.getfile(gp.oldpath)[:2]
1646 1798 # FIXME: failing getfile has never been handled here
1647 1799 assert data is not None
1648 1800 if gp.mode:
1649 1801 mode = gp.mode
1650 1802 if gp.op == 'ADD':
1651 1803 # Added files without content have no hunk and
1652 1804 # must be created
1653 1805 data = ''
1654 1806 if data or mode:
1655 1807 if (gp.op in ('ADD', 'RENAME', 'COPY')
1656 1808 and backend.exists(gp.path)):
1657 1809 raise PatchError(_("cannot create %s: destination "
1658 1810 "already exists") % gp.path)
1659 1811 backend.setfile(gp.path, data, mode, gp.oldpath)
1660 1812 continue
1661 1813 try:
1662 1814 current_file = patcher(ui, gp, backend, store,
1663 1815 eolmode=eolmode)
1664 1816 except PatchError, inst:
1665 1817 ui.warn(str(inst) + '\n')
1666 1818 current_file = None
1667 1819 rejects += 1
1668 1820 continue
1669 1821 elif state == 'git':
1670 1822 for gp in values:
1671 1823 path = pstrip(gp.oldpath)
1672 1824 data, mode = backend.getfile(path)
1673 1825 if data is None:
1674 1826 # The error ignored here will trigger a getfile()
1675 1827 # error in a place more appropriate for error
1676 1828 # handling, and will not interrupt the patching
1677 1829 # process.
1678 1830 pass
1679 1831 else:
1680 1832 store.setfile(path, data, mode)
1681 1833 else:
1682 1834 raise util.Abort(_('unsupported parser state: %s') % state)
1683 1835
1684 1836 if current_file:
1685 1837 rejects += current_file.close()
1686 1838
1687 1839 if rejects:
1688 1840 return -1
1689 1841 return err
1690 1842
1691 1843 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1692 1844 similarity):
1693 1845 """use <patcher> to apply <patchname> to the working directory.
1694 1846 returns whether patch was applied with fuzz factor."""
1695 1847
1696 1848 fuzz = False
1697 1849 args = []
1698 1850 cwd = repo.root
1699 1851 if cwd:
1700 1852 args.append('-d %s' % util.shellquote(cwd))
1701 1853 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1702 1854 util.shellquote(patchname)))
1703 1855 try:
1704 1856 for line in fp:
1705 1857 line = line.rstrip()
1706 1858 ui.note(line + '\n')
1707 1859 if line.startswith('patching file '):
1708 1860 pf = util.parsepatchoutput(line)
1709 1861 printed_file = False
1710 1862 files.add(pf)
1711 1863 elif line.find('with fuzz') >= 0:
1712 1864 fuzz = True
1713 1865 if not printed_file:
1714 1866 ui.warn(pf + '\n')
1715 1867 printed_file = True
1716 1868 ui.warn(line + '\n')
1717 1869 elif line.find('saving rejects to file') >= 0:
1718 1870 ui.warn(line + '\n')
1719 1871 elif line.find('FAILED') >= 0:
1720 1872 if not printed_file:
1721 1873 ui.warn(pf + '\n')
1722 1874 printed_file = True
1723 1875 ui.warn(line + '\n')
1724 1876 finally:
1725 1877 if files:
1726 1878 scmutil.marktouched(repo, files, similarity)
1727 1879 code = fp.close()
1728 1880 if code:
1729 1881 raise PatchError(_("patch command failed: %s") %
1730 1882 util.explainexit(code)[0])
1731 1883 return fuzz
1732 1884
1733 1885 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1734 1886 eolmode='strict'):
1735 1887 if files is None:
1736 1888 files = set()
1737 1889 if eolmode is None:
1738 1890 eolmode = ui.config('patch', 'eol', 'strict')
1739 1891 if eolmode.lower() not in eolmodes:
1740 1892 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1741 1893 eolmode = eolmode.lower()
1742 1894
1743 1895 store = filestore()
1744 1896 try:
1745 1897 fp = open(patchobj, 'rb')
1746 1898 except TypeError:
1747 1899 fp = patchobj
1748 1900 try:
1749 1901 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1750 1902 eolmode=eolmode)
1751 1903 finally:
1752 1904 if fp != patchobj:
1753 1905 fp.close()
1754 1906 files.update(backend.close())
1755 1907 store.close()
1756 1908 if ret < 0:
1757 1909 raise PatchError(_('patch failed to apply'))
1758 1910 return ret > 0
1759 1911
1760 1912 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
1761 1913 eolmode='strict', similarity=0):
1762 1914 """use builtin patch to apply <patchobj> to the working directory.
1763 1915 returns whether patch was applied with fuzz factor."""
1764 1916 backend = workingbackend(ui, repo, similarity)
1765 1917 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1766 1918
1767 1919 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
1768 1920 eolmode='strict'):
1769 1921 backend = repobackend(ui, repo, ctx, store)
1770 1922 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1771 1923
1772 1924 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
1773 1925 similarity=0):
1774 1926 """Apply <patchname> to the working directory.
1775 1927
1776 1928 'eolmode' specifies how end of lines should be handled. It can be:
1777 1929 - 'strict': inputs are read in binary mode, EOLs are preserved
1778 1930 - 'crlf': EOLs are ignored when patching and reset to CRLF
1779 1931 - 'lf': EOLs are ignored when patching and reset to LF
1780 1932 - None: get it from user settings, default to 'strict'
1781 1933 'eolmode' is ignored when using an external patcher program.
1782 1934
1783 1935 Returns whether patch was applied with fuzz factor.
1784 1936 """
1785 1937 patcher = ui.config('ui', 'patch')
1786 1938 if files is None:
1787 1939 files = set()
1788 1940 if patcher:
1789 1941 return _externalpatch(ui, repo, patcher, patchname, strip,
1790 1942 files, similarity)
1791 1943 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
1792 1944 similarity)
1793 1945
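# Illustrative sketch (not part of the original module): apply a
# hypothetical 'fix.diff' to the working directory from extension code,
# letting eolmode=None defer to the user's patch.eol setting and letting
# ui.patch pick an external tool when configured.
def _example_patch(ui, repo):
    files = set()
    fuzz = patch(ui, repo, 'fix.diff', strip=1, files=files, eolmode=None)
    if fuzz:
        ui.warn('patch applied with fuzz\n')
    ui.write('%d files touched\n' % len(files))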
1794 1946 def changedfiles(ui, repo, patchpath, strip=1):
1795 1947 backend = fsbackend(ui, repo.root)
1796 1948 fp = open(patchpath, 'rb')
1797 1949 try:
1798 1950 changed = set()
1799 1951 for state, values in iterhunks(fp):
1800 1952 if state == 'file':
1801 1953 afile, bfile, first_hunk, gp = values
1802 1954 if gp:
1803 1955 gp.path = pathtransform(gp.path, strip - 1, '')[1]
1804 1956 if gp.oldpath:
1805 1957 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
1806 1958 else:
1807 1959 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1808 1960 '')
1809 1961 changed.add(gp.path)
1810 1962 if gp.op == 'RENAME':
1811 1963 changed.add(gp.oldpath)
1812 1964 elif state not in ('hunk', 'git'):
1813 1965 raise util.Abort(_('unsupported parser state: %s') % state)
1814 1966 return changed
1815 1967 finally:
1816 1968 fp.close()
1817 1969
1818 1970 class GitDiffRequired(Exception):
1819 1971 pass
1820 1972
1821 1973 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
1822 1974 '''return diffopts with all features supported and parsed'''
1823 1975 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
1824 1976 git=True, whitespace=True, formatchanging=True)
1825 1977
1826 1978 diffopts = diffallopts
1827 1979
1828 1980 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
1829 1981 whitespace=False, formatchanging=False):
1830 1982 '''return diffopts with only opted-in features parsed
1831 1983
1832 1984 Features:
1833 1985 - git: git-style diffs
1834 1986 - whitespace: whitespace options like ignoreblanklines and ignorews
1835 1987 - formatchanging: options that will likely break or cause correctness issues
1836 1988 with most diff parsers
1837 1989 '''
1838 1990 def get(key, name=None, getter=ui.configbool, forceplain=None):
1839 1991 if opts:
1840 1992 v = opts.get(key)
1841 1993 if v:
1842 1994 return v
1843 1995 if forceplain is not None and ui.plain():
1844 1996 return forceplain
1845 1997 return getter(section, name or key, None, untrusted=untrusted)
1846 1998
1847 1999 # core options, expected to be understood by every diff parser
1848 2000 buildopts = {
1849 2001 'nodates': get('nodates'),
1850 2002 'showfunc': get('show_function', 'showfunc'),
1851 2003 'context': get('unified', getter=ui.config),
1852 2004 }
1853 2005
1854 2006 if git:
1855 2007 buildopts['git'] = get('git')
1856 2008 if whitespace:
1857 2009 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
1858 2010 buildopts['ignorewsamount'] = get('ignore_space_change',
1859 2011 'ignorewsamount')
1860 2012 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
1861 2013 'ignoreblanklines')
1862 2014 if formatchanging:
1863 2015 buildopts['text'] = opts and opts.get('text')
1864 2016 buildopts['nobinary'] = get('nobinary')
1865 2017 buildopts['noprefix'] = get('noprefix', forceplain=False)
1866 2018
1867 2019 return mdiff.diffopts(**buildopts)
1868 2020
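# Illustrative sketch (not part of the original module): a hypothetical
# command that understands git-style diffs and whitespace options asks for
# just those features; the returned object exposes them as attributes.
def _example_difffeatureopts(ui):
    dopts = difffeatureopts(ui, git=True, whitespace=True)
    ui.write('git: %r, ignorews: %r\n' % (dopts.git, dopts.ignorews))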
1869 2021 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1870 2022 losedatafn=None, prefix=''):
1871 2023 '''yields diff of changes to files between two nodes, or node and
1872 2024 working directory.
1873 2025
1874 2026 if node1 is None, use first dirstate parent instead.
1875 2027 if node2 is None, compare node1 with working directory.
1876 2028
1877 2029 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1878 2030 every time some change cannot be represented with the current
1879 2031 patch format. Return False to upgrade to git patch format, True to
1880 2032 accept the loss or raise an exception to abort the diff. It is
1881 2033 called with the name of current file being diffed as 'fn'. If set
1882 2034 to None, patches will always be upgraded to git format when
1883 2035 necessary.
1884 2036
1885 2037 prefix is a filename prefix that is prepended to all filenames on
1886 2038 display (used for subrepos).
1887 2039 '''
1888 2040
1889 2041 if opts is None:
1890 2042 opts = mdiff.defaultopts
1891 2043
1892 2044 if not node1 and not node2:
1893 2045 node1 = repo.dirstate.p1()
1894 2046
1895 2047 def lrugetfilectx():
1896 2048 cache = {}
1897 2049 order = util.deque()
1898 2050 def getfilectx(f, ctx):
1899 2051 fctx = ctx.filectx(f, filelog=cache.get(f))
1900 2052 if f not in cache:
1901 2053 if len(cache) > 20:
1902 2054 del cache[order.popleft()]
1903 2055 cache[f] = fctx.filelog()
1904 2056 else:
1905 2057 order.remove(f)
1906 2058 order.append(f)
1907 2059 return fctx
1908 2060 return getfilectx
1909 2061 getfilectx = lrugetfilectx()
1910 2062
1911 2063 ctx1 = repo[node1]
1912 2064 ctx2 = repo[node2]
1913 2065
1914 2066 if not changes:
1915 2067 changes = repo.status(ctx1, ctx2, match=match)
1916 2068 modified, added, removed = changes[:3]
1917 2069
1918 2070 if not modified and not added and not removed:
1919 2071 return []
1920 2072
1921 2073 hexfunc = repo.ui.debugflag and hex or short
1922 2074 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
1923 2075
1924 2076 copy = {}
1925 2077 if opts.git or opts.upgrade:
1926 2078 copy = copies.pathcopies(ctx1, ctx2)
1927 2079
1928 2080 def difffn(opts, losedata):
1929 2081 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1930 2082 copy, getfilectx, opts, losedata, prefix)
1931 2083 if opts.upgrade and not opts.git:
1932 2084 try:
1933 2085 def losedata(fn):
1934 2086 if not losedatafn or not losedatafn(fn=fn):
1935 2087 raise GitDiffRequired
1936 2088 # Buffer the whole output until we are sure it can be generated
1937 2089 return list(difffn(opts.copy(git=False), losedata))
1938 2090 except GitDiffRequired:
1939 2091 return difffn(opts.copy(git=True), None)
1940 2092 else:
1941 2093 return difffn(opts, None)
1942 2094
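# Illustrative sketch (not part of the original module): with both nodes
# left at None, diff() compares the working directory against its first
# parent; the opts dict forces git-style output.
def _example_diff(ui, repo):
    for chunk in diff(repo, opts=diffallopts(ui, {'git': True})):
        ui.write(chunk)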
1943 2095 def difflabel(func, *args, **kw):
1944 2096 '''yields 2-tuples of (output, label) based on the output of func()'''
1945 2097 headprefixes = [('diff', 'diff.diffline'),
1946 2098 ('copy', 'diff.extended'),
1947 2099 ('rename', 'diff.extended'),
1948 2100 ('old', 'diff.extended'),
1949 2101 ('new', 'diff.extended'),
1950 2102 ('deleted', 'diff.extended'),
1951 2103 ('---', 'diff.file_a'),
1952 2104 ('+++', 'diff.file_b')]
1953 2105 textprefixes = [('@', 'diff.hunk'),
1954 2106 ('-', 'diff.deleted'),
1955 2107 ('+', 'diff.inserted')]
1956 2108 head = False
1957 2109 for chunk in func(*args, **kw):
1958 2110 lines = chunk.split('\n')
1959 2111 for i, line in enumerate(lines):
1960 2112 if i != 0:
1961 2113 yield ('\n', '')
1962 2114 if head:
1963 2115 if line.startswith('@'):
1964 2116 head = False
1965 2117 else:
1966 2118 if line and line[0] not in ' +-@\\':
1967 2119 head = True
1968 2120 stripline = line
1969 2121 diffline = False
1970 2122 if not head and line and line[0] in '+-':
1971 2123 # highlight tabs and trailing whitespace, but only in
1972 2124 # changed lines
1973 2125 stripline = line.rstrip()
1974 2126 diffline = True
1975 2127
1976 2128 prefixes = textprefixes
1977 2129 if head:
1978 2130 prefixes = headprefixes
1979 2131 for prefix, label in prefixes:
1980 2132 if stripline.startswith(prefix):
1981 2133 if diffline:
1982 2134 for token in tabsplitter.findall(stripline):
1983 2135 if '\t' == token[0]:
1984 2136 yield (token, 'diff.tab')
1985 2137 else:
1986 2138 yield (token, label)
1987 2139 else:
1988 2140 yield (stripline, label)
1989 2141 break
1990 2142 else:
1991 2143 yield (line, '')
1992 2144 if line != stripline:
1993 2145 yield (line[len(stripline):], 'diff.trailingwhitespace')
1994 2146
1995 2147 def diffui(*args, **kw):
1996 2148 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1997 2149 return difflabel(diff, *args, **kw)
1998 2150
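# Illustrative sketch (not part of the original module): the labelled
# variant pairs every chunk with a label so ui.write can colorize it.
def _example_diffui(ui, repo):
    for chunk, label in diffui(repo, opts=diffallopts(ui)):
        ui.write(chunk, label=label)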
1999 2151 def _filepairs(ctx1, modified, added, removed, copy, opts):
2000 2152 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2001 2153 before and f2 is the name after. For added files, f1 will be None,
2002 2154 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2003 2155 or 'rename' (the latter two only if opts.git is set).'''
2004 2156 gone = set()
2005 2157
2006 2158 copyto = dict([(v, k) for k, v in copy.items()])
2007 2159
2008 2160 addedset, removedset = set(added), set(removed)
2009 2161 # Fix up added, since merged-in additions appear as
2010 2162 # modifications during merges
2011 2163 for f in modified:
2012 2164 if f not in ctx1:
2013 2165 addedset.add(f)
2014 2166
2015 2167 for f in sorted(modified + added + removed):
2016 2168 copyop = None
2017 2169 f1, f2 = f, f
2018 2170 if f in addedset:
2019 2171 f1 = None
2020 2172 if f in copy:
2021 2173 if opts.git:
2022 2174 f1 = copy[f]
2023 2175 if f1 in removedset and f1 not in gone:
2024 2176 copyop = 'rename'
2025 2177 gone.add(f1)
2026 2178 else:
2027 2179 copyop = 'copy'
2028 2180 elif f in removedset:
2029 2181 f2 = None
2030 2182 if opts.git:
2031 2183 # have we already reported a copy above?
2032 2184 if (f in copyto and copyto[f] in addedset
2033 2185 and copy[copyto[f]] == f):
2034 2186 continue
2035 2187 yield f1, f2, copyop
2036 2188
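# Illustrative sketch (not part of the original module): _filepairs only
# needs membership tests on ctx1 and the 'git' flag on opts, so a plain
# set and a tiny stand-in options object are enough here.
def _example_filepairs():
    class fakeopts(object):
        git = True
    ctx1 = set(['keep.txt', 'old.txt'])
    pairs = list(_filepairs(ctx1, ['keep.txt'], ['new.txt'], ['old.txt'],
                            {'new.txt': 'old.txt'}, fakeopts()))
    # the copy whose source disappeared is reported as a rename
    assert ('old.txt', 'new.txt', 'rename') in pairs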
2037 2189 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2038 2190 copy, getfilectx, opts, losedatafn, prefix):
2039 2191
2040 2192 def gitindex(text):
2041 2193 if not text:
2042 2194 text = ""
2043 2195 l = len(text)
2044 2196 s = util.sha1('blob %d\0' % l)
2045 2197 s.update(text)
2046 2198 return s.hexdigest()
2047 2199
2048 2200 if opts.noprefix:
2049 2201 aprefix = bprefix = ''
2050 2202 else:
2051 2203 aprefix = 'a/'
2052 2204 bprefix = 'b/'
2053 2205
2054 2206 def diffline(f, revs):
2055 2207 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2056 2208 return 'diff %s %s' % (revinfo, f)
2057 2209
2058 2210 date1 = util.datestr(ctx1.date())
2059 2211 date2 = util.datestr(ctx2.date())
2060 2212
2061 2213 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2062 2214
2063 2215 for f1, f2, copyop in _filepairs(
2064 2216 ctx1, modified, added, removed, copy, opts):
2065 2217 content1 = None
2066 2218 content2 = None
2067 2219 flag1 = None
2068 2220 flag2 = None
2069 2221 if f1:
2070 2222 content1 = getfilectx(f1, ctx1).data()
2071 2223 if opts.git or losedatafn:
2072 2224 flag1 = ctx1.flags(f1)
2073 2225 if f2:
2074 2226 content2 = getfilectx(f2, ctx2).data()
2075 2227 if opts.git or losedatafn:
2076 2228 flag2 = ctx2.flags(f2)
2077 2229 binary = False
2078 2230 if opts.git or losedatafn:
2079 2231 binary = util.binary(content1) or util.binary(content2)
2080 2232
2081 2233 if losedatafn and not opts.git:
2082 2234 if (binary or
2083 2235 # copy/rename
2084 2236 f2 in copy or
2085 2237 # empty file creation
2086 2238 (not f1 and not content2) or
2087 2239 # empty file deletion
2088 2240 (not content1 and not f2) or
2089 2241 # create with flags
2090 2242 (not f1 and flag2) or
2091 2243 # change flags
2092 2244 (f1 and f2 and flag1 != flag2)):
2093 2245 losedatafn(f2 or f1)
2094 2246
2095 2247 path1 = posixpath.join(prefix, f1 or f2)
2096 2248 path2 = posixpath.join(prefix, f2 or f1)
2097 2249 header = []
2098 2250 if opts.git:
2099 2251 header.append('diff --git %s%s %s%s' %
2100 2252 (aprefix, path1, bprefix, path2))
2101 2253 if not f1: # added
2102 2254 header.append('new file mode %s' % gitmode[flag2])
2103 2255 elif not f2: # removed
2104 2256 header.append('deleted file mode %s' % gitmode[flag1])
2105 2257 else: # modified/copied/renamed
2106 2258 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2107 2259 if mode1 != mode2:
2108 2260 header.append('old mode %s' % mode1)
2109 2261 header.append('new mode %s' % mode2)
2110 2262 if copyop is not None:
2111 2263 header.append('%s from %s' % (copyop, path1))
2112 2264 header.append('%s to %s' % (copyop, path2))
2113 2265 elif revs and not repo.ui.quiet:
2114 2266 header.append(diffline(path1, revs))
2115 2267
2116 2268 if binary and opts.git and not opts.nobinary:
2117 2269 text = mdiff.b85diff(content1, content2)
2118 2270 if text:
2119 2271 header.append('index %s..%s' %
2120 2272 (gitindex(content1), gitindex(content2)))
2121 2273 else:
2122 2274 text = mdiff.unidiff(content1, date1,
2123 2275 content2, date2,
2124 2276 path1, path2, opts=opts)
2125 2277 if header and (text or len(header) > 1):
2126 2278 yield '\n'.join(header) + '\n'
2127 2279 if text:
2128 2280 yield text
2129 2281
2130 2282 def diffstatsum(stats):
2131 2283 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2132 2284 for f, a, r, b in stats:
2133 2285 maxfile = max(maxfile, encoding.colwidth(f))
2134 2286 maxtotal = max(maxtotal, a + r)
2135 2287 addtotal += a
2136 2288 removetotal += r
2137 2289 binary = binary or b
2138 2290
2139 2291 return maxfile, maxtotal, addtotal, removetotal, binary
2140 2292
2141 2293 def diffstatdata(lines):
2142 2294 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2143 2295
2144 2296 results = []
2145 2297 filename, adds, removes, isbinary = None, 0, 0, False
2146 2298
2147 2299 def addresult():
2148 2300 if filename:
2149 2301 results.append((filename, adds, removes, isbinary))
2150 2302
2151 2303 for line in lines:
2152 2304 if line.startswith('diff'):
2153 2305 addresult()
2154 2306 # set numbers to 0 anyway when starting new file
2155 2307 adds, removes, isbinary = 0, 0, False
2156 2308 if line.startswith('diff --git a/'):
2157 2309 filename = gitre.search(line).group(2)
2158 2310 elif line.startswith('diff -r'):
2159 2311 # format: "diff -r ... -r ... filename"
2160 2312 filename = diffre.search(line).group(1)
2161 2313 elif line.startswith('+') and not line.startswith('+++ '):
2162 2314 adds += 1
2163 2315 elif line.startswith('-') and not line.startswith('--- '):
2164 2316 removes += 1
2165 2317 elif (line.startswith('GIT binary patch') or
2166 2318 line.startswith('Binary file')):
2167 2319 isbinary = True
2168 2320 addresult()
2169 2321 return results
2170 2322
2171 2323 def diffstat(lines, width=80, git=False):
2172 2324 output = []
2173 2325 stats = diffstatdata(lines)
2174 2326 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2175 2327
2176 2328 countwidth = len(str(maxtotal))
2177 2329 if hasbinary and countwidth < 3:
2178 2330 countwidth = 3
2179 2331 graphwidth = width - countwidth - maxname - 6
2180 2332 if graphwidth < 10:
2181 2333 graphwidth = 10
2182 2334
2183 2335 def scale(i):
2184 2336 if maxtotal <= graphwidth:
2185 2337 return i
2186 2338 # If diffstat runs out of room it doesn't print anything,
2187 2339 # which isn't very useful, so always print at least one + or -
2188 2340 # if there were at least some changes.
2189 2341 return max(i * graphwidth // maxtotal, int(bool(i)))
2190 2342
2191 2343 for filename, adds, removes, isbinary in stats:
2192 2344 if isbinary:
2193 2345 count = 'Bin'
2194 2346 else:
2195 2347 count = adds + removes
2196 2348 pluses = '+' * scale(adds)
2197 2349 minuses = '-' * scale(removes)
2198 2350 output.append(' %s%s | %*s %s%s\n' %
2199 2351 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2200 2352 countwidth, count, pluses, minuses))
2201 2353
2202 2354 if stats:
2203 2355 output.append(_(' %d files changed, %d insertions(+), '
2204 2356 '%d deletions(-)\n')
2205 2357 % (len(stats), totaladds, totalremoves))
2206 2358
2207 2359 return ''.join(output)
2208 2360
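# Illustrative sketch (not part of the original module): summarize the
# working-directory diff; diffstat wants individual lines, while diff()
# yields multi-line chunks, hence the splitlines() step.
def _example_diffstat(ui, repo):
    lines = ''.join(diff(repo)).splitlines(True)
    ui.write(diffstat(lines, width=ui.termwidth()))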
2209 2361 def diffstatui(*args, **kw):
2210 2362 '''like diffstat(), but yields 2-tuples of (output, label) for
2211 2363 ui.write()
2212 2364 '''
2213 2365
2214 2366 for line in diffstat(*args, **kw).splitlines():
2215 2367 if line and line[-1] in '+-':
2216 2368 name, graph = line.rsplit(' ', 1)
2217 2369 yield (name + ' ', '')
2218 2370 m = re.search(r'\++', graph)
2219 2371 if m:
2220 2372 yield (m.group(0), 'diffstat.inserted')
2221 2373 m = re.search(r'-+', graph)
2222 2374 if m:
2223 2375 yield (m.group(0), 'diffstat.deleted')
2224 2376 else:
2225 2377 yield (line, '')
2226 2378 yield ('\n', '')