diff: clarify as_html variable names
Mads Kiilerich
r8309:81fae675 default
@@ -1,681 +1,681 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2 # This program is free software: you can redistribute it and/or modify
2 # This program is free software: you can redistribute it and/or modify
3 # it under the terms of the GNU General Public License as published by
3 # it under the terms of the GNU General Public License as published by
4 # the Free Software Foundation, either version 3 of the License, or
4 # the Free Software Foundation, either version 3 of the License, or
5 # (at your option) any later version.
5 # (at your option) any later version.
6 #
6 #
7 # This program is distributed in the hope that it will be useful,
7 # This program is distributed in the hope that it will be useful,
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 # GNU General Public License for more details.
10 # GNU General Public License for more details.
11 #
11 #
12 # You should have received a copy of the GNU General Public License
12 # You should have received a copy of the GNU General Public License
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 """
14 """
15 kallithea.lib.diffs
15 kallithea.lib.diffs
16 ~~~~~~~~~~~~~~~~~~~
16 ~~~~~~~~~~~~~~~~~~~
17
17
18 Set of diffing helpers, previously part of vcs
18 Set of diffing helpers, previously part of vcs
19
19
20
20
21 This file was forked by the Kallithea project in July 2014.
21 This file was forked by the Kallithea project in July 2014.
22 Original author and date, and relevant copyright and licensing information is below:
22 Original author and date, and relevant copyright and licensing information is below:
23 :created_on: Dec 4, 2011
23 :created_on: Dec 4, 2011
24 :author: marcink
24 :author: marcink
25 :copyright: (c) 2013 RhodeCode GmbH, and others.
25 :copyright: (c) 2013 RhodeCode GmbH, and others.
26 :license: GPLv3, see LICENSE.md for more details.
26 :license: GPLv3, see LICENSE.md for more details.
27 """
27 """
28 import difflib
28 import difflib
29 import logging
29 import logging
30 import re
30 import re
31
31
32 from tg.i18n import ugettext as _
32 from tg.i18n import ugettext as _
33
33
34 from kallithea.lib import helpers as h
34 from kallithea.lib import helpers as h
35 from kallithea.lib.utils2 import safe_str
35 from kallithea.lib.utils2 import safe_str
36 from kallithea.lib.vcs.backends.base import EmptyChangeset
36 from kallithea.lib.vcs.backends.base import EmptyChangeset
37 from kallithea.lib.vcs.exceptions import VCSError
37 from kallithea.lib.vcs.exceptions import VCSError
38 from kallithea.lib.vcs.nodes import FileNode, SubModuleNode
38 from kallithea.lib.vcs.nodes import FileNode, SubModuleNode
39
39
40
40
41 log = logging.getLogger(__name__)
41 log = logging.getLogger(__name__)
42
42
43
43
44 def _safe_id(idstring):
44 def _safe_id(idstring):
45 r"""Make a string safe for including in an id attribute.
45 r"""Make a string safe for including in an id attribute.
46
46
47 The HTML spec says that id attributes 'must begin with
47 The HTML spec says that id attributes 'must begin with
48 a letter ([A-Za-z]) and may be followed by any number
48 a letter ([A-Za-z]) and may be followed by any number
49 of letters, digits ([0-9]), hyphens ("-"), underscores
49 of letters, digits ([0-9]), hyphens ("-"), underscores
50 ("_"), colons (":"), and periods (".")'. These regexps
50 ("_"), colons (":"), and periods (".")'. These regexps
51 are slightly over-zealous, in that they remove colons
51 are slightly over-zealous, in that they remove colons
52 and periods unnecessarily.
52 and periods unnecessarily.
53
53
54 Whitespace is transformed into underscores, and then
54 Whitespace is transformed into underscores, and then
55 anything which is not a hyphen or a character that
55 anything which is not a hyphen or a character that
56 matches \w (alphanumerics and underscore) is removed.
56 matches \w (alphanumerics and underscore) is removed.
57
57
58 """
58 """
59 # Transform all whitespace to underscore
59 # Transform all whitespace to underscore
60 idstring = re.sub(r'\s', "_", idstring)
60 idstring = re.sub(r'\s', "_", idstring)
61 # Remove everything that is not a hyphen or a member of \w
61 # Remove everything that is not a hyphen or a member of \w
62 idstring = re.sub(r'(?!-)\W', "", idstring).lower()
62 idstring = re.sub(r'(?!-)\W', "", idstring).lower()
63 return idstring
63 return idstring
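For orientation, a small usage sketch of the _safe_id helper (inputs invented for illustration, not part of this changeset):

from kallithea.lib.diffs import _safe_id

_safe_id('My File.txt')   # -> 'my_filetxt': whitespace becomes '_', '.' is dropped, result lowercased
_safe_id('setup.py')      # -> 'setuppy'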
64
64
65
65
66 def as_html(table_class='code-difftable', line_class='line',
66 def as_html(table_class='code-difftable', line_class='line',
67 old_lineno_class='lineno old', new_lineno_class='lineno new',
67 old_lineno_class='lineno old', new_lineno_class='lineno new',
68 no_lineno_class='lineno',
68 no_lineno_class='lineno',
69 code_class='code', enable_comments=False, parsed_lines=None):
69 code_class='code', enable_comments=False, parsed_lines=None):
70 """
70 """
71 Return given diff as html table with customized css classes
71 Return given diff as html table with customized css classes
72 """
72 """
73 def _link_to_if(condition, label, url):
73 def _link_to_if(condition, label, url):
74 """
74 """
75                 Generates a link if the condition is met, or just the label if not.
75                 Generates a link if the condition is met, or just the label if not.
76 """
76 """
77
77
78 if condition:
78 if condition:
79 return '''<a href="%(url)s" data-pseudo-content="%(label)s"></a>''' % {
79 return '''<a href="%(url)s" data-pseudo-content="%(label)s"></a>''' % {
80 'url': url,
80 'url': url,
81 'label': label
81 'label': label
82 }
82 }
83 else:
83 else:
84 return label
84 return label
85
85
86 _html_empty = True
86 _html_empty = True
87 _html = []
87 _html = []
88 _html.append('''<table class="%(table_class)s">\n''' % {
88 _html.append('''<table class="%(table_class)s">\n''' % {
89 'table_class': table_class
89 'table_class': table_class
90 })
90 })
91
91
92 for diff in parsed_lines:
92 for file_info in parsed_lines:
93 for line in diff['chunks']:
93 for chunk in file_info['chunks']:
94 _html_empty = False
94 _html_empty = False
95 for change in line:
95 for change in chunk:
96 _html.append('''<tr class="%(lc)s %(action)s">\n''' % {
96 _html.append('''<tr class="%(lc)s %(action)s">\n''' % {
97 'lc': line_class,
97 'lc': line_class,
98 'action': change['action']
98 'action': change['action']
99 })
99 })
100 anchor_old_id = ''
100 anchor_old_id = ''
101 anchor_new_id = ''
101 anchor_new_id = ''
102 anchor_old = "%(filename)s_o%(oldline_no)s" % {
102 anchor_old = "%(filename)s_o%(oldline_no)s" % {
103 'filename': _safe_id(diff['filename']),
103 'filename': _safe_id(file_info['filename']),
104 'oldline_no': change['old_lineno']
104 'oldline_no': change['old_lineno']
105 }
105 }
106 anchor_new = "%(filename)s_n%(oldline_no)s" % {
106 anchor_new = "%(filename)s_n%(newline_no)s" % {
107 'filename': _safe_id(diff['filename']),
107 'filename': _safe_id(file_info['filename']),
108 'oldline_no': change['new_lineno']
108 'newline_no': change['new_lineno']
109 }
109 }
110 cond_old = (change['old_lineno'] != '...' and
110 cond_old = (change['old_lineno'] != '...' and
111 change['old_lineno'])
111 change['old_lineno'])
112 cond_new = (change['new_lineno'] != '...' and
112 cond_new = (change['new_lineno'] != '...' and
113 change['new_lineno'])
113 change['new_lineno'])
114 no_lineno = (change['old_lineno'] == '...' and
114 no_lineno = (change['old_lineno'] == '...' and
115 change['new_lineno'] == '...')
115 change['new_lineno'] == '...')
116 if cond_old:
116 if cond_old:
117 anchor_old_id = 'id="%s"' % anchor_old
117 anchor_old_id = 'id="%s"' % anchor_old
118 if cond_new:
118 if cond_new:
119 anchor_new_id = 'id="%s"' % anchor_new
119 anchor_new_id = 'id="%s"' % anchor_new
120 ###########################################################
120 ###########################################################
121 # OLD LINE NUMBER
121 # OLD LINE NUMBER
122 ###########################################################
122 ###########################################################
123 _html.append('''\t<td %(a_id)s class="%(olc)s" %(colspan)s>''' % {
123 _html.append('''\t<td %(a_id)s class="%(olc)s" %(colspan)s>''' % {
124 'a_id': anchor_old_id,
124 'a_id': anchor_old_id,
125 'olc': no_lineno_class if no_lineno else old_lineno_class,
125 'olc': no_lineno_class if no_lineno else old_lineno_class,
126 'colspan': 'colspan="2"' if no_lineno else ''
126 'colspan': 'colspan="2"' if no_lineno else ''
127 })
127 })
128
128
129 _html.append('''%(link)s''' % {
129 _html.append('''%(link)s''' % {
130 'link': _link_to_if(not no_lineno, change['old_lineno'],
130 'link': _link_to_if(not no_lineno, change['old_lineno'],
131 '#%s' % anchor_old)
131 '#%s' % anchor_old)
132 })
132 })
133 _html.append('''</td>\n''')
133 _html.append('''</td>\n''')
134 ###########################################################
134 ###########################################################
135 # NEW LINE NUMBER
135 # NEW LINE NUMBER
136 ###########################################################
136 ###########################################################
137
137
138 if not no_lineno:
138 if not no_lineno:
139 _html.append('''\t<td %(a_id)s class="%(nlc)s">''' % {
139 _html.append('''\t<td %(a_id)s class="%(nlc)s">''' % {
140 'a_id': anchor_new_id,
140 'a_id': anchor_new_id,
141 'nlc': new_lineno_class
141 'nlc': new_lineno_class
142 })
142 })
143
143
144 _html.append('''%(link)s''' % {
144 _html.append('''%(link)s''' % {
145 'link': _link_to_if(True, change['new_lineno'],
145 'link': _link_to_if(True, change['new_lineno'],
146 '#%s' % anchor_new)
146 '#%s' % anchor_new)
147 })
147 })
148 _html.append('''</td>\n''')
148 _html.append('''</td>\n''')
149 ###########################################################
149 ###########################################################
150 # CODE
150 # CODE
151 ###########################################################
151 ###########################################################
152 comments = '' if enable_comments else 'no-comment'
152 comments = '' if enable_comments else 'no-comment'
153 _html.append('''\t<td class="%(cc)s %(inc)s">''' % {
153 _html.append('''\t<td class="%(cc)s %(inc)s">''' % {
154 'cc': code_class,
154 'cc': code_class,
155 'inc': comments
155 'inc': comments
156 })
156 })
157 _html.append('''\n\t\t<div class="add-bubble"><div>&nbsp;</div></div><pre>%(code)s</pre>\n''' % {
157 _html.append('''\n\t\t<div class="add-bubble"><div>&nbsp;</div></div><pre>%(code)s</pre>\n''' % {
158 'code': change['line']
158 'code': change['line']
159 })
159 })
160
160
161 _html.append('''\t</td>''')
161 _html.append('''\t</td>''')
162 _html.append('''\n</tr>\n''')
162 _html.append('''\n</tr>\n''')
163 _html.append('''</table>''')
163 _html.append('''</table>''')
164 if _html_empty:
164 if _html_empty:
165 return None
165 return None
166 return ''.join(_html)
166 return ''.join(_html)
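A minimal rendering sketch, assuming a git-style diff is available as bytes; the file name and contents below are invented:

from kallithea.lib.diffs import DiffProcessor, as_html

raw = (b"diff --git a/setup.py b/setup.py\n"
       b"--- a/setup.py\n"
       b"+++ b/setup.py\n"
       b"@@ -1,1 +1,1 @@\n"
       b"-version = '0.1'\n"
       b"+version = '0.2'\n")
html = as_html(parsed_lines=DiffProcessor(raw, vcs='git').parsed)
# html is a '<table class="code-difftable">...' string, or None if the diff was empty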
167
167
168
168
169 def wrap_to_table(html):
169 def wrap_to_table(html):
170 """Given a string with html, return it wrapped in a table, similar to what
170 """Given a string with html, return it wrapped in a table, similar to what
171 DiffProcessor returns."""
171 DiffProcessor returns."""
172 return '''\
172 return '''\
173 <table class="code-difftable">
173 <table class="code-difftable">
174 <tr class="line no-comment">
174 <tr class="line no-comment">
175 <td class="lineno new"></td>
175 <td class="lineno new"></td>
176 <td class="code no-comment"><pre>%s</pre></td>
176 <td class="code no-comment"><pre>%s</pre></td>
177 </tr>
177 </tr>
178 </table>''' % html
178 </table>''' % html
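A quick illustration of wrap_to_table; the argument is invented and should already be HTML-escaped:

from kallithea.lib.diffs import wrap_to_table

fragment = wrap_to_table('Binary file')
# fragment is a single-row '<table class="code-difftable">...' matching the markup produced by as_html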
179
179
180
180
181 def wrapped_diff(filenode_old, filenode_new, diff_limit=None,
181 def wrapped_diff(filenode_old, filenode_new, diff_limit=None,
182 ignore_whitespace=True, line_context=3,
182 ignore_whitespace=True, line_context=3,
183 enable_comments=False):
183 enable_comments=False):
184 """
184 """
185 Returns a file diff wrapped into a table.
185 Returns a file diff wrapped into a table.
186 Checks for diff_limit and presents a message if the diff is too big.
186 Checks for diff_limit and presents a message if the diff is too big.
187 """
187 """
188 if filenode_old is None:
188 if filenode_old is None:
189 filenode_old = FileNode(filenode_new.path, '', EmptyChangeset())
189 filenode_old = FileNode(filenode_new.path, '', EmptyChangeset())
190
190
191 op = None
191 op = None
192     a_path = filenode_old.path # default, might be overridden by actual rename in diff
192     a_path = filenode_old.path # default, might be overridden by actual rename in diff
193 if filenode_old.is_binary or filenode_new.is_binary:
193 if filenode_old.is_binary or filenode_new.is_binary:
194 html_diff = wrap_to_table(_('Binary file'))
194 html_diff = wrap_to_table(_('Binary file'))
195 stats = (0, 0)
195 stats = (0, 0)
196
196
197 elif diff_limit != -1 and (
197 elif diff_limit != -1 and (
198 diff_limit is None or
198 diff_limit is None or
199 (filenode_old.size < diff_limit and filenode_new.size < diff_limit)):
199 (filenode_old.size < diff_limit and filenode_new.size < diff_limit)):
200
200
201 raw_diff = get_gitdiff(filenode_old, filenode_new,
201 raw_diff = get_gitdiff(filenode_old, filenode_new,
202 ignore_whitespace=ignore_whitespace,
202 ignore_whitespace=ignore_whitespace,
203 context=line_context)
203 context=line_context)
204 diff_processor = DiffProcessor(raw_diff)
204 diff_processor = DiffProcessor(raw_diff)
205 if diff_processor.parsed: # there should be exactly one element, for the specified file
205 if diff_processor.parsed: # there should be exactly one element, for the specified file
206 f = diff_processor.parsed[0]
206 f = diff_processor.parsed[0]
207 op = f['operation']
207 op = f['operation']
208 a_path = f['old_filename']
208 a_path = f['old_filename']
209
209
210 html_diff = as_html(parsed_lines=diff_processor.parsed, enable_comments=enable_comments)
210 html_diff = as_html(parsed_lines=diff_processor.parsed, enable_comments=enable_comments)
211 stats = diff_processor.stat()
211 stats = diff_processor.stat()
212
212
213 else:
213 else:
214 html_diff = wrap_to_table(_('Changeset was too big and was cut off, use '
214 html_diff = wrap_to_table(_('Changeset was too big and was cut off, use '
215 'diff menu to display this diff'))
215 'diff menu to display this diff'))
216 stats = (0, 0)
216 stats = (0, 0)
217
217
218 if not html_diff:
218 if not html_diff:
219 submodules = [o for o in [filenode_new, filenode_old] if isinstance(o, SubModuleNode)]
219 submodules = [o for o in [filenode_new, filenode_old] if isinstance(o, SubModuleNode)]
220 if submodules:
220 if submodules:
221 html_diff = wrap_to_table(h.escape('Submodule %r' % submodules[0]))
221 html_diff = wrap_to_table(h.escape('Submodule %r' % submodules[0]))
222 else:
222 else:
223 html_diff = wrap_to_table(_('No changes detected'))
223 html_diff = wrap_to_table(_('No changes detected'))
224
224
225 cs1 = filenode_old.changeset.raw_id
225 cs1 = filenode_old.changeset.raw_id
226 cs2 = filenode_new.changeset.raw_id
226 cs2 = filenode_new.changeset.raw_id
227
227
228 return cs1, cs2, a_path, html_diff, stats, op
228 return cs1, cs2, a_path, html_diff, stats, op
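A hedged sketch of how wrapped_diff might be driven from a controller; repo, revisions and path are placeholders, and changeset.get_node() is assumed from the kallithea vcs API:

from kallithea.lib.diffs import wrapped_diff

def file_diff_fragment(repo, rev1, rev2, path):
    old_node = repo.get_changeset(rev1).get_node(path)  # FileNode at the old revision
    new_node = repo.get_changeset(rev2).get_node(path)  # FileNode at the new revision
    cs1, cs2, a_path, html_diff, stats, op = wrapped_diff(
        old_node, new_node, diff_limit=256 * 1024, enable_comments=True)
    return html_diff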
229
229
230
230
231 def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True, context=3):
231 def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True, context=3):
232 """
232 """
233 Returns git style diff between given ``filenode_old`` and ``filenode_new``.
233 Returns git style diff between given ``filenode_old`` and ``filenode_new``.
234 """
234 """
235 # make sure we pass in default context
235 # make sure we pass in default context
236 context = context or 3
236 context = context or 3
237 submodules = [o for o in [filenode_new, filenode_old] if isinstance(o, SubModuleNode)]
237 submodules = [o for o in [filenode_new, filenode_old] if isinstance(o, SubModuleNode)]
238 if submodules:
238 if submodules:
239 return b''
239 return b''
240
240
241 for filenode in (filenode_old, filenode_new):
241 for filenode in (filenode_old, filenode_new):
242 if not isinstance(filenode, FileNode):
242 if not isinstance(filenode, FileNode):
243 raise VCSError("Given object should be FileNode object, not %s"
243 raise VCSError("Given object should be FileNode object, not %s"
244 % filenode.__class__)
244 % filenode.__class__)
245
245
246 repo = filenode_new.changeset.repository
246 repo = filenode_new.changeset.repository
247 old_raw_id = getattr(filenode_old.changeset, 'raw_id', repo.EMPTY_CHANGESET)
247 old_raw_id = getattr(filenode_old.changeset, 'raw_id', repo.EMPTY_CHANGESET)
248 new_raw_id = getattr(filenode_new.changeset, 'raw_id', repo.EMPTY_CHANGESET)
248 new_raw_id = getattr(filenode_new.changeset, 'raw_id', repo.EMPTY_CHANGESET)
249
249
250 vcs_gitdiff = get_diff(repo, old_raw_id, new_raw_id, filenode_new.path,
250 vcs_gitdiff = get_diff(repo, old_raw_id, new_raw_id, filenode_new.path,
251 ignore_whitespace, context)
251 ignore_whitespace, context)
252 return vcs_gitdiff
252 return vcs_gitdiff
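A similar sketch for get_gitdiff, with the same placeholder repo/node access assumed:

from kallithea.lib.diffs import get_gitdiff

def file_gitdiff(repo, rev1, rev2, path):
    old_node = repo.get_changeset(rev1).get_node(path)
    new_node = repo.get_changeset(rev2).get_node(path)
    # returns the raw git-style diff as bytes (b'' for submodules)
    return get_gitdiff(old_node, new_node, ignore_whitespace=False, context=5)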
253
253
254
254
255 def get_diff(scm_instance, rev1, rev2, path=None, ignore_whitespace=False, context=3):
255 def get_diff(scm_instance, rev1, rev2, path=None, ignore_whitespace=False, context=3):
256 """
256 """
257 A thin wrapper around vcs lib get_diff.
257 A thin wrapper around vcs lib get_diff.
258 """
258 """
259 try:
259 try:
260 return scm_instance.get_diff(rev1, rev2, path=path,
260 return scm_instance.get_diff(rev1, rev2, path=path,
261 ignore_whitespace=ignore_whitespace, context=context)
261 ignore_whitespace=ignore_whitespace, context=context)
262 except MemoryError:
262 except MemoryError:
263 h.flash('MemoryError: Diff is too big', category='error')
263 h.flash('MemoryError: Diff is too big', category='error')
264 return b''
264 return b''
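get_diff works at the repository level rather than on FileNodes; a hedged sketch with placeholder scm_instance and revisions:

from kallithea.lib.diffs import get_diff

def changeset_diff(scm_instance, rev1, rev2, path=None):
    # b'' comes back on MemoryError; a flash message is queued in that case
    return get_diff(scm_instance, rev1, rev2, path=path, ignore_whitespace=True, context=3)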
265
265
266
266
267 NEW_FILENODE = 1
267 NEW_FILENODE = 1
268 DEL_FILENODE = 2
268 DEL_FILENODE = 2
269 MOD_FILENODE = 3
269 MOD_FILENODE = 3
270 RENAMED_FILENODE = 4
270 RENAMED_FILENODE = 4
271 COPIED_FILENODE = 5
271 COPIED_FILENODE = 5
272 CHMOD_FILENODE = 6
272 CHMOD_FILENODE = 6
273 BIN_FILENODE = 7
273 BIN_FILENODE = 7
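These codes key the stats['ops'] dict that DiffProcessor builds per file; a small sketch of how a caller might test them (helper names invented):

from kallithea.lib.diffs import BIN_FILENODE, RENAMED_FILENODE

def is_rename(file_info):
    """True if DiffProcessor marked this parsed file as renamed."""
    return RENAMED_FILENODE in file_info['stats']['ops']

def is_binary_patch(file_info):
    """True if only a binary patch was present, i.e. no textual chunks."""
    return BIN_FILENODE in file_info['stats']['ops']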
274
274
275
275
276 class DiffProcessor(object):
276 class DiffProcessor(object):
277 """
277 """
278 Give it a unified or git diff and it returns a list of the files that were
278 Give it a unified or git diff and it returns a list of the files that were
279 mentioned in the diff together with a dict of meta information that
279 mentioned in the diff together with a dict of meta information that
280     can be used to render it in an HTML template.
280     can be used to render it in an HTML template.
281 """
281 """
282 _diff_git_re = re.compile(b'^diff --git', re.MULTILINE)
282 _diff_git_re = re.compile(b'^diff --git', re.MULTILINE)
283
283
284 def __init__(self, diff, vcs='hg', diff_limit=None, inline_diff=True):
284 def __init__(self, diff, vcs='hg', diff_limit=None, inline_diff=True):
285 """
285 """
286 :param diff: a text in diff format
286 :param diff: a text in diff format
287 :param vcs: type of version control hg or git
287 :param vcs: type of version control hg or git
288 :param diff_limit: define the size of diff that is considered "big"
288 :param diff_limit: define the size of diff that is considered "big"
289 based on that parameter cut off will be triggered, set to None
289 based on that parameter cut off will be triggered, set to None
290 to show full diff
290 to show full diff
291 """
291 """
292 if not isinstance(diff, bytes):
292 if not isinstance(diff, bytes):
293 raise Exception('Diff must be bytes - got %s' % type(diff))
293 raise Exception('Diff must be bytes - got %s' % type(diff))
294
294
295 self._diff = memoryview(diff)
295 self._diff = memoryview(diff)
296 self.adds = 0
296 self.adds = 0
297 self.removes = 0
297 self.removes = 0
298 self.diff_limit = diff_limit
298 self.diff_limit = diff_limit
299 self.limited_diff = False
299 self.limited_diff = False
300 self.vcs = vcs
300 self.vcs = vcs
301 self.parsed = self._parse_gitdiff(inline_diff=inline_diff)
301 self.parsed = self._parse_gitdiff(inline_diff=inline_diff)
302
302
303 def _parse_gitdiff(self, inline_diff):
303 def _parse_gitdiff(self, inline_diff):
304 """Parse self._diff and return a list of dicts with meta info and chunks for each file.
304 """Parse self._diff and return a list of dicts with meta info and chunks for each file.
305 Might set limited_diff.
305 Might set limited_diff.
306         Optionally, do an extra pass and add extra markup to one-liner changes.
306         Optionally, do an extra pass and add extra markup to one-liner changes.
307 """
307 """
308 _files = [] # list of dicts with meta info and chunks
308 _files = [] # list of dicts with meta info and chunks
309
309
310 starts = [m.start() for m in self._diff_git_re.finditer(self._diff)]
310 starts = [m.start() for m in self._diff_git_re.finditer(self._diff)]
311 starts.append(len(self._diff))
311 starts.append(len(self._diff))
312
312
313 for start, end in zip(starts, starts[1:]):
313 for start, end in zip(starts, starts[1:]):
314 if self.diff_limit and end > self.diff_limit:
314 if self.diff_limit and end > self.diff_limit:
315 self.limited_diff = True
315 self.limited_diff = True
316 continue
316 continue
317
317
318 head, diff_lines = _get_header(self.vcs, self._diff[start:end])
318 head, diff_lines = _get_header(self.vcs, self._diff[start:end])
319
319
320 op = None
320 op = None
321 stats = {
321 stats = {
322 'added': 0,
322 'added': 0,
323 'deleted': 0,
323 'deleted': 0,
324 'binary': False,
324 'binary': False,
325 'ops': {},
325 'ops': {},
326 }
326 }
327
327
328 if head['deleted_file_mode']:
328 if head['deleted_file_mode']:
329 op = 'removed'
329 op = 'removed'
330 stats['binary'] = True
330 stats['binary'] = True
331 stats['ops'][DEL_FILENODE] = 'deleted file'
331 stats['ops'][DEL_FILENODE] = 'deleted file'
332
332
333 elif head['new_file_mode']:
333 elif head['new_file_mode']:
334 op = 'added'
334 op = 'added'
335 stats['binary'] = True
335 stats['binary'] = True
336 stats['ops'][NEW_FILENODE] = 'new file %s' % head['new_file_mode']
336 stats['ops'][NEW_FILENODE] = 'new file %s' % head['new_file_mode']
337 else: # modify operation, can be cp, rename, chmod
337 else: # modify operation, can be cp, rename, chmod
338 # CHMOD
338 # CHMOD
339 if head['new_mode'] and head['old_mode']:
339 if head['new_mode'] and head['old_mode']:
340 op = 'modified'
340 op = 'modified'
341 stats['binary'] = True
341 stats['binary'] = True
342 stats['ops'][CHMOD_FILENODE] = ('modified file chmod %s => %s'
342 stats['ops'][CHMOD_FILENODE] = ('modified file chmod %s => %s'
343 % (head['old_mode'], head['new_mode']))
343 % (head['old_mode'], head['new_mode']))
344 # RENAME
344 # RENAME
345 if (head['rename_from'] and head['rename_to']
345 if (head['rename_from'] and head['rename_to']
346 and head['rename_from'] != head['rename_to']):
346 and head['rename_from'] != head['rename_to']):
347 op = 'renamed'
347 op = 'renamed'
348 stats['binary'] = True
348 stats['binary'] = True
349 stats['ops'][RENAMED_FILENODE] = ('file renamed from %s to %s'
349 stats['ops'][RENAMED_FILENODE] = ('file renamed from %s to %s'
350 % (head['rename_from'], head['rename_to']))
350 % (head['rename_from'], head['rename_to']))
351 # COPY
351 # COPY
352 if head.get('copy_from') and head.get('copy_to'):
352 if head.get('copy_from') and head.get('copy_to'):
353 op = 'modified'
353 op = 'modified'
354 stats['binary'] = True
354 stats['binary'] = True
355 stats['ops'][COPIED_FILENODE] = ('file copied from %s to %s'
355 stats['ops'][COPIED_FILENODE] = ('file copied from %s to %s'
356 % (head['copy_from'], head['copy_to']))
356 % (head['copy_from'], head['copy_to']))
357 # FALL BACK: detect missed old style add or remove
357 # FALL BACK: detect missed old style add or remove
358 if op is None:
358 if op is None:
359 if not head['a_file'] and head['b_file']:
359 if not head['a_file'] and head['b_file']:
360 op = 'added'
360 op = 'added'
361 stats['binary'] = True
361 stats['binary'] = True
362 stats['ops'][NEW_FILENODE] = 'new file'
362 stats['ops'][NEW_FILENODE] = 'new file'
363
363
364 elif head['a_file'] and not head['b_file']:
364 elif head['a_file'] and not head['b_file']:
365 op = 'removed'
365 op = 'removed'
366 stats['binary'] = True
366 stats['binary'] = True
367 stats['ops'][DEL_FILENODE] = 'deleted file'
367 stats['ops'][DEL_FILENODE] = 'deleted file'
368
368
369                 # it's neither ADD nor DELETE
369                 # it's neither ADD nor DELETE
370 if op is None:
370 if op is None:
371 op = 'modified'
371 op = 'modified'
372 stats['binary'] = True
372 stats['binary'] = True
373 stats['ops'][MOD_FILENODE] = 'modified file'
373 stats['ops'][MOD_FILENODE] = 'modified file'
374
374
375 # a real non-binary diff
375 # a real non-binary diff
376 if head['a_file'] or head['b_file']:
376 if head['a_file'] or head['b_file']:
377 chunks, added, deleted = _parse_lines(diff_lines)
377 chunks, added, deleted = _parse_lines(diff_lines)
378 stats['binary'] = False
378 stats['binary'] = False
379 stats['added'] = added
379 stats['added'] = added
380 stats['deleted'] = deleted
380 stats['deleted'] = deleted
381 # explicit mark that it's a modified file
381 # explicit mark that it's a modified file
382 if op == 'modified':
382 if op == 'modified':
383 stats['ops'][MOD_FILENODE] = 'modified file'
383 stats['ops'][MOD_FILENODE] = 'modified file'
384 else: # Git binary patch (or empty diff)
384 else: # Git binary patch (or empty diff)
385 # Git binary patch
385 # Git binary patch
386 if head['bin_patch']:
386 if head['bin_patch']:
387 stats['ops'][BIN_FILENODE] = 'binary diff not shown'
387 stats['ops'][BIN_FILENODE] = 'binary diff not shown'
388 chunks = []
388 chunks = []
389
389
390 if op == 'removed' and chunks:
390 if op == 'removed' and chunks:
391 # a way of seeing deleted content could perhaps be nice - but
391 # a way of seeing deleted content could perhaps be nice - but
392 # not with the current UI
392 # not with the current UI
393 chunks = []
393 chunks = []
394
394
395 chunks.insert(0, [{
395 chunks.insert(0, [{
396 'old_lineno': '',
396 'old_lineno': '',
397 'new_lineno': '',
397 'new_lineno': '',
398 'action': 'context',
398 'action': 'context',
399 'line': msg,
399 'line': msg,
400 } for _op, msg in stats['ops'].items()
400 } for _op, msg in stats['ops'].items()
401 if _op not in [MOD_FILENODE]])
401 if _op not in [MOD_FILENODE]])
402
402
403 _files.append({
403 _files.append({
404 'old_filename': head['a_path'],
404 'old_filename': head['a_path'],
405 'filename': head['b_path'],
405 'filename': head['b_path'],
406 'old_revision': head['a_blob_id'],
406 'old_revision': head['a_blob_id'],
407 'new_revision': head['b_blob_id'],
407 'new_revision': head['b_blob_id'],
408 'chunks': chunks,
408 'chunks': chunks,
409 'operation': op,
409 'operation': op,
410 'stats': stats,
410 'stats': stats,
411 })
411 })
412
412
413 if not inline_diff:
413 if not inline_diff:
414 return _files
414 return _files
415
415
416 # highlight inline changes when one del is followed by one add
416 # highlight inline changes when one del is followed by one add
417 for diff_data in _files:
417 for diff_data in _files:
418 for chunk in diff_data['chunks']:
418 for chunk in diff_data['chunks']:
419 lineiter = iter(chunk)
419 lineiter = iter(chunk)
420 try:
420 try:
421 peekline = next(lineiter)
421 peekline = next(lineiter)
422 while True:
422 while True:
423 # find a first del line
423 # find a first del line
424 while peekline['action'] != 'del':
424 while peekline['action'] != 'del':
425 peekline = next(lineiter)
425 peekline = next(lineiter)
426 delline = peekline
426 delline = peekline
427 peekline = next(lineiter)
427 peekline = next(lineiter)
428 # if not followed by add, eat all following del lines
428 # if not followed by add, eat all following del lines
429 if peekline['action'] != 'add':
429 if peekline['action'] != 'add':
430 while peekline['action'] == 'del':
430 while peekline['action'] == 'del':
431 peekline = next(lineiter)
431 peekline = next(lineiter)
432 continue
432 continue
433 # found an add - make sure it is the only one
433 # found an add - make sure it is the only one
434 addline = peekline
434 addline = peekline
435 try:
435 try:
436 peekline = next(lineiter)
436 peekline = next(lineiter)
437 except StopIteration:
437 except StopIteration:
438 # add was last line - ok
438 # add was last line - ok
439 _highlight_inline_diff(delline, addline)
439 _highlight_inline_diff(delline, addline)
440 raise
440 raise
441 if peekline['action'] != 'add':
441 if peekline['action'] != 'add':
442 # there was only one add line - ok
442 # there was only one add line - ok
443 _highlight_inline_diff(delline, addline)
443 _highlight_inline_diff(delline, addline)
444 except StopIteration:
444 except StopIteration:
445 pass
445 pass
446
446
447 return _files
447 return _files
448
448
449 def stat(self):
449 def stat(self):
450 """
450 """
451         Returns a tuple of added and removed lines for this instance
451         Returns a tuple of added and removed lines for this instance
452 """
452 """
453 return self.adds, self.removes
453 return self.adds, self.removes
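A self-contained sketch of the DiffProcessor API with an invented one-line change:

from kallithea.lib.diffs import DiffProcessor

raw = (b"diff --git a/hello.txt b/hello.txt\n"
       b"--- a/hello.txt\n"
       b"+++ b/hello.txt\n"
       b"@@ -1,1 +1,1 @@\n"
       b"-hello\n"
       b"+hello world\n")
dp = DiffProcessor(raw, vcs='git')
f = dp.parsed[0]
f['filename'], f['operation']                 # ('hello.txt', 'modified')
f['stats']['added'], f['stats']['deleted']    # (1, 1)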
454
454
455
455
456 _escape_re = re.compile(r'(&)|(<)|(>)|(\t)|(\r)|(?<=.)( \n| $)|(\t\n|\t$)')
456 _escape_re = re.compile(r'(&)|(<)|(>)|(\t)|(\r)|(?<=.)( \n| $)|(\t\n|\t$)')
457
457
458
458
459 def _escaper(string):
459 def _escaper(string):
460 """
460 """
461 Do HTML escaping/markup
461 Do HTML escaping/markup
462 """
462 """
463
463
464 def substitute(m):
464 def substitute(m):
465 groups = m.groups()
465 groups = m.groups()
466 if groups[0]:
466 if groups[0]:
467 return '&amp;'
467 return '&amp;'
468 if groups[1]:
468 if groups[1]:
469 return '&lt;'
469 return '&lt;'
470 if groups[2]:
470 if groups[2]:
471 return '&gt;'
471 return '&gt;'
472 if groups[3]:
472 if groups[3]:
473 return '<u>\t</u>' # Note: trailing tabs will get a longer match later
473 return '<u>\t</u>' # Note: trailing tabs will get a longer match later
474 if groups[4]:
474 if groups[4]:
475 return '<u class="cr"></u>'
475 return '<u class="cr"></u>'
476 if groups[5]:
476 if groups[5]:
477 return ' <i></i>'
477 return ' <i></i>'
478 if groups[6]:
478 if groups[6]:
479 return '<u>\t</u><i></i>'
479 return '<u>\t</u><i></i>'
480 assert False
480 assert False
481
481
482 return _escape_re.sub(substitute, safe_str(string))
482 return _escape_re.sub(substitute, safe_str(string))
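An illustration of the _escaper markup on invented input; bytes are accepted too via safe_str:

from kallithea.lib.diffs import _escaper

_escaper('if a < b & c:')   # -> 'if a &lt; b &amp; c:'
_escaper(b'x > y\r')        # -> 'x &gt; y<u class="cr"></u>'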
483
483
484
484
485 _git_header_re = re.compile(br"""
485 _git_header_re = re.compile(br"""
486 ^diff[ ]--git[ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
486 ^diff[ ]--git[ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
487 (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
487 (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
488 ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
488 ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
489 (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n
489 (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n
490 ^rename[ ]from[ ](?P<rename_from>.+)\n
490 ^rename[ ]from[ ](?P<rename_from>.+)\n
491 ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))?
491 ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))?
492 (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
492 (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
493 (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
493 (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
494 (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
494 (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
495 \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
495 \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
496 (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))?
496 (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))?
497 (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))?
497 (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))?
498 (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))?
498 (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))?
499 """, re.VERBOSE | re.MULTILINE)
499 """, re.VERBOSE | re.MULTILINE)
500
500
501
501
502 _hg_header_re = re.compile(br"""
502 _hg_header_re = re.compile(br"""
503 ^diff[ ]--git[ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
503 ^diff[ ]--git[ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
504 (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
504 (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
505 ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
505 ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
506 (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%(?:\n|$))?
506 (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%(?:\n|$))?
507 (?:^rename[ ]from[ ](?P<rename_from>.+)\n
507 (?:^rename[ ]from[ ](?P<rename_from>.+)\n
508 ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))?
508 ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))?
509 (?:^copy[ ]from[ ](?P<copy_from>.+)\n
509 (?:^copy[ ]from[ ](?P<copy_from>.+)\n
510 ^copy[ ]to[ ](?P<copy_to>.+)(?:\n|$))?
510 ^copy[ ]to[ ](?P<copy_to>.+)(?:\n|$))?
511 (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
511 (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
512 (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
512 (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
513 (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
513 (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
514 \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
514 \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
515 (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))?
515 (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))?
516 (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))?
516 (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))?
517 (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))?
517 (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))?
518 """, re.VERBOSE | re.MULTILINE)
518 """, re.VERBOSE | re.MULTILINE)
519
519
520
520
521 _header_next_check = re.compile(br'''(?!@)(?!literal )(?!delta )''')
521 _header_next_check = re.compile(br'''(?!@)(?!literal )(?!delta )''')
522
522
523
523
524 def _get_header(vcs, diff_chunk):
524 def _get_header(vcs, diff_chunk):
525 """
525 """
526 Parses a Git diff for a single file (header and chunks) and returns a tuple with:
526 Parses a Git diff for a single file (header and chunks) and returns a tuple with:
527
527
528 1. A dict with meta info:
528 1. A dict with meta info:
529
529
530 a_path, b_path, similarity_index, rename_from, rename_to,
530 a_path, b_path, similarity_index, rename_from, rename_to,
531 old_mode, new_mode, new_file_mode, deleted_file_mode,
531 old_mode, new_mode, new_file_mode, deleted_file_mode,
532 a_blob_id, b_blob_id, b_mode, a_file, b_file
532 a_blob_id, b_blob_id, b_mode, a_file, b_file
533
533
534 2. An iterator yielding lines with simple HTML markup.
534 2. An iterator yielding lines with simple HTML markup.
535 """
535 """
536 match = None
536 match = None
537 if vcs == 'git':
537 if vcs == 'git':
538 match = _git_header_re.match(diff_chunk)
538 match = _git_header_re.match(diff_chunk)
539 elif vcs == 'hg':
539 elif vcs == 'hg':
540 match = _hg_header_re.match(diff_chunk)
540 match = _hg_header_re.match(diff_chunk)
541 if match is None:
541 if match is None:
542 raise Exception('diff not recognized as valid %s diff' % vcs)
542 raise Exception('diff not recognized as valid %s diff' % vcs)
543 meta_info = {k: None if v is None else safe_str(v) for k, v in match.groupdict().items()}
543 meta_info = {k: None if v is None else safe_str(v) for k, v in match.groupdict().items()}
544 rest = diff_chunk[match.end():]
544 rest = diff_chunk[match.end():]
545 if rest and _header_next_check.match(rest):
545 if rest and _header_next_check.match(rest):
546 raise Exception('cannot parse %s diff header: %r followed by %r' % (vcs, safe_str(bytes(diff_chunk[:match.end()])), safe_str(bytes(rest[:1000]))))
546 raise Exception('cannot parse %s diff header: %r followed by %r' % (vcs, safe_str(bytes(diff_chunk[:match.end()])), safe_str(bytes(rest[:1000]))))
547 diff_lines = (_escaper(m.group(0)) for m in re.finditer(br'.*\n|.+$', rest)) # don't split on \r as str.splitlines do
547 diff_lines = (_escaper(m.group(0)) for m in re.finditer(br'.*\n|.+$', rest)) # don't split on \r as str.splitlines do
548 return meta_info, diff_lines
548 return meta_info, diff_lines
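A sketch of _get_header on an invented new-file header (a body is not required for the header parse):

from kallithea.lib.diffs import _get_header

head, lines = _get_header('git', b"diff --git a/setup.py b/setup.py\n"
                                 b"new file mode 100644\n"
                                 b"--- /dev/null\n"
                                 b"+++ b/setup.py\n")
head['b_path'], head['new_file_mode'], head['a_file']   # ('setup.py', '100644', None)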
549
549
550
550
551 _chunk_re = re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)')
551 _chunk_re = re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)')
552 _newline_marker = re.compile(r'^\\ No newline at end of file')
552 _newline_marker = re.compile(r'^\\ No newline at end of file')
553
553
554
554
555 def _parse_lines(diff_lines):
555 def _parse_lines(diff_lines):
556 """
556 """
557 Given an iterator of diff body lines, parse them and return a dict per
557 Given an iterator of diff body lines, parse them and return a dict per
558 line and added/removed totals.
558 line and added/removed totals.
559 """
559 """
560 added = deleted = 0
560 added = deleted = 0
561 old_line = old_end = new_line = new_end = None
561 old_line = old_end = new_line = new_end = None
562
562
563 chunks = []
563 chunks = []
564 try:
564 try:
565 line = next(diff_lines)
565 line = next(diff_lines)
566
566
567 while True:
567 while True:
568 lines = []
568 lines = []
569 chunks.append(lines)
569 chunks.append(lines)
570
570
571 match = _chunk_re.match(line)
571 match = _chunk_re.match(line)
572
572
573 if not match:
573 if not match:
574 raise Exception('error parsing diff @@ line %r' % line)
574 raise Exception('error parsing diff @@ line %r' % line)
575
575
576 gr = match.groups()
576 gr = match.groups()
577 (old_line, old_end,
577 (old_line, old_end,
578 new_line, new_end) = [int(x or 1) for x in gr[:-1]]
578 new_line, new_end) = [int(x or 1) for x in gr[:-1]]
579 old_line -= 1
579 old_line -= 1
580 new_line -= 1
580 new_line -= 1
581
581
582 context = len(gr) == 5
582 context = len(gr) == 5
583 old_end += old_line
583 old_end += old_line
584 new_end += new_line
584 new_end += new_line
585
585
586 if context:
586 if context:
587 # skip context only if it's first line
587 # skip context only if it's first line
588 if int(gr[0]) > 1:
588 if int(gr[0]) > 1:
589 lines.append({
589 lines.append({
590 'old_lineno': '...',
590 'old_lineno': '...',
591 'new_lineno': '...',
591 'new_lineno': '...',
592 'action': 'context',
592 'action': 'context',
593 'line': line,
593 'line': line,
594 })
594 })
595
595
596 line = next(diff_lines)
596 line = next(diff_lines)
597
597
598 while old_line < old_end or new_line < new_end:
598 while old_line < old_end or new_line < new_end:
599 if not line:
599 if not line:
600 raise Exception('error parsing diff - empty line at -%s+%s' % (old_line, new_line))
600 raise Exception('error parsing diff - empty line at -%s+%s' % (old_line, new_line))
601
601
602 affects_old = affects_new = False
602 affects_old = affects_new = False
603
603
604 command = line[0]
604 command = line[0]
605 if command == '+':
605 if command == '+':
606 affects_new = True
606 affects_new = True
607 action = 'add'
607 action = 'add'
608 added += 1
608 added += 1
609 elif command == '-':
609 elif command == '-':
610 affects_old = True
610 affects_old = True
611 action = 'del'
611 action = 'del'
612 deleted += 1
612 deleted += 1
613 elif command == ' ':
613 elif command == ' ':
614 affects_old = affects_new = True
614 affects_old = affects_new = True
615 action = 'unmod'
615 action = 'unmod'
616 else:
616 else:
617 raise Exception('error parsing diff - unknown command in line %r at -%s+%s' % (line, old_line, new_line))
617 raise Exception('error parsing diff - unknown command in line %r at -%s+%s' % (line, old_line, new_line))
618
618
619 if not _newline_marker.match(line):
619 if not _newline_marker.match(line):
620 old_line += affects_old
620 old_line += affects_old
621 new_line += affects_new
621 new_line += affects_new
622 lines.append({
622 lines.append({
623 'old_lineno': affects_old and old_line or '',
623 'old_lineno': affects_old and old_line or '',
624 'new_lineno': affects_new and new_line or '',
624 'new_lineno': affects_new and new_line or '',
625 'action': action,
625 'action': action,
626 'line': line[1:],
626 'line': line[1:],
627 })
627 })
628
628
629 line = next(diff_lines)
629 line = next(diff_lines)
630
630
631 if _newline_marker.match(line):
631 if _newline_marker.match(line):
632 # we need to append to lines, since this is not
632 # we need to append to lines, since this is not
633 # counted in the line specs of diff
633 # counted in the line specs of diff
634 lines.append({
634 lines.append({
635 'old_lineno': '...',
635 'old_lineno': '...',
636 'new_lineno': '...',
636 'new_lineno': '...',
637 'action': 'context',
637 'action': 'context',
638 'line': line,
638 'line': line,
639 })
639 })
640 line = next(diff_lines)
640 line = next(diff_lines)
641 if old_line > old_end:
641 if old_line > old_end:
642 raise Exception('error parsing diff - more than %s "-" lines at -%s+%s' % (old_end, old_line, new_line))
642 raise Exception('error parsing diff - more than %s "-" lines at -%s+%s' % (old_end, old_line, new_line))
643 if new_line > new_end:
643 if new_line > new_end:
644 raise Exception('error parsing diff - more than %s "+" lines at -%s+%s' % (new_end, old_line, new_line))
644 raise Exception('error parsing diff - more than %s "+" lines at -%s+%s' % (new_end, old_line, new_line))
645 except StopIteration:
645 except StopIteration:
646 pass
646 pass
647 if old_line != old_end or new_line != new_end:
647 if old_line != old_end or new_line != new_end:
648 raise Exception('diff processing broken when old %s<>%s or new %s<>%s line %r' % (old_line, old_end, new_line, new_end, line))
648 raise Exception('diff processing broken when old %s<>%s or new %s<>%s line %r' % (old_line, old_end, new_line, new_end, line))
649
649
650 return chunks, added, deleted
650 return chunks, added, deleted
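A sketch of _parse_lines with an invented, already-escaped hunk body:

from kallithea.lib.diffs import _parse_lines

body = iter(['@@ -1,1 +1,1 @@\n', '-hello\n', '+hello world\n'])
chunks, added, deleted = _parse_lines(body)
# added == 1, deleted == 1; chunks[0] holds one 'del' dict and one 'add' dict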
651
651
652 # Used for inline highlighter word split, must match the substitutions in _escaper
652 # Used for inline highlighter word split, must match the substitutions in _escaper
653 _token_re = re.compile(r'()(&amp;|&lt;|&gt;|<u>\t</u>|<u class="cr"></u>| <i></i>|\W+?)')
653 _token_re = re.compile(r'()(&amp;|&lt;|&gt;|<u>\t</u>|<u class="cr"></u>| <i></i>|\W+?)')
654
654
655
655
656 def _highlight_inline_diff(old, new):
656 def _highlight_inline_diff(old, new):
657 """
657 """
658 Highlight simple add/remove in two lines given as info dicts. They are
658 Highlight simple add/remove in two lines given as info dicts. They are
659 modified in place and given markup with <del>/<ins>.
659 modified in place and given markup with <del>/<ins>.
660 """
660 """
661 assert old['action'] == 'del'
661 assert old['action'] == 'del'
662 assert new['action'] == 'add'
662 assert new['action'] == 'add'
663
663
664 oldwords = _token_re.split(old['line'])
664 oldwords = _token_re.split(old['line'])
665 newwords = _token_re.split(new['line'])
665 newwords = _token_re.split(new['line'])
666 sequence = difflib.SequenceMatcher(None, oldwords, newwords)
666 sequence = difflib.SequenceMatcher(None, oldwords, newwords)
667
667
668 oldfragments, newfragments = [], []
668 oldfragments, newfragments = [], []
669 for tag, i1, i2, j1, j2 in sequence.get_opcodes():
669 for tag, i1, i2, j1, j2 in sequence.get_opcodes():
670 oldfrag = ''.join(oldwords[i1:i2])
670 oldfrag = ''.join(oldwords[i1:i2])
671 newfrag = ''.join(newwords[j1:j2])
671 newfrag = ''.join(newwords[j1:j2])
672 if tag != 'equal':
672 if tag != 'equal':
673 if oldfrag:
673 if oldfrag:
674 oldfrag = '<del>%s</del>' % oldfrag
674 oldfrag = '<del>%s</del>' % oldfrag
675 if newfrag:
675 if newfrag:
676 newfrag = '<ins>%s</ins>' % newfrag
676 newfrag = '<ins>%s</ins>' % newfrag
677 oldfragments.append(oldfrag)
677 oldfragments.append(oldfrag)
678 newfragments.append(newfrag)
678 newfragments.append(newfrag)
679
679
680 old['line'] = "".join(oldfragments)
680 old['line'] = "".join(oldfragments)
681 new['line'] = "".join(newfragments)
681 new['line'] = "".join(newfragments)
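Finally, a sketch of the inline highlighter on two invented line dicts:

from kallithea.lib.diffs import _highlight_inline_diff

old = {'action': 'del', 'line': 'timeout = 30'}
new = {'action': 'add', 'line': 'timeout = 60'}
_highlight_inline_diff(old, new)
old['line']   # 'timeout = <del>30</del>'
new['line']   # 'timeout = <ins>60</ins>'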