White space cleanup
marcink - r3029:685ebc84 beta
@@ -1,183 +1,182 b''
# -*- coding: utf-8 -*-
"""
    rhodecode.controllers.feed
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    Feed controller for rhodecode

    :created_on: Apr 23, 2010
    :author: marcink
    :copyright: (C) 2010-2012 Marcin Kuzminski <marcin@python-works.com>
    :license: GPLv3, see COPYING for more details.
"""
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import logging

from pylons import url, response, tmpl_context as c
from pylons.i18n.translation import _

from beaker.cache import cache_region, region_invalidate
from webhelpers.feedgenerator import Atom1Feed, Rss201rev2Feed

from rhodecode.lib import helpers as h
from rhodecode.lib.auth import LoginRequired, HasRepoPermissionAnyDecorator
from rhodecode.lib.base import BaseRepoController
from rhodecode.lib.diffs import DiffProcessor, LimitedDiffContainer
from rhodecode.model.db import CacheInvalidation
from rhodecode.lib.utils2 import safe_int, str2bool

log = logging.getLogger(__name__)


class FeedController(BaseRepoController):

    @LoginRequired(api_access=True)
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def __before__(self):
        super(FeedController, self).__before__()
        # common values for feeds
        self.description = _('Changes on %s repository')
        self.title = _('%s %s feed') % (c.rhodecode_name, '%s')
        self.language = 'en-us'
        self.ttl = "5"
        import rhodecode
        CONF = rhodecode.CONFIG
        self.include_diff = str2bool(CONF.get('rss_include_diff', False))
        self.feed_nr = safe_int(CONF.get('rss_items_per_page', 20))
        # we need to protect against parsing huge diffs here, otherwise
        # we can kill the server
        self.feed_diff_limit = safe_int(CONF.get('rss_cut_off_limit'), 32 * 1024)
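For reference, a hedged sketch of the .ini settings that __before__ reads through rhodecode.CONFIG; the key names come straight from the CONF.get() calls above, while the values shown are simply the in-code defaults and not taken from any shipped config file:

## feed settings (illustrative values only)
rss_include_diff = false
rss_items_per_page = 20
rss_cut_off_limit = 32768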

    def _get_title(self, cs):
        return "%s" % (
            h.shorter(cs.message, 160)
        )

    def __changes(self, cs):
        changes = []
        diff_processor = DiffProcessor(cs.diff(),
                                       diff_limit=self.feed_diff_limit)
        _parsed = diff_processor.prepare(inline_diff=False)
        limited_diff = False
        if isinstance(_parsed, LimitedDiffContainer):
            limited_diff = True

        for st in _parsed:
            st.update({'added': st['stats'][0],
                       'removed': st['stats'][1]})
            changes.append('\n %(operation)s %(filename)s '
                           '(%(added)s lines added, %(removed)s lines removed)'
                           % st)
        if limited_diff:
            changes = changes + ['\n ' +
                                 _('Changeset was too big and was cut off...')]
        return diff_processor, changes

    def __get_desc(self, cs):
        desc_msg = []
        desc_msg.append('%s %s %s<br/>' % (h.person(cs.author),
                                           _('committed on'),
                                           h.fmt_date(cs.date)))
        # branches, tags, bookmarks
        if cs.branch:
            desc_msg.append('branch: %s<br/>' % cs.branch)
        if h.is_hg(c.rhodecode_repo):
            for book in cs.bookmarks:
                desc_msg.append('bookmark: %s<br/>' % book)
        for tag in cs.tags:
            desc_msg.append('tag: %s<br/>' % tag)
        diff_processor, changes = self.__changes(cs)
        # rev link
        _url = url('changeset_home', repo_name=cs.repository.name,
                   revision=cs.raw_id, qualified=True)
        desc_msg.append('changeset: <a href="%s">%s</a>' % (_url, cs.raw_id[:8]))

        desc_msg.append('<pre>')
        desc_msg.append(cs.message)
        desc_msg.append('\n')
        desc_msg.extend(changes)
        if self.include_diff:
            desc_msg.append('\n\n')
            desc_msg.append(diff_processor.as_raw())
        desc_msg.append('</pre>')
        return desc_msg

    def atom(self, repo_name):
        """Produce an atom-1.0 feed via feedgenerator module"""

        @cache_region('long_term')
        def _get_feed_from_cache(key):
            feed = Atom1Feed(
                title=self.title % repo_name,
                link=url('summary_home', repo_name=repo_name,
                         qualified=True),
                description=self.description % repo_name,
                language=self.language,
                ttl=self.ttl
            )

            for cs in reversed(list(c.rhodecode_repo[-self.feed_nr:])):
                feed.add_item(title=self._get_title(cs),
                              link=url('changeset_home', repo_name=repo_name,
                                       revision=cs.raw_id, qualified=True),
                              author_name=cs.author,
                              description=''.join(self.__get_desc(cs)),
                              pubdate=cs.date,
                              )

            response.content_type = feed.mime_type
            return feed.writeString('utf-8')

        key = repo_name + '_ATOM'
        inv = CacheInvalidation.invalidate(key)
        if inv is not None:
            region_invalidate(_get_feed_from_cache, None, key)
            CacheInvalidation.set_valid(inv.cache_key)
        return _get_feed_from_cache(key)

    def rss(self, repo_name):
        """Produce an rss2 feed via feedgenerator module"""

        @cache_region('long_term')
        def _get_feed_from_cache(key):
            feed = Rss201rev2Feed(
                title=self.title % repo_name,
                link=url('summary_home', repo_name=repo_name,
                         qualified=True),
                description=self.description % repo_name,
                language=self.language,
                ttl=self.ttl
            )

            for cs in reversed(list(c.rhodecode_repo[-self.feed_nr:])):
                feed.add_item(title=self._get_title(cs),
                              link=url('changeset_home', repo_name=repo_name,
                                       revision=cs.raw_id, qualified=True),
                              author_name=cs.author,
                              description=''.join(self.__get_desc(cs)),
                              pubdate=cs.date,
                              )

            response.content_type = feed.mime_type
            return feed.writeString('utf-8')

        key = repo_name + '_RSS'
        inv = CacheInvalidation.invalidate(key)
        if inv is not None:
            region_invalidate(_get_feed_from_cache, None, key)
            CacheInvalidation.set_valid(inv.cache_key)
        return _get_feed_from_cache(key)
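Both atom() and rss() drive the same webhelpers.feedgenerator API. A minimal standalone sketch of that API, with repository data replaced by made-up literals but mirroring the calls above:

from webhelpers.feedgenerator import Atom1Feed

feed = Atom1Feed(
    title='example repository feed',           # self.title % repo_name
    link='http://example.com/repo/summary',    # url('summary_home', ...)
    description='Changes on example repository',
    language='en-us',
    ttl="5",
)
feed.add_item(title='short commit message',
              link='http://example.com/repo/changeset/deadbeef',
              author_name='marcink',
              description='<pre>description markup built by __get_desc()</pre>')
# feed.mime_type is the value assigned to response.content_type above
xml = feed.writeString('utf-8')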
@@ -1,768 +1,768 b''
# -*- coding: utf-8 -*-
"""
    rhodecode.lib.diffs
    ~~~~~~~~~~~~~~~~~~~

    Set of diffing helpers, previously part of vcs


    :created_on: Dec 4, 2011
    :author: marcink
    :copyright: (C) 2010-2012 Marcin Kuzminski <marcin@python-works.com>
    :original copyright: 2007-2008 by Armin Ronacher
    :license: GPLv3, see COPYING for more details.
"""
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import re
import difflib
import logging
import traceback

from itertools import tee, imap

from mercurial import patch
from mercurial.mdiff import diffopts
from mercurial.bundlerepo import bundlerepository

from pylons.i18n.translation import _

from rhodecode.lib.compat import BytesIO
from rhodecode.lib.vcs.utils.hgcompat import localrepo
from rhodecode.lib.vcs.exceptions import VCSError
from rhodecode.lib.vcs.nodes import FileNode, SubModuleNode
from rhodecode.lib.vcs.backends.base import EmptyChangeset
from rhodecode.lib.helpers import escape
from rhodecode.lib.utils import make_ui
from rhodecode.lib.utils2 import safe_unicode

log = logging.getLogger(__name__)


def wrap_to_table(str_):
    return '''<table class="code-difftable">
                <tr class="line no-comment">
                <td class="lineno new"></td>
                <td class="code no-comment"><pre>%s</pre></td>
                </tr>
              </table>''' % str_


def wrapped_diff(filenode_old, filenode_new, cut_off_limit=None,
                 ignore_whitespace=True, line_context=3,
                 enable_comments=False):
    """
    Returns a diff wrapped in a table, checks for cut_off_limit and presents
    a proper message
    """

    if filenode_old is None:
        filenode_old = FileNode(filenode_new.path, '', EmptyChangeset())

    if filenode_old.is_binary or filenode_new.is_binary:
        diff = wrap_to_table(_('binary file'))
        stats = (0, 0)
        size = 0

    elif cut_off_limit != -1 and (cut_off_limit is None or
            (filenode_old.size < cut_off_limit and filenode_new.size < cut_off_limit)):

        f_gitdiff = get_gitdiff(filenode_old, filenode_new,
                                ignore_whitespace=ignore_whitespace,
                                context=line_context)
        diff_processor = DiffProcessor(f_gitdiff, format='gitdiff')

        diff = diff_processor.as_html(enable_comments=enable_comments)
        stats = diff_processor.stat()
        size = len(diff or '')
    else:
        diff = wrap_to_table(_('Changeset was too big and was cut off, use '
                               'diff menu to display this diff'))
        stats = (0, 0)
        size = 0
    if not diff:
        submodules = filter(lambda o: isinstance(o, SubModuleNode),
                            [filenode_new, filenode_old])
        if submodules:
            diff = wrap_to_table(escape('Submodule %r' % submodules[0]))
        else:
            diff = wrap_to_table(_('No changes detected'))

    cs1 = filenode_old.changeset.raw_id
    cs2 = filenode_new.changeset.raw_id

    return size, cs1, cs2, diff, stats
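A rough, hypothetical call site for wrapped_diff(), assuming repo is a vcs repository instance and both changesets contain the file; the file name and limit below are made up:

new_cs = repo.get_changeset()                      # tip
old_cs = repo.get_changeset(new_cs.revision - 1)   # its predecessor by index
size, cs1, cs2, diff_html, stats = wrapped_diff(
    old_cs.get_node('setup.py'), new_cs.get_node('setup.py'),
    cut_off_limit=256 * 1024)
# diff_html is the ready-to-embed <table>, stats is the DiffProcessor.stat() pair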

def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True, context=3):
    """
    Returns git style diff between given ``filenode_old`` and ``filenode_new``.

    :param ignore_whitespace: ignore whitespaces in diff
    """
    # make sure we pass in default context
    context = context or 3
    submodules = filter(lambda o: isinstance(o, SubModuleNode),
                        [filenode_new, filenode_old])
    if submodules:
        return ''

    for filenode in (filenode_old, filenode_new):
        if not isinstance(filenode, FileNode):
            raise VCSError("Given object should be FileNode object, not %s"
                           % filenode.__class__)

    repo = filenode_new.changeset.repository
    old_raw_id = getattr(filenode_old.changeset, 'raw_id', repo.EMPTY_CHANGESET)
    new_raw_id = getattr(filenode_new.changeset, 'raw_id', repo.EMPTY_CHANGESET)

    vcs_gitdiff = repo.get_diff(old_raw_id, new_raw_id, filenode_new.path,
                                ignore_whitespace, context)
    return vcs_gitdiff

NEW_FILENODE = 1
DEL_FILENODE = 2
MOD_FILENODE = 3
RENAMED_FILENODE = 4
CHMOD_FILENODE = 5


class DiffLimitExceeded(Exception):
    pass


class LimitedDiffContainer(object):

    def __init__(self, diff_limit, cur_diff_size, diff):
        self.diff = diff
        self.diff_limit = diff_limit
        self.cur_diff_size = cur_diff_size

    def __iter__(self):
        for l in self.diff:
            yield l


class DiffProcessor(object):
    """
    Give it a unified or git diff and it returns a list of the files that were
    mentioned in the diff together with a dict of meta information that
    can be used to render it in a HTML template.
    """
    _chunk_re = re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)')
    _newline_marker = re.compile(r'^\\ No newline at end of file')
    _git_header_re = re.compile(r"""
        #^diff[ ]--git
        [ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
        (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n
           ^rename[ ]from[ ](?P<rename_from>\S+)\n
           ^rename[ ]to[ ](?P<rename_to>\S+)(?:\n|$))?
        (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
           ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
        (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
        (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
        (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
            \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
        (?:^---[ ](a/(?P<a_file>.+)|/dev/null)(?:\n|$))?
        (?:^\+\+\+[ ](b/(?P<b_file>.+)|/dev/null)(?:\n|$))?
    """, re.VERBOSE | re.MULTILINE)
    _hg_header_re = re.compile(r"""
        #^diff[ ]--git
        [ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
        (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%(?:\n|$))?
        (?:^rename[ ]from[ ](?P<rename_from>\S+)\n
           ^rename[ ]to[ ](?P<rename_to>\S+)(?:\n|$))?
        (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
           ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
        (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
        (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
        (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
            \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
        (?:^---[ ](a/(?P<a_file>.+)|/dev/null)(?:\n|$))?
        (?:^\+\+\+[ ](b/(?P<b_file>.+)|/dev/null)(?:\n|$))?
    """, re.VERBOSE | re.MULTILINE)

    def __init__(self, diff, vcs='hg', format='gitdiff', diff_limit=None):
        """
        :param diff: a text in diff format
        :param vcs: type of version control, hg or git
        :param format: format of diff passed, `udiff` or `gitdiff`
        :param diff_limit: define the size of diff that is considered "big"
            based on that parameter cut off will be triggered, set to None
            to show full diff
        """
        if not isinstance(diff, basestring):
            raise Exception('Diff must be a basestring got %s instead' % type(diff))

        self._diff = diff
        self._format = format
        self.adds = 0
        self.removes = 0
        # calculate diff size
        self.diff_size = len(diff)
        self.diff_limit = diff_limit
        self.cur_diff_size = 0
        self.parsed = False
        self.parsed_diff = []
        self.vcs = vcs

        if format == 'gitdiff':
            self.differ = self._highlight_line_difflib
            self._parser = self._parse_gitdiff
        else:
            self.differ = self._highlight_line_udiff
            self._parser = self._parse_udiff

    def _copy_iterator(self):
        """
        make a fresh copy of generator, we should not iterate through
        an original as it's needed for repeating operations on
        this instance of DiffProcessor
        """
        self.__udiff, iterator_copy = tee(self.__udiff)
        return iterator_copy

    def _escaper(self, string):
        """
        Escaper for diff escapes special chars and checks the diff limit

        :param string:
        :type string:
        """

        self.cur_diff_size += len(string)

        # escaper gets iterated on each .next() call and it checks if each
        # parsed line doesn't exceed the diff limit
        if self.diff_limit is not None and self.cur_diff_size > self.diff_limit:
            raise DiffLimitExceeded('Diff Limit Exceeded')

        return safe_unicode(string).replace('&', '&amp;')\
            .replace('<', '&lt;')\
            .replace('>', '&gt;')

    def _line_counter(self, l):
        """
        Checks each line and bumps total adds/removes for this diff

        :param l:
        """
        if l.startswith('+') and not l.startswith('+++'):
            self.adds += 1
        elif l.startswith('-') and not l.startswith('---'):
            self.removes += 1
        return safe_unicode(l)

    def _highlight_line_difflib(self, line, next_):
        """
        Highlight inline changes in both lines.
        """

        if line['action'] == 'del':
            old, new = line, next_
        else:
            old, new = next_, line

        oldwords = re.split(r'(\W)', old['line'])
        newwords = re.split(r'(\W)', new['line'])

        sequence = difflib.SequenceMatcher(None, oldwords, newwords)

        oldfragments, newfragments = [], []
        for tag, i1, i2, j1, j2 in sequence.get_opcodes():
            oldfrag = ''.join(oldwords[i1:i2])
            newfrag = ''.join(newwords[j1:j2])
            if tag != 'equal':
                if oldfrag:
                    oldfrag = '<del>%s</del>' % oldfrag
                if newfrag:
                    newfrag = '<ins>%s</ins>' % newfrag
            oldfragments.append(oldfrag)
            newfragments.append(newfrag)

        old['line'] = "".join(oldfragments)
        new['line'] = "".join(newfragments)

    def _highlight_line_udiff(self, line, next_):
        """
        Highlight inline changes in both lines.
        """
        start = 0
        limit = min(len(line['line']), len(next_['line']))
        while start < limit and line['line'][start] == next_['line'][start]:
            start += 1
        end = -1
        limit -= start
        while -end <= limit and line['line'][end] == next_['line'][end]:
            end -= 1
        end += 1
        if start or end:
            def do(l):
                last = end + len(l['line'])
                if l['action'] == 'add':
                    tag = 'ins'
                else:
                    tag = 'del'
                l['line'] = '%s<%s>%s</%s>%s' % (
                    l['line'][:start],
                    tag,
                    l['line'][start:last],
                    tag,
                    l['line'][last:]
                )
            do(line)
            do(next_)

    def _get_header(self, diff_chunk):
        """
        parses the diff header, and returns parts, and leftover diff
        parts consist of 14 elements::

            a_path, b_path, similarity_index, rename_from, rename_to,
            old_mode, new_mode, new_file_mode, deleted_file_mode,
            a_blob_id, b_blob_id, b_mode, a_file, b_file

        :param diff_chunk:
        :type diff_chunk:
        """

        if self.vcs == 'git':
            match = self._git_header_re.match(diff_chunk)
            diff = diff_chunk[match.end():]
            return match.groupdict(), imap(self._escaper, diff.splitlines(1))
        elif self.vcs == 'hg':
            match = self._hg_header_re.match(diff_chunk)
            diff = diff_chunk[match.end():]
            return match.groupdict(), imap(self._escaper, diff.splitlines(1))
        else:
            raise Exception('VCS type %s is not supported' % self.vcs)

    def _clean_line(self, line, command):
        if command in ['+', '-', ' ']:
            # only modify the line if it's actually a diff thing
            line = line[1:]
        return line

    def _parse_gitdiff(self, inline_diff=True):
        _files = []
        diff_container = lambda arg: arg

        # split the diff into separate chunks per --git a/file b/file header
        for raw_diff in ('\n' + self._diff).split('\ndiff --git')[1:]:
            binary = False
            binary_msg = 'unknown binary'
            head, diff = self._get_header(raw_diff)

            if not head['a_file'] and head['b_file']:
                op = 'A'
            elif head['a_file'] and head['b_file']:
                op = 'M'
            elif head['a_file'] and not head['b_file']:
                op = 'D'
            else:
                # probably we're dealing with a binary file
                binary = True
                if head['deleted_file_mode']:
                    op = 'D'
                    stats = ['b', DEL_FILENODE]
                    binary_msg = 'deleted binary file'
                elif head['new_file_mode']:
                    op = 'A'
                    stats = ['b', NEW_FILENODE]
                    binary_msg = 'new binary file %s' % head['new_file_mode']
                else:
                    if head['new_mode'] and head['old_mode']:
                        stats = ['b', CHMOD_FILENODE]
                        op = 'M'
                        binary_msg = ('modified binary file chmod %s => %s'
                                      % (head['old_mode'], head['new_mode']))
                    elif (head['rename_from'] and head['rename_to']
                          and head['rename_from'] != head['rename_to']):
                        stats = ['b', RENAMED_FILENODE]
                        op = 'M'
                        binary_msg = ('file renamed from %s to %s'
                                      % (head['rename_from'], head['rename_to']))
                    else:
                        stats = ['b', MOD_FILENODE]
                        op = 'M'
                        binary_msg = 'modified binary file'

            if not binary:
                try:
                    chunks, stats = self._parse_lines(diff)
                except DiffLimitExceeded:
                    diff_container = lambda _diff: LimitedDiffContainer(
                        self.diff_limit,
                        self.cur_diff_size,
                        _diff)
                    break
            else:
                chunks = []
                chunks.append([{
                    'old_lineno': '',
                    'new_lineno': '',
                    'action': 'binary',
                    'line': binary_msg,
                }])

            _files.append({
                'filename': head['b_path'],
                'old_revision': head['a_blob_id'],
                'new_revision': head['b_blob_id'],
                'chunks': chunks,
                'operation': op,
                'stats': stats,
            })

        sorter = lambda info: {'A': 0, 'M': 1, 'D': 2}.get(info['operation'])

        if inline_diff is False:
            return diff_container(sorted(_files, key=sorter))

        # highlight inline changes
        for diff_data in _files:
            for chunk in diff_data['chunks']:
                lineiter = iter(chunk)
                try:
                    while 1:
                        line = lineiter.next()
                        if line['action'] not in ['unmod', 'context']:
                            nextline = lineiter.next()
                            if nextline['action'] in ['unmod', 'context'] or \
                                    nextline['action'] == line['action']:
                                continue
                            self.differ(line, nextline)
                except StopIteration:
                    pass

        return diff_container(sorted(_files, key=sorter))

    def _parse_udiff(self, inline_diff=True):
        raise NotImplementedError()

    def _parse_lines(self, diff):
        """
        Parse the diff and return data for the template.
        """

        lineiter = iter(diff)
        stats = [0, 0]

        try:
            chunks = []
            line = lineiter.next()

            while line:
                lines = []
                chunks.append(lines)

                match = self._chunk_re.match(line)

                if not match:
                    break

                gr = match.groups()
                (old_line, old_end,
                 new_line, new_end) = [int(x or 1) for x in gr[:-1]]
                old_line -= 1
                new_line -= 1

                context = len(gr) == 5
                old_end += old_line
                new_end += new_line

                if context:
                    # skip context only if it's first line
                    if int(gr[0]) > 1:
                        lines.append({
                            'old_lineno': '...',
                            'new_lineno': '...',
                            'action': 'context',
                            'line': line,
                        })

                line = lineiter.next()

                while old_line < old_end or new_line < new_end:
                    command = ' '
                    if line:
                        command = line[0]

                    affects_old = affects_new = False

                    # ignore those if we don't expect them
                    if command in '#@':
                        continue
                    elif command == '+':
                        affects_new = True
                        action = 'add'
                        stats[0] += 1
                    elif command == '-':
                        affects_old = True
                        action = 'del'
                        stats[1] += 1
                    else:
                        affects_old = affects_new = True
                        action = 'unmod'

                    if not self._newline_marker.match(line):
                        old_line += affects_old
                        new_line += affects_new
                        lines.append({
                            'old_lineno': affects_old and old_line or '',
                            'new_lineno': affects_new and new_line or '',
                            'action': action,
                            'line': self._clean_line(line, command)
                        })

                    line = lineiter.next()

                    if self._newline_marker.match(line):
                        # we need to append to lines, since this is not
                        # counted in the line specs of diff
                        lines.append({
                            'old_lineno': '...',
                            'new_lineno': '...',
                            'action': 'context',
                            'line': self._clean_line(line, command)
                        })

        except StopIteration:
            pass
        return chunks, stats

    def _safe_id(self, idstring):
        """Make a string safe for including in an id attribute.

        The HTML spec says that id attributes 'must begin with
        a letter ([A-Za-z]) and may be followed by any number
        of letters, digits ([0-9]), hyphens ("-"), underscores
        ("_"), colons (":"), and periods (".")'. These regexps
        are slightly over-zealous, in that they remove colons
        and periods unnecessarily.

        Whitespace is transformed into underscores, and then
        anything which is not a hyphen or a character that
        matches \w (alphanumerics and underscore) is removed.

        """
        # Transform all whitespace to underscore
        idstring = re.sub(r'\s', "_", '%s' % idstring)
        # Remove everything that is not a hyphen or a member of \w
        idstring = re.sub(r'(?!-)\W', "", idstring).lower()
        return idstring

    def prepare(self, inline_diff=True):
        """
        Prepare the passed udiff for HTML rendering. It'll return a list
        of dicts with diff information
        """
        parsed = self._parser(inline_diff=inline_diff)
        self.parsed = True
        self.parsed_diff = parsed
        return parsed

    def as_raw(self, diff_lines=None):
        """
        Returns raw string diff
        """
        return self._diff
        #return u''.join(imap(self._line_counter, self._diff.splitlines(1)))

    def as_html(self, table_class='code-difftable', line_class='line',
                new_lineno_class='lineno old', old_lineno_class='lineno new',
                code_class='code', enable_comments=False, parsed_lines=None):
        """
        Return given diff as html table with customized css classes
        """
        def _link_to_if(condition, label, url):
            """
            Generates a link if condition is met or just the label if not.
            """

            if condition:
                return '''<a href="%(url)s">%(label)s</a>''' % {
                    'url': url,
                    'label': label
                }
            else:
                return label
        if not self.parsed:
            self.prepare()

        diff_lines = self.parsed_diff
        if parsed_lines:
            diff_lines = parsed_lines

        _html_empty = True
        _html = []
        _html.append('''<table class="%(table_class)s">\n''' % {
            'table_class': table_class
        })

        for diff in diff_lines:
            for line in diff['chunks']:
                _html_empty = False
                for change in line:
                    _html.append('''<tr class="%(lc)s %(action)s">\n''' % {
                        'lc': line_class,
                        'action': change['action']
                    })
                    anchor_old_id = ''
                    anchor_new_id = ''
                    anchor_old = "%(filename)s_o%(oldline_no)s" % {
                        'filename': self._safe_id(diff['filename']),
                        'oldline_no': change['old_lineno']
                    }
                    anchor_new = "%(filename)s_n%(oldline_no)s" % {
                        'filename': self._safe_id(diff['filename']),
                        'oldline_no': change['new_lineno']
                    }
                    cond_old = (change['old_lineno'] != '...' and
                                change['old_lineno'])
                    cond_new = (change['new_lineno'] != '...' and
                                change['new_lineno'])
                    if cond_old:
                        anchor_old_id = 'id="%s"' % anchor_old
                    if cond_new:
                        anchor_new_id = 'id="%s"' % anchor_new
                    ###########################################################
                    # OLD LINE NUMBER
                    ###########################################################
                    _html.append('''\t<td %(a_id)s class="%(olc)s">''' % {
                        'a_id': anchor_old_id,
                        'olc': old_lineno_class
                    })

                    _html.append('''%(link)s''' % {
                        'link': _link_to_if(True, change['old_lineno'],
                                            '#%s' % anchor_old)
                    })
                    _html.append('''</td>\n''')
                    ###########################################################
                    # NEW LINE NUMBER
                    ###########################################################

                    _html.append('''\t<td %(a_id)s class="%(nlc)s">''' % {
                        'a_id': anchor_new_id,
                        'nlc': new_lineno_class
                    })

                    _html.append('''%(link)s''' % {
                        'link': _link_to_if(True, change['new_lineno'],
                                            '#%s' % anchor_new)
                    })
                    _html.append('''</td>\n''')
                    ###########################################################
                    # CODE
                    ###########################################################
                    comments = '' if enable_comments else 'no-comment'
                    _html.append('''\t<td class="%(cc)s %(inc)s">''' % {
                        'cc': code_class,
                        'inc': comments
                    })
                    _html.append('''\n\t\t<pre>%(code)s</pre>\n''' % {
                        'code': change['line']
                    })

                    _html.append('''\t</td>''')
                    _html.append('''\n</tr>\n''')
        _html.append('''</table>''')
        if _html_empty:
            return None
        return ''.join(_html)

    def stat(self):
        """
        Returns tuple of added, and removed lines for this instance
        """
        return self.adds, self.removes
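Tying the class together, a short hypothetical driver for DiffProcessor; the diff text below is made up, and the prepare()/as_html() surface shown is the same one feed.py and wrapped_diff() use above:

sample_diff = '''\
diff --git a/setup.py b/setup.py
index 0000000..1111111 100644
--- a/setup.py
+++ b/setup.py
@@ -1,2 +1,2 @@
 line one
-line two
+line 2
'''
dp = DiffProcessor(sample_diff, vcs='git', format='gitdiff',
                   diff_limit=32 * 1024)
for f in dp.prepare():                        # list of per-file dicts
    print f['filename'], f['operation'], f['stats']   # -> setup.py M [1, 1]
html = dp.as_html(enable_comments=False)      # <table> markup, or None if empty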
693 class InMemoryBundleRepo(bundlerepository):
693 class InMemoryBundleRepo(bundlerepository):
694 def __init__(self, ui, path, bundlestream):
694 def __init__(self, ui, path, bundlestream):
695 self._tempparent = None
695 self._tempparent = None
696 localrepo.localrepository.__init__(self, ui, path)
696 localrepo.localrepository.__init__(self, ui, path)
697 self.ui.setconfig('phases', 'publish', False)
697 self.ui.setconfig('phases', 'publish', False)
698
698
699 self.bundle = bundlestream
699 self.bundle = bundlestream
700
700
701 # dict with the mapping 'filename' -> position in the bundle
701 # dict with the mapping 'filename' -> position in the bundle
702 self.bundlefilespos = {}
702 self.bundlefilespos = {}
703
703
704
704
def differ(org_repo, org_ref, other_repo, other_ref, discovery_data=None,
           remote_compare=False, context=3, ignore_whitespace=False):
    """
    General differ between branches, bookmarks or revisions of two remote or
    local but related repositories.

    :param org_repo: origin repository (must expose ``scm_instance``)
    :param org_ref: origin ref as a ``(type, name)`` tuple
    :param other_repo: other repository (must expose ``scm_instance``)
    :param other_ref: other ref as a ``(type, name)`` tuple
    :param discovery_data: ``(common, incoming, rheads)`` discovery result,
        required when ``remote_compare`` is True
    :param remote_compare: compare via an in-memory bundle pulled from the
        origin repository instead of a plain local diff
    :param context: number of context lines in the generated diff
    :param ignore_whitespace: ignore whitespace changes in the generated diff
    """

    org_repo_scm = org_repo.scm_instance
    other_repo_scm = other_repo.scm_instance

    org_repo = org_repo_scm._repo
    other_repo = other_repo_scm._repo

    org_ref = org_ref[1]
    other_ref = other_ref[1]

    if org_repo == other_repo:
        log.debug('running diff between %s@%s and %s@%s'
                  % (org_repo.path, org_ref, other_repo.path, other_ref))
        _diff = org_repo_scm.get_diff(rev1=org_ref, rev2=other_ref,
                                      ignore_whitespace=ignore_whitespace,
                                      context=context)
        return _diff

    elif remote_compare:
        opts = diffopts(git=True, ignorews=ignore_whitespace, context=context)
        common, incoming, rheads = discovery_data
        org_repo_peer = localrepo.locallegacypeer(org_repo.local())
        # create a bundle (uncompressed if the other repo is not local)
        if org_repo_peer.capable('getbundle'):
            # disable repo hooks here since it's just a bundle!
            # patch and reset the hooks section of the UI config to not run
            # any hooks on fetching archives with subrepos
            for k, _ in org_repo.ui.configitems('hooks'):
                org_repo.ui.setconfig('hooks', k, None)
            unbundle = org_repo.getbundle('incoming', common=None,
                                          heads=None)

            buf = BytesIO()
            while True:
                chunk = unbundle._stream.read(1024 * 4)
                if not chunk:
                    break
                buf.write(chunk)

            buf.seek(0)
            # replace the chunked _stream with data that supports tell() and seek()
            unbundle._stream = buf

            ui = make_ui('db')
            bundlerepo = InMemoryBundleRepo(ui, path=org_repo.root,
                                            bundlestream=unbundle)

            return ''.join(patch.diff(bundlerepo,
                                      node1=other_repo[other_ref].node(),
                                      node2=org_repo[org_ref].node(),
                                      opts=opts))

    return ''
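A minimal usage sketch for `differ`, not part of the changeset above: it assumes `repo` is a RhodeCode repository model object exposing `scm_instance` (as the function body requires) and that refs are passed as `(type, name)` tuples; the repository variable and branch names are purely illustrative.

# hypothetical call -- `repo` stands for a RhodeCode repository model
# instance backing a Mercurial repo; 'default' and 'stable' are example
# branch names
raw_diff = differ(org_repo=repo, org_ref=('branch', 'default'),
                  other_repo=repo, other_ref=('branch', 'stable'),
                  context=5, ignore_whitespace=True)
# a same-repo comparison takes the get_diff() branch above and returns raw
# diff text, which can then be parsed e.g. by DiffProcessor for rendering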
@@ -1,122 +1,122 b''
import datetime
import functools
import decimal
import imp

__all__ = ['json', 'simplejson', 'stdlibjson']

def _is_aware(value):
    """
    Determines whether a given datetime.time is timezone-aware.

    The logic is described in Python's docs:
    http://docs.python.org/library/datetime.html#datetime.tzinfo
    """
    return (value.tzinfo is not None
            and value.tzinfo.utcoffset(value) is not None)


def _obj_dump(obj):
    """
    Custom function for dumping objects to JSON; if obj has a __json__
    attribute or method defined, it will be used for serialization.

    :param obj: object to serialize
    """

    if isinstance(obj, complex):
        return [obj.real, obj.imag]
    # See "Date Time String Format" in the ECMA-262 specification.
    # some code borrowed from django 1.4
    elif isinstance(obj, datetime.datetime):
        r = obj.isoformat()
        if obj.microsecond:
            r = r[:23] + r[26:]
        if r.endswith('+00:00'):
            r = r[:-6] + 'Z'
        return r
    elif isinstance(obj, datetime.date):
        return obj.isoformat()
    elif isinstance(obj, decimal.Decimal):
        return str(obj)
    elif isinstance(obj, datetime.time):
        if _is_aware(obj):
            raise ValueError("JSON can't represent timezone-aware times.")
        r = obj.isoformat()
        if obj.microsecond:
            r = r[:12]
        return r
    elif isinstance(obj, set):
        return list(obj)
    elif hasattr(obj, '__json__'):
        if callable(obj.__json__):
            return obj.__json__()
        else:
            return obj.__json__
    else:
        raise NotImplementedError

# Import simplejson
try:
    # import simplejson initially
    _sj = imp.load_module('_sj', *imp.find_module('simplejson'))

    def extended_encode(obj):
        try:
            return _obj_dump(obj)
        except NotImplementedError:
            pass
        raise TypeError("%r is not JSON serializable" % (obj,))

    # we handle decimals ourselves to get unified behaviour of json and
    # simplejson
    sj_version = [int(x) for x in _sj.__version__.split('.')]
    major, minor = sj_version[0], sj_version[1]
    if major < 2 or (major == 2 and minor < 1):
        # simplejson < 2.1 doesn't support use_decimal
        _sj.dumps = functools.partial(_sj.dumps,
                                      default=extended_encode)
        _sj.dump = functools.partial(_sj.dump,
                                     default=extended_encode)
    else:
        _sj.dumps = functools.partial(_sj.dumps,
                                      default=extended_encode,
                                      use_decimal=False)
        _sj.dump = functools.partial(_sj.dump,
                                     default=extended_encode,
                                     use_decimal=False)
    simplejson = _sj

except ImportError:
    # no simplejson available, set it to None
    simplejson = None

try:
    # also try the regular stdlib json module
    _json = imp.load_module('_json', *imp.find_module('json'))

    # extended JSON encoder for json
    class ExtendedEncoder(_json.JSONEncoder):
        def default(self, obj):
            try:
                return _obj_dump(obj)
            except NotImplementedError:
                pass
            raise TypeError("%r is not JSON serializable" % (obj,))

    # monkey-patch the JSON encoder to use the extended version
    _json.dumps = functools.partial(_json.dumps, cls=ExtendedEncoder)
    _json.dump = functools.partial(_json.dump, cls=ExtendedEncoder)

    stdlibjson = _json
except ImportError:
    stdlibjson = None

# set all available json modules
if simplejson:
    json = _sj
elif stdlibjson:
    json = _json
else:
    raise ImportError('Could not find any json modules')
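A short sketch, not from the changeset, of the behaviour the patched `dumps`/`dump` gain from `_obj_dump`; the import path below is an assumption, since this hunk does not show where the module lives.

# assumed module path -- adjust to wherever this compat module is located
from rhodecode.lib.ext_json import json

import datetime
import decimal


class Tag(object):
    def __json__(self):
        return {'name': 'beta'}

# whichever backend was picked (simplejson or stdlib json), dumps now routes
# types it cannot handle natively through _obj_dump
print(json.dumps({
    'when': datetime.datetime(2012, 10, 1, 12, 30),  # -> "2012-10-01T12:30:00"
    'price': decimal.Decimal('1.50'),                # -> "1.50" (a string)
    'tags': set(['x', 'y']),                         # sets become JSON lists
    'obj': Tag(),                                    # -> {"name": "beta"}
}))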
@@ -1,25 +1,25 b''
<dl>
  <dt class="file_history">${_('History')}</dt>
  <dd>
    <div>
      <div style="float:left">
        ${h.form(h.url('files_diff_home',repo_name=c.repo_name,f_path=c.f_path),method='get')}
        ${h.hidden('diff2',c.file_changeset.raw_id)}
        ${h.select('diff1',c.file_changeset.raw_id,c.file_history)}
        ${h.submit('diff',_('diff to revision'),class_="ui-btn")}
        ${h.submit('show_rev',_('show at revision'),class_="ui-btn")}
        ${h.hidden('annotate', c.annotate)}
        ${h.end_form()}
      </div>
      <div class="file_author">
        <div class="item">${h.literal(ungettext(u'%s author',u'%s authors',len(c.authors)) % ('<b>%s</b>' % len(c.authors))) }</div>
        %for email, user in c.authors:
          <div class="contributor tooltip" style="float:left" title="${h.tooltip(user)}">
            <div class="gravatar" style="margin:1px"><img alt="gravatar" src="${h.gravatar_url(email, 20)}"/> </div>
          </div>
        %endfor
      </div>
    </div>
    <div style="clear:both"></div>
  </dd>
</dl>