@@ -28,12 +28,14 @@ import logging
 from pylons import url, response, tmpl_context as c
 from pylons.i18n.translation import _
 
+from beaker.cache import cache_region, region_invalidate
 from webhelpers.feedgenerator import Atom1Feed, Rss201rev2Feed
 
 from rhodecode.lib import helpers as h
 from rhodecode.lib.auth import LoginRequired, HasRepoPermissionAnyDecorator
 from rhodecode.lib.base import BaseRepoController
-from rhodecode.lib.diffs import DiffProcessor
+from rhodecode.lib.diffs import DiffProcessor, LimitedDiffContainer
+from rhodecode.model.db import CacheInvalidation
 
 log = logging.getLogger(__name__)
 
@@ -51,6 +53,9 @@ class FeedController(BaseRepoController)
         self.language = 'en-us'
         self.ttl = "5"
         self.feed_nr = 20
+        # we need to protect from parsing huge diffs here other way
+        # we can kill the server, 32*1024 chars is a reasonable limit
+        self.feed_diff_limit = 32 * 1024
 
     def _get_title(self, cs):
         return "%s" % (
@@ -59,26 +64,28 @@ class FeedController(BaseRepoController)
 
     def __changes(self, cs):
         changes = []
-        _diff = cs.diff()
-        # we need to protect from parsing huge diffs here other way
-        # we can kill the server, 32*1024 chars is a reasonable limit
-        HUGE_DIFF = 32 * 1024
-        if len(_diff) > HUGE_DIFF:
-            changes = ['\n ' + _('Changeset was too big and was cut off...')]
-            return changes
-        diffprocessor = DiffProcessor(_diff)
-        stats = diffprocessor.prepare(inline_diff=False)
-        for st in stats:
+        diff_processor = DiffProcessor(cs.diff(),
+                                       diff_limit=self.feed_diff_limit)
+        _parsed = diff_processor.prepare(inline_diff=False)
+        limited_diff = False
+        if isinstance(_parsed, LimitedDiffContainer):
+            limited_diff = True
+
+        for st in _parsed:
             st.update({'added': st['stats'][0],
                        'removed': st['stats'][1]})
             changes.append('\n %(operation)s %(filename)s '
                            '(%(added)s lines added, %(removed)s lines removed)'
                            % st)
+        if limited_diff:
+            changes = changes + ['\n ' +
+                                 _('Changeset was too big and was cut off...')]
         return changes
 
     def __get_desc(self, cs):
         desc_msg = []
-        desc_msg.append('%s %s %s<br/>' % (h.person(cs.author), _('commited on'),
+        desc_msg.append('%s %s %s<br/>' % (h.person(cs.author),
+                                           _('commited on'),
                                            h.fmt_date(cs.date)))
         #branches, tags, bookmarks
         if cs.branch:
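
The hunk above replaces the hard-coded HUGE_DIFF check with DiffProcessor's own diff_limit handling: when the raw diff exceeds the limit, prepare() returns a LimitedDiffContainer and a truncation notice is appended after the per-file stats. Below is a minimal sketch of that pattern, assuming a RhodeCode environment matching this changeset where these imports resolve; summarize_changeset is a hypothetical helper (not part of the changeset) and cs stands for any changeset object exposing .diff().

from rhodecode.lib.diffs import DiffProcessor, LimitedDiffContainer

def summarize_changeset(cs, diff_limit=32 * 1024):
    # prepare() yields one dict per touched file; when the raw diff is larger
    # than diff_limit it comes back as a LimitedDiffContainer instead of a
    # plain list, which is the signal used above to flag truncation
    parsed = DiffProcessor(cs.diff(),
                           diff_limit=diff_limit).prepare(inline_diff=False)
    lines = []
    for st in parsed:
        # each entry carries 'filename' and 'stats' (lines added, lines removed)
        lines.append('%s (+%s/-%s)' % (st['filename'],
                                       st['stats'][0], st['stats'][1]))
    if isinstance(parsed, LimitedDiffContainer):
        lines.append('diff truncated at %d chars' % diff_limit)
    return '\n'.join(lines)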
@@ -102,46 +109,67 @@ class FeedController(BaseRepoController)
 
     def atom(self, repo_name):
         """Produce an atom-1.0 feed via feedgenerator module"""
-        feed = Atom1Feed(
-            title=self.title % repo_name,
-            link=url('summary_home', repo_name=repo_name,
-                     qualified=True),
-            description=self.description % repo_name,
-            language=self.language,
-            ttl=self.ttl
-        )
-
-        for cs in reversed(list(c.rhodecode_repo[-self.feed_nr:])):
-            feed.add_item(title=self._get_title(cs),
-                          link=url('changeset_home', repo_name=repo_name,
-                                   revision=cs.raw_id, qualified=True),
-                          author_name=cs.author,
-                          description=''.join(self.__get_desc(cs)),
-                          pubdate=cs.date,
-                          )
-
-        response.content_type = feed.mime_type
-        return feed.writeString('utf-8')
+
+        @cache_region('long_term')
+        def _get_feed_from_cache(key):
+            feed = Atom1Feed(
+                title=self.title % repo_name,
+                link=url('summary_home', repo_name=repo_name,
+                         qualified=True),
+                description=self.description % repo_name,
+                language=self.language,
+                ttl=self.ttl
+            )
+
+            for cs in reversed(list(c.rhodecode_repo[-self.feed_nr:])):
+                feed.add_item(title=self._get_title(cs),
+                              link=url('changeset_home', repo_name=repo_name,
+                                       revision=cs.raw_id, qualified=True),
+                              author_name=cs.author,
+                              description=''.join(self.__get_desc(cs)),
+                              pubdate=cs.date,
+                              )
+
+            response.content_type = feed.mime_type
+            return feed.writeString('utf-8')
+
+        key = repo_name + '_ATOM'
+        inv = CacheInvalidation.invalidate(key)
+        if inv is not None:
+            region_invalidate(_get_feed_from_cache, None, key)
+            CacheInvalidation.set_valid(inv.cache_key)
+        return _get_feed_from_cache(key)
 
     def rss(self, repo_name):
         """Produce an rss2 feed via feedgenerator module"""
-        feed = Rss201rev2Feed(
-            title=self.title % repo_name,
-            link=url('summary_home', repo_name=repo_name,
-                     qualified=True),
-            description=self.description % repo_name,
-            language=self.language,
-            ttl=self.ttl
-        )
-
-        for cs in reversed(list(c.rhodecode_repo[-self.feed_nr:])):
-            feed.add_item(title=self._get_title(cs),
-                          link=url('changeset_home', repo_name=repo_name,
-                                   revision=cs.raw_id, qualified=True),
-                          author_name=cs.author,
-                          description=''.join(self.__get_desc(cs)),
-                          pubdate=cs.date,
-                          )
-
-        response.content_type = feed.mime_type
-        return feed.writeString('utf-8')
+
+        @cache_region('long_term')
+        def _get_feed_from_cache(key):
+            feed = Rss201rev2Feed(
+                title=self.title % repo_name,
+                link=url('summary_home', repo_name=repo_name,
+                         qualified=True),
+                description=self.description % repo_name,
+                language=self.language,
+                ttl=self.ttl
+            )
+
+            for cs in reversed(list(c.rhodecode_repo[-self.feed_nr:])):
+                feed.add_item(title=self._get_title(cs),
+                              link=url('changeset_home', repo_name=repo_name,
+                                       revision=cs.raw_id, qualified=True),
+                              author_name=cs.author,
+                              description=''.join(self.__get_desc(cs)),
+                              pubdate=cs.date,
+                              )
+
+            response.content_type = feed.mime_type
+            return feed.writeString('utf-8')
+
+        key = repo_name + '_RSS'
+        inv = CacheInvalidation.invalidate(key)
+        if inv is not None:
+            region_invalidate(_get_feed_from_cache, None, key)
+            CacheInvalidation.set_valid(inv.cache_key)
+        return _get_feed_from_cache(key)
+
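
atom() and rss() now build the feed inside an inner function decorated with Beaker's @cache_region('long_term') and pair it with RhodeCode's CacheInvalidation bookkeeping, so the feed XML is only regenerated when the repository's cache entry has been marked invalid. The snippet below is a self-contained sketch of just the Beaker half of that pattern; the in-memory region configuration is illustrative (RhodeCode wires up 'long_term' from its .ini file) and the key string is made up.

from beaker.cache import cache_regions, cache_region, region_invalidate

# illustrative region setup; type/expire values are assumptions, not from the changeset
cache_regions.update({
    'long_term': {'type': 'memory', 'expire': 3600},
})

@cache_region('long_term')
def _get_feed_from_cache(key):
    # the body only runs on a cache miss for this key
    print('regenerating feed for %s' % key)
    return '<feed for %s>' % key

print(_get_feed_from_cache('repo_ATOM'))   # miss: generates and caches
print(_get_feed_from_cache('repo_ATOM'))   # hit: served from the region

# when new commits invalidate the repo, drop the cached entry for this key;
# passing None lets Beaker look the region up from the decorated function,
# mirroring the region_invalidate(_get_feed_from_cache, None, key) call above
region_invalidate(_get_feed_from_cache, None, 'repo_ATOM')
print(_get_feed_from_cache('repo_ATOM'))   # miss again: regenerated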
@@ -279,6 +279,18 @@ def safe_str(unicode_, to_encoding=None)
     return safe_str
 
 
+def remove_suffix(s, suffix):
+    if s.endswith(suffix):
+        s = s[:-1 * len(suffix)]
+    return s
+
+
+def remove_prefix(s, prefix):
+    if s.startswith(prefix):
+        s = s[len(prefix):]
+    return s
+
+
 def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
     """
     Custom engine_from_config functions that makes sure we use NullPool for
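
For clarity, a couple of made-up calls showing the intended behaviour of the two helpers added above; the values are invented, but the '_ATOM' suffix mirrors the feed cache keys used elsewhere in this changeset.

assert remove_suffix('myrepo_ATOM', '_ATOM') == 'myrepo'
assert remove_suffix('myrepo', '_ATOM') == 'myrepo'            # no-op when absent
assert remove_prefix('instance1-myrepo', 'instance1-') == 'myrepo'
assert remove_prefix('myrepo', 'instance1-') == 'myrepo'       # no-op when absent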
@@ -46,7 +46,7 @@ from rhodecode.lib.vcs.exceptions import
 from rhodecode.lib.vcs.utils.lazy import LazyProperty
 
 from rhodecode.lib.utils2 import str2bool, safe_str, get_changeset_safe, \
-    safe_unicode
+    safe_unicode, remove_suffix
 from rhodecode.lib.compat import json
 from rhodecode.lib.caching_query import FromCache
 
@@ -941,6 +941,7 @@ class Repository(Base, BaseModel):
 
     @LazyProperty
    def scm_instance(self):
+        return self.scm_instance_cached()
         return self.__get_instance()
 
     def scm_instance_cached(self, cache_map=None):
@@ -1440,7 +1441,11 @@ class CacheInvalidation(Base, BaseModel)
         iid = rhodecode.CONFIG.get('instance_id')
         if iid:
             prefix = iid
-        return "%s%s" % (prefix, key), prefix, key.rstrip('_README')
+        #remove specific suffixes like _README or _RSS
+        key = remove_suffix(key, '_README')
+        key = remove_suffix(key, '_RSS')
+        key = remove_suffix(key, '_ATOM')
+        return "%s%s" % (prefix, key), prefix, key
 
     @classmethod
     def get_by_key(cls, key):
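
The return statement patched above now normalises the cache key with the new remove_suffix helper instead of str.rstrip: rstrip('_README') strips any trailing characters from the set {_, R, E, A, D, M} rather than the literal suffix, so it could eat trailing letters of a repository name. Below is a standalone sketch of that normalisation; make_cache_key is a hypothetical stand-in for the CacheInvalidation method being patched, remove_suffix is repeated from the utils2 hunk so the snippet runs on its own, and the instance-id value is made up.

def remove_suffix(s, suffix):
    # repeated from the utils2 hunk above so this snippet is self-contained
    if s.endswith(suffix):
        s = s[:-1 * len(suffix)]
    return s


def make_cache_key(key, instance_id=''):
    # hypothetical stand-in for the CacheInvalidation key helper patched above
    prefix = instance_id or ''
    # strip the well-known suffixes so e.g. 'myrepo_ATOM' resolves to the
    # plain repository name used in the invalidation records
    for suffix in ('_README', '_RSS', '_ATOM'):
        key = remove_suffix(key, suffix)
    return "%s%s" % (prefix, key), prefix, key


assert make_cache_key('myrepo_ATOM', 'instance1') == \
    ('instance1myrepo', 'instance1', 'myrepo')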