Show More
@@ -1,60 +1,143 | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """ |
|
3 | 3 | rhodecode.controllers.admin.admin |
|
4 | 4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
|
5 | 5 | |
|
6 | 6 | Controller for Admin panel of Rhodecode |
|
7 | 7 | |
|
8 | 8 | :created_on: Apr 7, 2010 |
|
9 | 9 | :author: marcink |
|
10 | 10 | :copyright: (C) 2010-2012 Marcin Kuzminski <marcin@python-works.com> |
|
11 | 11 | :license: GPLv3, see COPYING for more details. |
|
12 | 12 | """ |
|
13 | 13 | # This program is free software: you can redistribute it and/or modify |
|
14 | 14 | # it under the terms of the GNU General Public License as published by |
|
15 | 15 | # the Free Software Foundation, either version 3 of the License, or |
|
16 | 16 | # (at your option) any later version. |
|
17 | 17 | # |
|
18 | 18 | # This program is distributed in the hope that it will be useful, |
|
19 | 19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
20 | 20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
21 | 21 | # GNU General Public License for more details. |
|
22 | 22 | # |
|
23 | 23 | # You should have received a copy of the GNU General Public License |
|
24 | 24 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
25 | 25 | |
|
26 | 26 | import logging |
|
27 | 27 | |
|
28 | from pylons import request, tmpl_context as c | |
|
28 | from pylons import request, tmpl_context as c, url | |
|
29 | 29 | from sqlalchemy.orm import joinedload |
|
30 | 30 | from webhelpers.paginate import Page |
|
31 | from whoosh.qparser.default import QueryParser | |
|
32 | from whoosh import query | |
|
33 | from sqlalchemy.sql.expression import or_ | |
|
31 | 34 | |
|
32 | 35 | from rhodecode.lib.auth import LoginRequired, HasPermissionAllDecorator |
|
33 | 36 | from rhodecode.lib.base import BaseController, render |
|
34 | from rhodecode.model.db import UserLog | |
|
35 | from rhodecode.lib.utils2 import safe_int | |
|
37 | from rhodecode.model.db import UserLog, User | |
|
38 | from rhodecode.lib.utils2 import safe_int, remove_prefix | |
|
39 | from rhodecode.lib.indexers import JOURNAL_SCHEMA | |
|
40 | ||
|
36 | 41 | |
|
37 | 42 | log = logging.getLogger(__name__) |
|
38 | 43 | |
|
39 | 44 | |
|
45 | def _filter(user_log, search_term): | |
|
46 | """ | |
|
47 | Filters sqlalchemy user_log based on search_term with whoosh Query language | |
|
48 | http://packages.python.org/Whoosh/querylang.html | |
|
49 | ||
|
50 | :param user_log: | |
|
51 | :param search_term: | |
|
52 | """ | |
|
53 | qry = None | |
|
54 | if search_term: | |
|
55 | qp = QueryParser('repository', schema=JOURNAL_SCHEMA) | |
|
56 | qry = qp.parse(unicode(search_term)) | |
|
57 | log.debug('Filtering using query %r' % qry) | |
|
58 | ||
|
59 | def get_filterion(field, val, term): | |
|
60 | if field == 'repository': | |
|
61 | field = getattr(UserLog, 'repository_name') | |
|
62 | elif field == 'ip': | |
|
63 | field = getattr(UserLog, 'user_ip') | |
|
64 | elif field == 'date': | |
|
65 | field = getattr(UserLog, 'action_date') | |
|
66 | elif field == 'username': | |
|
67 | ##special case for username | |
|
68 | if isinstance(term, query.Wildcard): | |
|
69 | #only support wildcards with * at beginning | 
|
70 | val = remove_prefix(val, prefix='*') | |
|
71 | return getattr(UserLog, 'user_id').in_( | |
|
72 | [x.user_id for x in | |
|
73 | User.query().filter(User.username.endswith(val))]) | |
|
74 | elif isinstance(term, query.Prefix): | |
|
75 | return getattr(UserLog, 'user_id').in_( | |
|
76 | [x.user_id for x in | |
|
77 | User.query().filter(User.username.startswith(val))]) | |
|
78 | # term == exact match, case insensitive | |
|
79 | field = getattr(UserLog, 'user') | |
|
80 | val = User.get_by_username(val, case_insensitive=True) | |
|
81 | ||
|
82 | else: | |
|
83 | field = getattr(UserLog, field) | |
|
84 | ||
|
85 | #sql filtering | |
|
86 | if isinstance(term, query.Wildcard): | |
|
87 | return field.endswith(val) | 
|
88 | elif isinstance(term, query.Prefix): | |
|
89 | return field.startswith(val) | |
|
90 | return field == val | |
|
91 | ||
|
92 | if isinstance(qry, (query.And, query.Term, query.Prefix, query.Wildcard)): | |
|
93 | if not isinstance(qry, query.And): | |
|
94 | qry = [qry] | |
|
95 | for term in qry: | |
|
96 | field = term.fieldname | |
|
97 | val = term.text | |
|
98 | user_log = user_log.filter(get_filterion(field, val, term)) | |
|
99 | elif isinstance(qry, query.Or): | |
|
100 | filters = [] | |
|
101 | for term in qry: | |
|
102 | field = term.fieldname | |
|
103 | val = term.text | |
|
104 | if isinstance(term, query.Term): | |
|
105 | filters.append(get_filterion(field, val, term)) | |
|
106 | user_log = user_log.filter(or_(*filters)) | |
|
107 | ||
|
108 | return user_log | |
|
109 | ||
|
110 | ||
|
40 | 111 | class AdminController(BaseController): |
|
41 | 112 | |
|
42 | 113 | @LoginRequired() |
|
43 | 114 | def __before__(self): |
|
44 | 115 | super(AdminController, self).__before__() |
|
45 | 116 | |
|
46 | 117 | @HasPermissionAllDecorator('hg.admin') |
|
47 | 118 | def index(self): |
|
48 | ||
|
49 | 119 | users_log = UserLog.query()\ |
|
50 | 120 | .options(joinedload(UserLog.user))\ |
|
51 |
.options(joinedload(UserLog.repository)) |
|
|
52 | .order_by(UserLog.action_date.desc()) | |
|
121 | .options(joinedload(UserLog.repository)) | |
|
122 | ||
|
123 | #FILTERING | |
|
124 | c.search_term = request.GET.get('filter') | |
|
125 | try: | |
|
126 | users_log = _filter(users_log, c.search_term) | |
|
127 | except: | |
|
128 | # we want this to crash for now | |
|
129 | raise | |
|
130 | ||
|
131 | users_log = users_log.order_by(UserLog.action_date.desc()) | |
|
53 | 132 | |
|
54 | 133 | p = safe_int(request.params.get('page', 1), 1) |
|
55 | c.users_log = Page(users_log, page=p, items_per_page=10) | |
|
134 | ||
|
135 | def url_generator(**kw): | |
|
136 | return url.current(filter=c.search_term, **kw) | |
|
137 | ||
|
138 | c.users_log = Page(users_log, page=p, items_per_page=10, url=url_generator) | |
|
56 | 139 | c.log_data = render('admin/admin_log.html') |
|
57 | 140 | |
|
58 | 141 | if request.environ.get('HTTP_X_PARTIAL_XHR'): |
|
59 | 142 | return c.log_data |
|
60 | 143 | return render('admin/admin.html') |
@@ -1,271 +1,280 | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """ |
|
3 | 3 | rhodecode.lib.indexers.__init__ |
|
4 | 4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
|
5 | 5 | |
|
6 | 6 | Whoosh indexing module for RhodeCode |
|
7 | 7 | |
|
8 | 8 | :created_on: Aug 17, 2010 |
|
9 | 9 | :author: marcink |
|
10 | 10 | :copyright: (C) 2010-2012 Marcin Kuzminski <marcin@python-works.com> |
|
11 | 11 | :license: GPLv3, see COPYING for more details. |
|
12 | 12 | """ |
|
13 | 13 | # This program is free software: you can redistribute it and/or modify |
|
14 | 14 | # it under the terms of the GNU General Public License as published by |
|
15 | 15 | # the Free Software Foundation, either version 3 of the License, or |
|
16 | 16 | # (at your option) any later version. |
|
17 | 17 | # |
|
18 | 18 | # This program is distributed in the hope that it will be useful, |
|
19 | 19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
20 | 20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
21 | 21 | # GNU General Public License for more details. |
|
22 | 22 | # |
|
23 | 23 | # You should have received a copy of the GNU General Public License |
|
24 | 24 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
25 | 25 | import os |
|
26 | 26 | import sys |
|
27 | 27 | import traceback |
|
28 | 28 | import logging |
|
29 | 29 | from os.path import dirname as dn, join as jn |
|
30 | 30 | |
|
31 | 31 | #to get the rhodecode import |
|
32 | 32 | sys.path.append(dn(dn(dn(os.path.realpath(__file__))))) |
|
33 | 33 | |
|
34 | 34 | from string import strip |
|
35 | 35 | from shutil import rmtree |
|
36 | 36 | |
|
37 | 37 | from whoosh.analysis import RegexTokenizer, LowercaseFilter, StopFilter |
|
38 | from whoosh.fields import TEXT, ID, STORED, NUMERIC, BOOLEAN, Schema, FieldType | |
|
38 | from whoosh.fields import TEXT, ID, STORED, NUMERIC, BOOLEAN, Schema, FieldType, DATETIME | |
|
39 | 39 | from whoosh.index import create_in, open_dir |
|
40 | 40 | from whoosh.formats import Characters |
|
41 | 41 | from whoosh.highlight import highlight, HtmlFormatter, ContextFragmenter |
|
42 | 42 | |
|
43 | 43 | from webhelpers.html.builder import escape, literal |
|
44 | 44 | from sqlalchemy import engine_from_config |
|
45 | 45 | |
|
46 | 46 | from rhodecode.model import init_model |
|
47 | 47 | from rhodecode.model.scm import ScmModel |
|
48 | 48 | from rhodecode.model.repo import RepoModel |
|
49 | 49 | from rhodecode.config.environment import load_environment |
|
50 | 50 | from rhodecode.lib.utils2 import LazyProperty |
|
51 | 51 | from rhodecode.lib.utils import BasePasterCommand, Command, add_cache,\ |
|
52 | 52 | load_rcextensions |
|
53 | 53 | |
|
54 | 54 | log = logging.getLogger(__name__) |
|
55 | 55 | |
|
56 | 56 | # CUSTOM ANALYZER wordsplit + lowercase filter |
|
57 | 57 | ANALYZER = RegexTokenizer(expression=r"\w+") | LowercaseFilter() |
|
58 | 58 | |
|
59 | 59 | #INDEX SCHEMA DEFINITION |
|
60 | 60 | SCHEMA = Schema( |
|
61 | 61 | fileid=ID(unique=True), |
|
62 | 62 | owner=TEXT(), |
|
63 | 63 | repository=TEXT(stored=True), |
|
64 | 64 | path=TEXT(stored=True), |
|
65 | 65 | content=FieldType(format=Characters(), analyzer=ANALYZER, |
|
66 | 66 | scorable=True, stored=True), |
|
67 | 67 | modtime=STORED(), |
|
68 | 68 | extension=TEXT(stored=True) |
|
69 | 69 | ) |
|
70 | 70 | |
|
71 | 71 | IDX_NAME = 'HG_INDEX' |
|
72 | 72 | FORMATTER = HtmlFormatter('span', between='\n<span class="break">...</span>\n') |
|
73 | 73 | FRAGMENTER = ContextFragmenter(200) |
|
74 | 74 | |
|
75 | 75 | CHGSETS_SCHEMA = Schema( |
|
76 | 76 | raw_id=ID(unique=True, stored=True), |
|
77 | 77 | date=NUMERIC(stored=True), |
|
78 | 78 | last=BOOLEAN(), |
|
79 | 79 | owner=TEXT(), |
|
80 | 80 | repository=ID(unique=True, stored=True), |
|
81 | 81 | author=TEXT(stored=True), |
|
82 | 82 | message=FieldType(format=Characters(), analyzer=ANALYZER, |
|
83 | 83 | scorable=True, stored=True), |
|
84 | 84 | parents=TEXT(), |
|
85 | 85 | added=TEXT(), |
|
86 | 86 | removed=TEXT(), |
|
87 | 87 | changed=TEXT(), |
|
88 | 88 | ) |
|
89 | 89 | |
|
90 | 90 | CHGSET_IDX_NAME = 'CHGSET_INDEX' |
|
91 | 91 | |
|
92 | # used only to generate queries in journal | |
|
93 | JOURNAL_SCHEMA = Schema( | |
|
94 | username=TEXT(), | |
|
95 | date=DATETIME(), | |
|
96 | action=TEXT(), | |
|
97 | repository=TEXT(), | |
|
98 | ip=TEXT(), | |
|
99 | ) | |
|
100 | ||
|
92 | 101 | |
|
93 | 102 | class MakeIndex(BasePasterCommand): |
|
94 | 103 | |
|
95 | 104 | max_args = 1 |
|
96 | 105 | min_args = 1 |
|
97 | 106 | |
|
98 | 107 | usage = "CONFIG_FILE" |
|
99 | 108 | summary = "Creates index for full text search given configuration file" |
|
100 | 109 | group_name = "RhodeCode" |
|
101 | 110 | takes_config_file = -1 |
|
102 | 111 | parser = Command.standard_parser(verbose=True) |
|
103 | 112 | |
|
104 | 113 | def command(self): |
|
105 | 114 | logging.config.fileConfig(self.path_to_ini_file) |
|
106 | 115 | from pylons import config |
|
107 | 116 | add_cache(config) |
|
108 | 117 | engine = engine_from_config(config, 'sqlalchemy.db1.') |
|
109 | 118 | init_model(engine) |
|
110 | 119 | index_location = config['index_dir'] |
|
111 | 120 | repo_location = self.options.repo_location \ |
|
112 | 121 | if self.options.repo_location else RepoModel().repos_path |
|
113 | 122 | repo_list = map(strip, self.options.repo_list.split(',')) \ |
|
114 | 123 | if self.options.repo_list else None |
|
115 | 124 | repo_update_list = map(strip, self.options.repo_update_list.split(',')) \ |
|
116 | 125 | if self.options.repo_update_list else None |
|
117 | 126 | load_rcextensions(config['here']) |
|
118 | 127 | #====================================================================== |
|
119 | 128 | # WHOOSH DAEMON |
|
120 | 129 | #====================================================================== |
|
121 | 130 | from rhodecode.lib.pidlock import LockHeld, DaemonLock |
|
122 | 131 | from rhodecode.lib.indexers.daemon import WhooshIndexingDaemon |
|
123 | 132 | try: |
|
124 | 133 | l = DaemonLock(file_=jn(dn(dn(index_location)), 'make_index.lock')) |
|
125 | 134 | WhooshIndexingDaemon(index_location=index_location, |
|
126 | 135 | repo_location=repo_location, |
|
127 | 136 | repo_list=repo_list, |
|
128 | 137 | repo_update_list=repo_update_list)\ |
|
129 | 138 | .run(full_index=self.options.full_index) |
|
130 | 139 | l.release() |
|
131 | 140 | except LockHeld: |
|
132 | 141 | sys.exit(1) |
|
133 | 142 | |
|
134 | 143 | def update_parser(self): |
|
135 | 144 | self.parser.add_option('--repo-location', |
|
136 | 145 | action='store', |
|
137 | 146 | dest='repo_location', |
|
138 | 147 | help="Specifies repositories location to index OPTIONAL", |
|
139 | 148 | ) |
|
140 | 149 | self.parser.add_option('--index-only', |
|
141 | 150 | action='store', |
|
142 | 151 | dest='repo_list', |
|
143 | 152 | help="Specifies a comma separated list of repositories " 
|
144 | 153 | "to build index on. If not given all repositories " |
|
145 | 154 | "are scanned for indexing. OPTIONAL", |
|
146 | 155 | ) |
|
147 | 156 | self.parser.add_option('--update-only', |
|
148 | 157 | action='store', |
|
149 | 158 | dest='repo_update_list', |
|
150 | 159 | help="Specifies a comma separated list of repositories " 
|
151 | 160 | "to re-build index on. OPTIONAL", |
|
152 | 161 | ) |
|
153 | 162 | self.parser.add_option('-f', |
|
154 | 163 | action='store_true', |
|
155 | 164 | dest='full_index', |
|
156 | 165 | help="Specifies that index should be made full i.e" |
|
157 | 166 | " destroy old and build from scratch", |
|
158 | 167 | default=False) |
|
159 | 168 | |
|
160 | 169 | |
|
161 | 170 | class WhooshResultWrapper(object): |
|
162 | 171 | def __init__(self, search_type, searcher, matcher, highlight_items, |
|
163 | 172 | repo_location): |
|
164 | 173 | self.search_type = search_type |
|
165 | 174 | self.searcher = searcher |
|
166 | 175 | self.matcher = matcher |
|
167 | 176 | self.highlight_items = highlight_items |
|
168 | 177 | self.fragment_size = 200 |
|
169 | 178 | self.repo_location = repo_location |
|
170 | 179 | |
|
171 | 180 | @LazyProperty |
|
172 | 181 | def doc_ids(self): |
|
173 | 182 | docs_id = [] |
|
174 | 183 | while self.matcher.is_active(): |
|
175 | 184 | docnum = self.matcher.id() |
|
176 | 185 | chunks = [offsets for offsets in self.get_chunks()] |
|
177 | 186 | docs_id.append([docnum, chunks]) |
|
178 | 187 | self.matcher.next() |
|
179 | 188 | return docs_id |
|
180 | 189 | |
|
181 | 190 | def __str__(self): |
|
182 | 191 | return '<%s at %s>' % (self.__class__.__name__, len(self.doc_ids)) |
|
183 | 192 | |
|
184 | 193 | def __repr__(self): |
|
185 | 194 | return self.__str__() |
|
186 | 195 | |
|
187 | 196 | def __len__(self): |
|
188 | 197 | return len(self.doc_ids) |
|
189 | 198 | |
|
190 | 199 | def __iter__(self): |
|
191 | 200 | """ |
|
192 | 201 | Allows Iteration over results,and lazy generate content |
|
193 | 202 | |
|
194 | 203 | *Requires* implementation of ``__getitem__`` method. |
|
195 | 204 | """ |
|
196 | 205 | for docid in self.doc_ids: |
|
197 | 206 | yield self.get_full_content(docid) |
|
198 | 207 | |
|
199 | 208 | def __getitem__(self, key): |
|
200 | 209 | """ |
|
201 | 210 | Slicing of resultWrapper |
|
202 | 211 | """ |
|
203 | 212 | i, j = key.start, key.stop |
|
204 | 213 | |
|
205 | 214 | slices = [] |
|
206 | 215 | for docid in self.doc_ids[i:j]: |
|
207 | 216 | slices.append(self.get_full_content(docid)) |
|
208 | 217 | return slices |
|
209 | 218 | |
|
210 | 219 | def get_full_content(self, docid): |
|
211 | 220 | res = self.searcher.stored_fields(docid[0]) |
|
212 | 221 | log.debug('result: %s' % res) |
|
213 | 222 | if self.search_type == 'content': |
|
214 | 223 | full_repo_path = jn(self.repo_location, res['repository']) |
|
215 | 224 | f_path = res['path'].split(full_repo_path)[-1] |
|
216 | 225 | f_path = f_path.lstrip(os.sep) |
|
217 | 226 | content_short = self.get_short_content(res, docid[1]) |
|
218 | 227 | res.update({'content_short': content_short, |
|
219 | 228 | 'content_short_hl': self.highlight(content_short), |
|
220 | 229 | 'f_path': f_path |
|
221 | 230 | }) |
|
222 | 231 | elif self.search_type == 'path': |
|
223 | 232 | full_repo_path = jn(self.repo_location, res['repository']) |
|
224 | 233 | f_path = res['path'].split(full_repo_path)[-1] |
|
225 | 234 | f_path = f_path.lstrip(os.sep) |
|
226 | 235 | res.update({'f_path': f_path}) |
|
227 | 236 | elif self.search_type == 'message': |
|
228 | 237 | res.update({'message_hl': self.highlight(res['message'])}) |
|
229 | 238 | |
|
230 | 239 | log.debug('result: %s' % res) |
|
231 | 240 | |
|
232 | 241 | return res |
|
233 | 242 | |
|
234 | 243 | def get_short_content(self, res, chunks): |
|
235 | 244 | |
|
236 | 245 | return ''.join([res['content'][chunk[0]:chunk[1]] for chunk in chunks]) |
|
237 | 246 | |
|
238 | 247 | def get_chunks(self): |
|
239 | 248 | """ |
|
240 | 249 | Smart function that implements chunking the content |
|
241 | 250 | but not overlap chunks so it doesn't highlight the same |
|
242 | 251 | close occurrences twice. |
|
243 | 252 | |
|
244 | 253 | :param matcher: |
|
245 | 254 | :param size: |
|
246 | 255 | """ |
|
247 | 256 | memory = [(0, 0)] |
|
248 | 257 | if self.matcher.supports('positions'): |
|
249 | 258 | for span in self.matcher.spans(): |
|
250 | 259 | start = span.startchar or 0 |
|
251 | 260 | end = span.endchar or 0 |
|
252 | 261 | start_offseted = max(0, start - self.fragment_size) |
|
253 | 262 | end_offseted = end + self.fragment_size |
|
254 | 263 | |
|
255 | 264 | if start_offseted < memory[-1][1]: |
|
256 | 265 | start_offseted = memory[-1][1] |
|
257 | 266 | memory.append((start_offseted, end_offseted,)) |
|
258 | 267 | yield (start_offseted, end_offseted,) |
|
259 | 268 | |
|
260 | 269 | def highlight(self, content, top=5): |
|
261 | 270 | if self.search_type not in ['content', 'message']: |
|
262 | 271 | return '' |
|
263 | 272 | hl = highlight( |
|
264 | 273 | text=content, |
|
265 | 274 | terms=self.highlight_items, |
|
266 | 275 | analyzer=ANALYZER, |
|
267 | 276 | fragmenter=FRAGMENTER, |
|
268 | 277 | formatter=FORMATTER, |
|
269 | 278 | top=top |
|
270 | 279 | ) |
|
271 | 280 | return hl |
@@ -1,28 +1,45 | |||
|
1 | 1 | ## -*- coding: utf-8 -*- |
|
2 | 2 | <%inherit file="/base/base.html"/> |
|
3 | 3 | |
|
4 | 4 | <%def name="title()"> |
|
5 | 5 | ${_('Admin journal')} - ${c.rhodecode_name} |
|
6 | 6 | </%def> |
|
7 | 7 | |
|
8 | 8 | <%def name="breadcrumbs_links()"> |
|
9 | <form id="filter_form"> | |
|
10 | <input class="q_filter_box" id="q_filter" size="15" type="text" name="filter" value="${c.search_term or _('quick filter...')}"/> | |
|
11 | <input type='submit' value="${_('filter')}" class="ui-btn"/> | |
|
9 | 12 | ${_('Admin journal')} |
|
13 | </form> | |
|
14 | ${h.end_form()} | |
|
10 | 15 | </%def> |
|
11 | 16 | |
|
12 | 17 | <%def name="page_nav()"> |
|
13 | 18 | ${self.menu('admin')} |
|
14 | 19 | </%def> |
|
15 | 20 | <%def name="main()"> |
|
16 | 21 | <div class="box"> |
|
17 | 22 | <!-- box / title --> |
|
18 | 23 | <div class="title"> |
|
19 | 24 | ${self.breadcrumbs()} |
|
20 | 25 | </div> |
|
21 | 26 | <!-- end box / title --> |
|
22 | 27 | <div class="table"> |
|
23 | 28 | <div id="user_log"> |
|
24 | 29 | ${c.log_data} |
|
25 | 30 | </div> |
|
26 | 31 | </div> |
|
27 | 32 | </div> |
|
33 | ||
|
34 | <script> | |
|
35 | YUE.on('q_filter','click',function(){ | |
|
36 | YUD.get('q_filter').value = ''; | |
|
37 | }); | |
|
38 | YUE.on('filter_form','submit',function(e){ | |
|
39 | YUE.preventDefault(e) | |
|
40 | var val = YUD.get('q_filter').value; | |
|
41 | window.location = "${url.current(filter='__FILTER__')}".replace('__FILTER__',val); | |
|
42 | }); | |
|
43 | </script> | |
|
28 | 44 | </%def> |
|
45 |
General Comments 0
You need to be logged in to leave comments.
Login now