when indexing changesets use the raw_id to locate the point from...
Indra Talip
r2643:2ad50c44 beta
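
Summary of the change: the incremental changeset indexer now resumes from the raw_id (the immutable changeset hash) rather than a numeric revision. The numeric revision field is dropped from CHGSETS_SCHEMA, index_changesets() takes a full sha (start_rev=None meaning "index from the first changeset") and iterates via repo.get_changesets(start=...), and update_changeset_index() uses the stored raw_id of the last indexed changeset as its resume point. The author-search functional test is updated accordingly, since revision: is no longer a queryable field.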
--- a/rhodecode/lib/indexers/__init__.py
+++ b/rhodecode/lib/indexers/__init__.py
@@ -1,265 +1,264 @@
 # -*- coding: utf-8 -*-
 """
     rhodecode.lib.indexers.__init__
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

     Whoosh indexing module for RhodeCode

     :created_on: Aug 17, 2010
     :author: marcink
     :copyright: (C) 2010-2012 Marcin Kuzminski <marcin@python-works.com>
     :license: GPLv3, see COPYING for more details.
 """
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 import os
 import sys
 import traceback
 import logging
 from os.path import dirname as dn, join as jn

 # to get the rhodecode import
 sys.path.append(dn(dn(dn(os.path.realpath(__file__)))))

 from string import strip
 from shutil import rmtree

 from whoosh.analysis import RegexTokenizer, LowercaseFilter, StopFilter
 from whoosh.fields import TEXT, ID, STORED, NUMERIC, BOOLEAN, Schema, FieldType
 from whoosh.index import create_in, open_dir
 from whoosh.formats import Characters
 from whoosh.highlight import highlight, HtmlFormatter, ContextFragmenter

 from webhelpers.html.builder import escape, literal
 from sqlalchemy import engine_from_config

 from rhodecode.model import init_model
 from rhodecode.model.scm import ScmModel
 from rhodecode.model.repo import RepoModel
 from rhodecode.config.environment import load_environment
 from rhodecode.lib.utils2 import LazyProperty
 from rhodecode.lib.utils import BasePasterCommand, Command, add_cache,\
     load_rcextensions

 log = logging.getLogger(__name__)

 # CUSTOM ANALYZER wordsplit + lowercase filter
 ANALYZER = RegexTokenizer(expression=r"\w+") | LowercaseFilter()

 # INDEX SCHEMA DEFINITION
 SCHEMA = Schema(
     fileid=ID(unique=True),
     owner=TEXT(),
     repository=TEXT(stored=True),
     path=TEXT(stored=True),
     content=FieldType(format=Characters(), analyzer=ANALYZER,
                       scorable=True, stored=True),
     modtime=STORED(),
     extension=TEXT(stored=True)
 )

 IDX_NAME = 'HG_INDEX'
 FORMATTER = HtmlFormatter('span', between='\n<span class="break">...</span>\n')
 FRAGMENTER = ContextFragmenter(200)

 CHGSETS_SCHEMA = Schema(
     raw_id=ID(unique=True, stored=True),
-    revision=NUMERIC(unique=True, stored=True),
     last=BOOLEAN(),
     owner=TEXT(),
     repository=ID(unique=True, stored=True),
     author=TEXT(stored=True),
     message=FieldType(format=Characters(), analyzer=ANALYZER,
                       scorable=True, stored=True),
     parents=TEXT(),
     added=TEXT(),
     removed=TEXT(),
     changed=TEXT(),
 )

 CHGSET_IDX_NAME = 'CHGSET_INDEX'

 class MakeIndex(BasePasterCommand):

     max_args = 1
     min_args = 1

     usage = "CONFIG_FILE"
     summary = "Creates index for full text search given configuration file"
     group_name = "RhodeCode"
     takes_config_file = -1
     parser = Command.standard_parser(verbose=True)

     def command(self):
         logging.config.fileConfig(self.path_to_ini_file)
         from pylons import config
         add_cache(config)
         engine = engine_from_config(config, 'sqlalchemy.db1.')
         init_model(engine)
         index_location = config['index_dir']
         repo_location = self.options.repo_location \
             if self.options.repo_location else RepoModel().repos_path
         repo_list = map(strip, self.options.repo_list.split(',')) \
             if self.options.repo_list else None
         repo_update_list = map(strip, self.options.repo_update_list.split(',')) \
             if self.options.repo_update_list else None
         load_rcextensions(config['here'])
         #======================================================================
         # WHOOSH DAEMON
         #======================================================================
         from rhodecode.lib.pidlock import LockHeld, DaemonLock
         from rhodecode.lib.indexers.daemon import WhooshIndexingDaemon
         try:
             l = DaemonLock(file_=jn(dn(dn(index_location)), 'make_index.lock'))
             WhooshIndexingDaemon(index_location=index_location,
                                  repo_location=repo_location,
                                  repo_list=repo_list,
                                  repo_update_list=repo_update_list)\
                 .run(full_index=self.options.full_index)
             l.release()
         except LockHeld:
             sys.exit(1)

     def update_parser(self):
         self.parser.add_option('--repo-location',
                                action='store',
                                dest='repo_location',
                                help="Specifies repositories location to index OPTIONAL",
                                )
         self.parser.add_option('--index-only',
                                action='store',
                                dest='repo_list',
                                help="Specifies a comma separated list of repositories "
                                     "to build index on. If not given all repositories "
                                     "are scanned for indexing. OPTIONAL",
                                )
         self.parser.add_option('--update-only',
                                action='store',
                                dest='repo_update_list',
                                help="Specifies a comma separated list of repositories "
                                     "to re-build index on. OPTIONAL",
                                )
         self.parser.add_option('-f',
                                action='store_true',
                                dest='full_index',
                                help="Specifies that index should be made full i.e."
                                     " destroy old and build from scratch",
                                default=False)


 class WhooshResultWrapper(object):
     def __init__(self, search_type, searcher, matcher, highlight_items,
                  repo_location):
         self.search_type = search_type
         self.searcher = searcher
         self.matcher = matcher
         self.highlight_items = highlight_items
         self.fragment_size = 200
         self.repo_location = repo_location

     @LazyProperty
     def doc_ids(self):
         docs_id = []
         while self.matcher.is_active():
             docnum = self.matcher.id()
             chunks = [offsets for offsets in self.get_chunks()]
             docs_id.append([docnum, chunks])
             self.matcher.next()
         return docs_id

     def __str__(self):
         return '<%s at %s>' % (self.__class__.__name__, len(self.doc_ids))

     def __repr__(self):
         return self.__str__()

     def __len__(self):
         return len(self.doc_ids)

     def __iter__(self):
         """
         Allows iteration over results, lazily generating content.

         *Requires* implementation of ``__getitem__`` method.
         """
         for docid in self.doc_ids:
             yield self.get_full_content(docid)

     def __getitem__(self, key):
         """
         Slicing of resultWrapper
         """
         i, j = key.start, key.stop

         slices = []
         for docid in self.doc_ids[i:j]:
             slices.append(self.get_full_content(docid))
         return slices

     def get_full_content(self, docid):
         res = self.searcher.stored_fields(docid[0])
         log.debug('result: %s' % res)
         if self.search_type == 'content':
             full_repo_path = jn(self.repo_location, res['repository'])
             f_path = res['path'].split(full_repo_path)[-1]
             f_path = f_path.lstrip(os.sep)
             content_short = self.get_short_content(res, docid[1])
             res.update({'content_short': content_short,
                         'content_short_hl': self.highlight(content_short),
                         'f_path': f_path
                         })
         elif self.search_type == 'message':
             res.update({'message_hl': self.highlight(res['message'])})

         log.debug('result: %s' % res)

         return res

     def get_short_content(self, res, chunks):
         return ''.join([res['content'][chunk[0]:chunk[1]] for chunk in chunks])

     def get_chunks(self):
         """
         Smart function that implements chunking the content,
         but does not overlap chunks, so it doesn't highlight the same
         close occurrences twice.

         :param matcher:
         :param size:
         """
         memory = [(0, 0)]
         if self.matcher.supports('positions'):
             for span in self.matcher.spans():
                 start = span.startchar or 0
                 end = span.endchar or 0
                 start_offseted = max(0, start - self.fragment_size)
                 end_offseted = end + self.fragment_size

                 if start_offseted < memory[-1][1]:
                     start_offseted = memory[-1][1]
                 memory.append((start_offseted, end_offseted,))
                 yield (start_offseted, end_offseted,)

     def highlight(self, content, top=5):
         if self.search_type not in ['content', 'message']:
             return ''
         hl = highlight(
             text=content,
             terms=self.highlight_items,
             analyzer=ANALYZER,
             fragmenter=FRAGMENTER,
             formatter=FORMATTER,
             top=top
         )
         return hl
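
The schema change above means a changeset document can now be located only by its immutable hash. As a rough illustration, the lookup the incremental updater performs against this schema boils down to the following sketch (not part of the commit; it mirrors the 'last:t' query issued by update_changeset_index() in daemon.py below, and index_location is assumed to be the daemon's index directory):

# Sketch: find the raw_id to resume indexing from.
from whoosh.index import open_dir
from whoosh.qparser import QueryParser

from rhodecode.lib.indexers import CHGSETS_SCHEMA, CHGSET_IDX_NAME


def last_indexed_raw_id(index_location, repo_name):
    idx = open_dir(index_location, indexname=CHGSET_IDX_NAME)
    with idx.searcher() as searcher:
        qp = QueryParser('repository', schema=CHGSETS_SCHEMA)
        # the document(s) flagged 'last' mark where the previous run stopped
        q = qp.parse(u"last:t AND %s" % repo_name)
        results = searcher.search(q)
        if len(results) > 0:
            return results[0]['raw_id']
        return None  # repository has never been indexed

Note that sortedby='revision' is no longer needed: the raw_id is stored on the document itself, which is exactly why the numeric revision field could be dropped.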
--- a/rhodecode/lib/indexers/daemon.py
+++ b/rhodecode/lib/indexers/daemon.py
@@ -1,373 +1,390 @@
 # -*- coding: utf-8 -*-
 """
     rhodecode.lib.indexers.daemon
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

     A daemon will read from task table and run tasks

     :created_on: Jan 26, 2010
     :author: marcink
     :copyright: (C) 2010-2012 Marcin Kuzminski <marcin@python-works.com>
     :license: GPLv3, see COPYING for more details.
 """
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 from __future__ import with_statement

 import os
 import sys
 import logging
 import traceback

 from shutil import rmtree
 from time import mktime

 from os.path import dirname as dn
 from os.path import join as jn

 # to get the rhodecode import
 project_path = dn(dn(dn(dn(os.path.realpath(__file__)))))
 sys.path.append(project_path)

 from rhodecode.config.conf import INDEX_EXTENSIONS
 from rhodecode.model.scm import ScmModel
 from rhodecode.lib.utils2 import safe_unicode
 from rhodecode.lib.indexers import SCHEMA, IDX_NAME, CHGSETS_SCHEMA, CHGSET_IDX_NAME

 from rhodecode.lib.vcs.exceptions import ChangesetError, RepositoryError, \
     NodeDoesNotExistError

 from whoosh.index import create_in, open_dir, exists_in
 from whoosh.query import *
 from whoosh.qparser import QueryParser

 log = logging.getLogger('whoosh_indexer')


 class WhooshIndexingDaemon(object):
     """
     Daemon for atomic indexing jobs
     """

     def __init__(self, indexname=IDX_NAME, index_location=None,
                  repo_location=None, sa=None, repo_list=None,
                  repo_update_list=None):
         self.indexname = indexname

         self.index_location = index_location
         if not index_location:
             raise Exception('You have to provide index location')

         self.repo_location = repo_location
         if not repo_location:
             raise Exception('You have to provide repositories location')

         self.repo_paths = ScmModel(sa).repo_scan(self.repo_location)

         # filter repo list
         if repo_list:
             self.filtered_repo_paths = {}
             for repo_name, repo in self.repo_paths.items():
                 if repo_name in repo_list:
                     self.filtered_repo_paths[repo_name] = repo

             self.repo_paths = self.filtered_repo_paths

         # filter update repo list
         self.filtered_repo_update_paths = {}
         if repo_update_list:
             self.filtered_repo_update_paths = {}
             for repo_name, repo in self.repo_paths.items():
                 if repo_name in repo_update_list:
                     self.filtered_repo_update_paths[repo_name] = repo
             self.repo_paths = self.filtered_repo_update_paths

         self.initial = True
         if not os.path.isdir(self.index_location):
             os.makedirs(self.index_location)
             log.info('Cannot run incremental index since it does not'
                      ' yet exist - running full build')
         elif not exists_in(self.index_location, IDX_NAME):
             log.info('Running full index build as the file content'
                      ' index does not exist')
         elif not exists_in(self.index_location, CHGSET_IDX_NAME):
             log.info('Running full index build as the changeset'
                      ' index does not exist')
         else:
             self.initial = False

     def get_paths(self, repo):
         """
         Recursive walk in root dir; returns a set of all paths in that
         dir, based on the repository walk function
         """
         index_paths_ = set()
         try:
             tip = repo.get_changeset('tip')
             for topnode, dirs, files in tip.walk('/'):
                 for f in files:
                     index_paths_.add(jn(repo.path, f.path))

         except RepositoryError, e:
             log.debug(traceback.format_exc())
             pass
         return index_paths_

     def get_node(self, repo, path):
         n_path = path[len(repo.path) + 1:]
         node = repo.get_changeset().get_node(n_path)
         return node

     def get_node_mtime(self, node):
         return mktime(node.last_changeset.date.timetuple())

     def add_doc(self, writer, path, repo, repo_name):
         """
         Adding doc to writer; this function itself fetches data from
         the instance of vcs backend
         """

         node = self.get_node(repo, path)
         indexed = indexed_w_content = 0
         # we just index the content of chosen files, and skip binary files
         if node.extension in INDEX_EXTENSIONS and not node.is_binary:
             u_content = node.content
             if not isinstance(u_content, unicode):
                 log.warning('  >> %s Could not get this content as unicode, '
                             'replacing with empty content' % path)
                 u_content = u''
             else:
                 log.debug('    >> %s [WITH CONTENT]' % path)
                 indexed_w_content += 1

         else:
             log.debug('    >> %s' % path)
             # just index file name without its content
             u_content = u''
             indexed += 1

         p = safe_unicode(path)
         writer.add_document(
             fileid=p,
             owner=unicode(repo.contact),
             repository=safe_unicode(repo_name),
             path=p,
             content=u_content,
             modtime=self.get_node_mtime(node),
             extension=node.extension
         )
         return indexed, indexed_w_content

-    def index_changesets(self, writer, repo_name, repo, start_rev=0):
+    def index_changesets(self, writer, repo_name, repo, start_rev=None):
         """
         Add all changesets in the vcs repo starting at start_rev
         to the index writer
+
+        :param writer: the whoosh index writer to add to
+        :param repo_name: name of the repository from whence the
+          changeset originates, including the repository group
+        :param repo: the vcs repository instance to index changesets for;
+          the presumption is the repo has changesets to index
+        :param start_rev=None: the full sha id to start indexing from;
+          if start_rev is None then index from the first changeset in
+          the repo
         """

-        log.debug('indexing changesets in %s[%d:]' % (repo_name, start_rev))
+        if start_rev is None:
+            start_rev = repo[0].raw_id
+
+        log.debug('indexing changesets in %s starting at rev: %s' % (repo_name, start_rev))

         indexed = 0
-        for cs in repo[start_rev:]:
+        for cs in repo.get_changesets(start=start_rev):
             writer.add_document(
                 raw_id=unicode(cs.raw_id),
                 owner=unicode(repo.contact),
                 repository=safe_unicode(repo_name),
                 author=cs.author,
                 message=cs.message,
-                revision=cs.revision,
                 last=cs.last,
                 added=u' '.join([node.path for node in cs.added]).lower(),
                 removed=u' '.join([node.path for node in cs.removed]).lower(),
                 changed=u' '.join([node.path for node in cs.changed]).lower(),
                 parents=u' '.join([cs.raw_id for cs in cs.parents]),
             )
             indexed += 1

         log.debug('indexed %d changesets for repo %s' % (indexed, repo_name))

     def index_files(self, file_idx_writer, repo_name, repo):
         i_cnt = iwc_cnt = 0
         log.debug('building index for [%s]' % repo.path)
         for idx_path in self.get_paths(repo):
             i, iwc = self.add_doc(file_idx_writer, idx_path, repo, repo_name)
             i_cnt += i
             iwc_cnt += iwc

         log.debug('added %s files %s with content for repo %s' % (i_cnt + iwc_cnt, iwc_cnt, repo.path))

     def update_changeset_index(self):
         idx = open_dir(self.index_location, indexname=CHGSET_IDX_NAME)

         with idx.searcher() as searcher:
             writer = idx.writer()
             writer_is_dirty = False
             try:
                 for repo_name, repo in self.repo_paths.items():
                     # skip indexing if there aren't any revs in the repo
-                    revs = repo.revisions
-                    if len(revs) < 1:
+                    num_of_revs = len(repo)
+                    if num_of_revs < 1:
                         continue

                     qp = QueryParser('repository', schema=CHGSETS_SCHEMA)
                     q = qp.parse(u"last:t AND %s" % repo_name)

-                    results = searcher.search(q, sortedby='revision')
+                    results = searcher.search(q)

+                    # default to scanning the entire repo
                     last_rev = 0
+                    start_id = None
+
                     if len(results) > 0:
-                        last_rev = results[0]['revision']
+                        # assuming that there is only one result; if not, this
+                        # may require a full re-index.
+                        start_id = results[0]['raw_id']
+                        last_rev = repo.get_changeset(revision=start_id).revision

                     # there are new changesets to index or a new repo to index
-                    if last_rev == 0 or len(revs) > last_rev + 1:
+                    if last_rev == 0 or num_of_revs > last_rev + 1:
                         # delete the docs in the index for the previous last changeset(s)
                         for hit in results:
                             q = qp.parse(u"last:t AND %s AND raw_id:%s" %
                                          (repo_name, hit['raw_id']))
                             writer.delete_by_query(q)

                         # index from the previous last changeset + all new ones
-                        self.index_changesets(writer, repo_name, repo, last_rev)
+                        self.index_changesets(writer, repo_name, repo, start_id)
                         writer_is_dirty = True

             finally:
                 if writer_is_dirty:
                     log.debug('>> COMMITTING CHANGES TO CHANGESET INDEX <<')
                     writer.commit(merge=True)
                     log.debug('>> COMMITTED CHANGES TO CHANGESET INDEX <<')
                 else:
                     writer.cancel()

     def update_file_index(self):
         log.debug((u'STARTING INCREMENTAL INDEXING UPDATE FOR EXTENSIONS %s '
                    'AND REPOS %s') % (INDEX_EXTENSIONS, self.repo_paths.keys()))

         idx = open_dir(self.index_location, indexname=self.indexname)
         # The set of all paths in the index
         indexed_paths = set()
         # The set of all paths we need to re-index
         to_index = set()

         writer = idx.writer()
         writer_is_dirty = False
         try:
             with idx.reader() as reader:

                 # Loop over the stored fields in the index
                 for fields in reader.all_stored_fields():
                     indexed_path = fields['path']
                     indexed_repo_path = fields['repository']
                     indexed_paths.add(indexed_path)

                     if not indexed_repo_path in self.filtered_repo_update_paths:
                         continue

                     repo = self.repo_paths[indexed_repo_path]

                     try:
                         node = self.get_node(repo, indexed_path)
                         # Check if this file was changed since it was indexed
                         indexed_time = fields['modtime']
                         mtime = self.get_node_mtime(node)
                         if mtime > indexed_time:
                             # The file has changed, delete it and add it to the list of
                             # files to reindex
                             log.debug('adding to reindex list %s mtime: %s vs %s' % (
                                 indexed_path, mtime, indexed_time)
                             )
                             writer.delete_by_term('fileid', indexed_path)
                             writer_is_dirty = True

                             to_index.add(indexed_path)
                     except (ChangesetError, NodeDoesNotExistError):
                         # This file was deleted since it was indexed
                         log.debug('removing from index %s' % indexed_path)
                         writer.delete_by_term('path', indexed_path)
                         writer_is_dirty = True

             # Loop over the files in the filesystem
             # Assume we have a function that gathers the filenames of the
             # documents to be indexed
             ri_cnt_total = 0  # indexed
             riwc_cnt_total = 0  # indexed with content
             for repo_name, repo in self.repo_paths.items():
                 # skip indexing if there aren't any revisions
                 if len(repo) < 1:
                     continue
                 ri_cnt = 0  # indexed
                 riwc_cnt = 0  # indexed with content
                 for path in self.get_paths(repo):
                     path = safe_unicode(path)
                     if path in to_index or path not in indexed_paths:

                         # This is either a file that's changed, or a new file
                         # that wasn't indexed before. So index it!
                         i, iwc = self.add_doc(writer, path, repo, repo_name)
                         writer_is_dirty = True
                         log.debug('re indexing %s' % path)
                         ri_cnt += i
                         ri_cnt_total += 1
                         riwc_cnt += iwc
                         riwc_cnt_total += iwc
                 log.debug('added %s files %s with content for repo %s' % (
                     ri_cnt + riwc_cnt, riwc_cnt, repo.path)
                 )
             log.debug('indexed %s files in total and %s with content' % (
                 ri_cnt_total, riwc_cnt_total)
             )
         finally:
             if writer_is_dirty:
                 log.debug('>> COMMITTING CHANGES <<')
                 writer.commit(merge=True)
                 log.debug('>>> FINISHED REBUILDING INDEX <<<')
             else:
                 writer.cancel()

     def build_indexes(self):
         if os.path.exists(self.index_location):
             log.debug('removing previous index')
             rmtree(self.index_location)

         if not os.path.exists(self.index_location):
             os.mkdir(self.index_location)

         chgset_idx = create_in(self.index_location, CHGSETS_SCHEMA, indexname=CHGSET_IDX_NAME)
         chgset_idx_writer = chgset_idx.writer()

         file_idx = create_in(self.index_location, SCHEMA, indexname=IDX_NAME)
         file_idx_writer = file_idx.writer()
         log.debug('BUILDING INDEX FOR EXTENSIONS %s '
                   'AND REPOS %s' % (INDEX_EXTENSIONS, self.repo_paths.keys()))

         for repo_name, repo in self.repo_paths.items():
             # skip indexing if there aren't any revisions
             if len(repo) < 1:
                 continue

             self.index_files(file_idx_writer, repo_name, repo)
             self.index_changesets(chgset_idx_writer, repo_name, repo)

         log.debug('>> COMMITTING CHANGES <<')
         file_idx_writer.commit(merge=True)
         chgset_idx_writer.commit(merge=True)
         log.debug('>>> FINISHED BUILDING INDEX <<<')

     def update_indexes(self):
         self.update_file_index()
         self.update_changeset_index()

     def run(self, full_index=False):
         """Run daemon"""
         if full_index or self.initial:
             self.build_indexes()
         else:
             self.update_indexes()
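
For orientation, a hedged sketch of how the daemon is driven after this change (the paths are hypothetical and a configured RhodeCode environment is assumed; run() semantics are as defined above):

# Sketch: driving the indexer; paths are hypothetical.
from rhodecode.lib.indexers.daemon import WhooshIndexingDaemon

daemon = WhooshIndexingDaemon(index_location='/srv/rhodecode/index',
                              repo_location='/srv/rhodecode/repos')

# Full build (also chosen automatically on a first run):
# index_changesets() is called with start_rev=None, which it now
# resolves to repo[0].raw_id, the first changeset's hash.
daemon.run(full_index=True)

# Incremental run: update_changeset_index() reads the stored 'last:t'
# document, resumes from its raw_id, and converts it to a local
# revision number only to decide whether anything new exists.
daemon.run(full_index=False)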
--- a/rhodecode/tests/functional/test_search.py
+++ b/rhodecode/tests/functional/test_search.py
@@ -1,78 +1,78 @@
 import os
 from rhodecode.tests import *
 from nose.plugins.skip import SkipTest


 class TestSearchController(TestController):

     def test_index(self):
         self.log_user()
         response = self.app.get(url(controller='search', action='index'))

         self.assertTrue('class="small" id="q" name="q" type="text"' in
                         response.body)
         # Test response...

     def test_empty_search(self):
         if os.path.isdir(self.index_location):
             raise SkipTest('skipped due to existing index')
         else:
             self.log_user()
             response = self.app.get(url(controller='search', action='index'),
                                     {'q': HG_REPO})
             self.assertTrue('There is no index to search in. '
                             'Please run whoosh indexer' in response.body)

     def test_normal_search(self):
         self.log_user()
         response = self.app.get(url(controller='search', action='index'),
                                 {'q': 'def repo'})
         response.mustcontain('10 results')

     def test_repo_search(self):
         self.log_user()
         response = self.app.get(url(controller='search', action='index'),
                                 {'q': 'repository:%s def test' % HG_REPO})

         response.mustcontain('4 results')

     def test_search_last(self):
         self.log_user()
         response = self.app.get(url(controller='search', action='index'),
                                 {'q': 'last:t', 'type': 'commit'})

         response.mustcontain('1 results')

     def test_search_commit_message(self):
         self.log_user()
         response = self.app.get(url(controller='search', action='index'),
                                 {'q': 'bother to ask where to fetch repo during tests',
                                  'type': 'commit'})

         response.mustcontain('1 results')
         response.mustcontain('a00c1b6f5d7a6ae678fd553a8b81d92367f7ecf1')

     def test_search_commit_changed_file(self):
         self.log_user()
         response = self.app.get(url(controller='search', action='index'),
                                 {'q': 'changed:tests/utils.py',
                                  'type': 'commit'})

         response.mustcontain('a00c1b6f5d7a6ae678fd553a8b81d92367f7ecf1')

     def test_search_commit_added_file(self):
         self.log_user()
         response = self.app.get(url(controller='search', action='index'),
                                 {'q': 'added:README.rst',
                                  'type': 'commit'})

         response.mustcontain('1 results')
         response.mustcontain('3803844fdbd3b711175fc3da9bdacfcd6d29a6fb')

     def test_search_author(self):
         self.log_user()
         response = self.app.get(url(controller='search', action='index'),
-                                {'q': 'author:marcin@python-blog.com revision:0',
+                                {'q': 'author:marcin@python-blog.com raw_id:b986218ba1c9b0d6a259fac9b050b1724ed8e545',
                                  'type': 'commit'})

         response.mustcontain('1 results')
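
Since revision is no longer a field in CHGSETS_SCHEMA, the author search above can no longer narrow its query with revision:0; it now pins the expected changeset by its full raw_id instead.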