# -*- coding: utf-8 -*-
"""
    rhodecode.lib.indexers.__init__
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Whoosh indexing module for RhodeCode

    :created_on: Aug 17, 2010
    :author: marcink
    :copyright: (C) 2010-2012 Marcin Kuzminski <marcin@python-works.com>
    :license: GPLv3, see COPYING for more details.
"""
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import logging
from os.path import dirname as dn, join as jn

# to get the rhodecode import
sys.path.append(dn(dn(dn(os.path.realpath(__file__)))))

from whoosh.analysis import RegexTokenizer, LowercaseFilter, StopFilter
from whoosh.fields import (TEXT, ID, STORED, NUMERIC, BOOLEAN, Schema,
                           FieldType, DATETIME)
from whoosh.formats import Characters
from whoosh.highlight import (highlight as whoosh_highlight, HtmlFormatter,
                              ContextFragmenter)

from rhodecode.lib.utils2 import LazyProperty

log = logging.getLogger(__name__)

# CUSTOM ANALYZER: wordsplit + lowercase filter
ANALYZER = RegexTokenizer(expression=r"\w+") | LowercaseFilter()

# INDEX SCHEMA DEFINITION
SCHEMA = Schema(
    fileid=ID(unique=True),
    owner=TEXT(),
    repository=TEXT(stored=True),
    path=TEXT(stored=True),
    content=FieldType(format=Characters(), analyzer=ANALYZER,
                      scorable=True, stored=True),
    modtime=STORED(),
    extension=TEXT(stored=True)
)

IDX_NAME = 'HG_INDEX'
FORMATTER = HtmlFormatter('span', between='\n<span class="break">...</span>\n')
FRAGMENTER = ContextFragmenter(200)
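
# A minimal usage sketch (not part of the original module): creating or
# opening the file-content index built from SCHEMA. ``index_location`` is a
# hypothetical placeholder for the directory holding the index files.
#
#   from whoosh.index import create_in, exists_in, open_dir
#
#   if not exists_in(index_location, indexname=IDX_NAME):
#       ix = create_in(index_location, SCHEMA, indexname=IDX_NAME)
#   else:
#       ix = open_dir(index_location, indexname=IDX_NAME)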

CHGSETS_SCHEMA = Schema(
    raw_id=ID(unique=True, stored=True),
    date=NUMERIC(stored=True),
    last=BOOLEAN(),
    owner=TEXT(),
    repository=ID(unique=True, stored=True),
    author=TEXT(stored=True),
    message=FieldType(format=Characters(), analyzer=ANALYZER,
                      scorable=True, stored=True),
    parents=TEXT(),
    added=TEXT(),
    removed=TEXT(),
    changed=TEXT(),
)

CHGSET_IDX_NAME = 'CHGSET_INDEX'
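
# A minimal sketch (not part of the original module) of indexing one
# changeset document under CHGSETS_SCHEMA; ``ix`` is assumed to be an index
# opened with indexname=CHGSET_IDX_NAME, and all field values below are
# hypothetical.
#
#   writer = ix.writer()
#   writer.add_document(
#       raw_id=u'abcdef0123456789',      # hypothetical commit hash
#       date=1346508000,                 # NUMERIC field: e.g. a unix timestamp
#       last=True,
#       owner=u'marcink',
#       repository=u'rhodecode',
#       author=u'Marcin Kuzminski <marcin@python-works.com>',
#       message=u'fixed whoosh indexing',
#       parents=u'1234567890abcdef',
#       added=u'rhodecode/lib/indexers/__init__.py',
#       removed=u'',
#       changed=u'',
#   )
#   writer.commit()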

# used only to generate queries in journal
JOURNAL_SCHEMA = Schema(
    username=TEXT(),
    date=DATETIME(),
    action=TEXT(),
    repository=TEXT(),
    ip=TEXT(),
)
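
# Illustrative sketch (not part of the original module): since the journal
# has no Whoosh index of its own, JOURNAL_SCHEMA is only handed to a query
# parser, turning a user-supplied filter string into a structured query
# object that can then be inspected.
#
#   from whoosh.qparser import QueryParser
#
#   qp = QueryParser('repository', schema=JOURNAL_SCHEMA)
#   query = qp.parse(u'repository:rhodecode username:marcink')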


class WhooshResultWrapper(object):
    def __init__(self, search_type, searcher, matcher, highlight_items,
                 repo_location):
        self.search_type = search_type
        self.searcher = searcher
        self.matcher = matcher
        self.highlight_items = highlight_items
        self.fragment_size = 200
        self.repo_location = repo_location

    @LazyProperty
    def doc_ids(self):
        docs_id = []
        while self.matcher.is_active():
            docnum = self.matcher.id()
            chunks = list(self.get_chunks())
            docs_id.append([docnum, chunks])
            self.matcher.next()
        return docs_id

    def __str__(self):
        return '<%s at %s>' % (self.__class__.__name__, len(self.doc_ids))

    def __repr__(self):
        return self.__str__()

    def __len__(self):
        return len(self.doc_ids)

    def __iter__(self):
        """
        Allows iteration over results and lazily generates their content.

        *Requires* implementation of the ``__getitem__`` method.
        """
        for docid in self.doc_ids:
            yield self.get_full_content(docid)

    def __getitem__(self, key):
        """
        Slicing of the result wrapper; ``key`` is expected to be a slice.
        """
        i, j = key.start, key.stop
        slices = []
        for docid in self.doc_ids[i:j]:
            slices.append(self.get_full_content(docid))
        return slices

    def get_full_content(self, docid):
        res = self.searcher.stored_fields(docid[0])
        log.debug('result: %s' % res)
        if self.search_type == 'content':
            full_repo_path = jn(self.repo_location, res['repository'])
            f_path = res['path'].split(full_repo_path)[-1]
            f_path = f_path.lstrip(os.sep)
            content_short = self.get_short_content(res, docid[1])
            res.update({'content_short': content_short,
                        'content_short_hl': self.highlight(content_short),
                        'f_path': f_path})
        elif self.search_type == 'path':
            full_repo_path = jn(self.repo_location, res['repository'])
            f_path = res['path'].split(full_repo_path)[-1]
            f_path = f_path.lstrip(os.sep)
            res.update({'f_path': f_path})
        elif self.search_type == 'message':
            res.update({'message_hl': self.highlight(res['message'])})
        log.debug('result: %s' % res)

        return res

    def get_short_content(self, res, chunks):
        return ''.join([res['content'][chunk[0]:chunk[1]] for chunk in chunks])

    def get_chunks(self):
        """
        Chunks the content around each match, making sure that chunks do
        not overlap, so the same close occurrences are not highlighted
        twice.
        """
        memory = [(0, 0)]
        if self.matcher.supports('positions'):
            for span in self.matcher.spans():
                start = span.startchar or 0
                end = span.endchar or 0
                start_offseted = max(0, start - self.fragment_size)
                end_offseted = end + self.fragment_size

                # clamp the chunk start to the end of the previous chunk
                if start_offseted < memory[-1][1]:
                    start_offseted = memory[-1][1]
                memory.append((start_offseted, end_offseted,))
                yield (start_offseted, end_offseted,)

    def highlight(self, content, top=5):
        if self.search_type not in ['content', 'message']:
            return ''
        hl = whoosh_highlight(
            text=content,
            terms=self.highlight_items,
            analyzer=ANALYZER,
            fragmenter=FRAGMENTER,
            formatter=FORMATTER,
            top=top
        )
        return hl
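
# A minimal end-to-end sketch (not part of the original module) of wiring the
# wrapper to a content search; ``index_location``, ``repo_location`` and the
# query string are hypothetical placeholders, and the highlight terms are
# normally extracted from the parsed query instead of written by hand.
#
#   from whoosh.index import open_dir
#   from whoosh.qparser import QueryParser
#
#   ix = open_dir(index_location, indexname=IDX_NAME)
#   with ix.searcher() as searcher:
#       query = QueryParser('content', schema=SCHEMA).parse(u'def main')
#       matcher = query.matcher(searcher)
#       highlight_items = set([u'def', u'main'])
#       results = WhooshResultWrapper('content', searcher, matcher,
#                                     highlight_items, repo_location)
#       for result in results:
#           print(result['f_path'], result['content_short_hl'])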