# -*- coding: utf-8 -*-
"""
    rhodecode.lib.indexers.__init__
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Whoosh indexing module for RhodeCode

    :created_on: Aug 17, 2010
    :author: marcink
    :copyright: (C) 2009-2010 Marcin Kuzminski <marcin@python-works.com>
    :license: GPLv3, see COPYING for more details.
"""
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License or (at your option) any later version of the license.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import os
import sys
import traceback
from os.path import dirname as dn, join as jn

# to get the rhodecode import
sys.path.append(dn(dn(dn(os.path.realpath(__file__)))))

from string import strip

from rhodecode.model import init_model
from rhodecode.model.scm import ScmModel
from rhodecode.config.environment import load_environment
from rhodecode.lib.utils import BasePasterCommand, Command, add_cache

from shutil import rmtree
from webhelpers.html.builder import escape
from vcs.utils.lazy import LazyProperty
from sqlalchemy import engine_from_config

from whoosh.analysis import RegexTokenizer, LowercaseFilter, StopFilter
from whoosh.fields import TEXT, ID, STORED, Schema, FieldType
from whoosh.index import create_in, open_dir
from whoosh.formats import Characters
from whoosh.highlight import highlight, SimpleFragmenter, HtmlFormatter

# EXTENSIONS WE WANT TO INDEX CONTENT OF
INDEX_EXTENSIONS = ['action', 'adp', 'ashx', 'asmx', 'aspx', 'asx', 'axd', 'c',
                    'cfg', 'cfm', 'cpp', 'cs', 'css', 'diff', 'do', 'el', 'erl',
                    'h', 'htm', 'html', 'ini', 'java', 'js', 'jsp', 'jspx', 'lisp',
                    'lua', 'm', 'mako', 'ml', 'pas', 'patch', 'php', 'php3',
                    'php4', 'phtml', 'pm', 'py', 'rb', 'rst', 's', 'sh', 'sql',
                    'tpl', 'txt', 'vim', 'wss', 'xhtml', 'xml', 'xsl', 'xslt',
                    'yaws']

# CUSTOM ANALYZER: wordsplit + lowercase filter
ANALYZER = RegexTokenizer(expression=r"\w+") | LowercaseFilter()

# INDEX SCHEMA DEFINITION
SCHEMA = Schema(owner=TEXT(),
                repository=TEXT(stored=True),
                path=TEXT(stored=True),
                content=FieldType(format=Characters(ANALYZER),
                                  scorable=True, stored=True),
                modtime=STORED(), extension=TEXT(stored=True))

IDX_NAME = 'HG_INDEX'
FORMATTER = HtmlFormatter('span', between='\n<span class="break">...</span>\n')
FRAGMENTER = SimpleFragmenter(200)
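
# A minimal sketch (for illustration only -- the real indexing is done by
# WhooshIndexingDaemon in rhodecode.lib.indexers.daemon) of how this schema
# is used to build and fill the index:
#
#   if not os.path.exists(index_location):
#       os.makedirs(index_location)
#   idx = create_in(index_location, SCHEMA, indexname=IDX_NAME)
#   # or, when an index already exists at that location:
#   idx = open_dir(index_location, indexname=IDX_NAME)
#
#   writer = idx.writer()
#   writer.add_document(owner=u'marcin', repository=u'myrepo',
#                       path=u'myrepo/setup.py', content=u'...file content...',
#                       modtime=1282000000, extension=u'py')
#   writer.commit()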


class MakeIndex(BasePasterCommand):

    max_args = 1
    min_args = 1

    usage = "CONFIG_FILE"
    summary = "Creates index for full text search given configuration file"
    group_name = "RhodeCode"
    takes_config_file = -1
    parser = Command.standard_parser(verbose=True)
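    # Typical invocation (illustration only; the exact command name comes
    # from the paster entry point registered in setup.py):
    #
    #   paster make-index production.ini \
    #       --repo-location=/path/to/repositories \
    #       --index-only=repo1,repo2 -f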

    def command(self):
        from pylons import config

        add_cache(config)
        engine = engine_from_config(config, 'sqlalchemy.db1.')
        init_model(engine)

        index_location = config['index_dir']
        repo_location = self.options.repo_location
        repo_list = map(strip, self.options.repo_list.split(',')) \
            if self.options.repo_list else None

        #======================================================================
        # WHOOSH DAEMON
        #======================================================================
        from rhodecode.lib.pidlock import LockHeld, DaemonLock
        from rhodecode.lib.indexers.daemon import WhooshIndexingDaemon
        try:
            l = DaemonLock()
            WhooshIndexingDaemon(index_location=index_location,
                                 repo_location=repo_location,
                                 repo_list=repo_list)\
                .run(full_index=self.options.full_index)
            l.release()
        except LockHeld:
            sys.exit(1)

    def update_parser(self):
        self.parser.add_option('--repo-location',
                               action='store',
                               dest='repo_location',
                               help="Specifies repositories location to index REQUIRED",
                               )
        self.parser.add_option('--index-only',
                               action='store',
                               dest='repo_list',
                               help="Specifies a comma separated list of repositories "
                                    "to build index on OPTIONAL",
                               )
        self.parser.add_option('-f',
                               action='store_true',
                               dest='full_index',
                               help="Specifies that index should be made full, i.e."
                                    " destroy old and build from scratch",
                               default=False)


class ResultWrapper(object):

    def __init__(self, search_type, searcher, matcher, highlight_items):
        self.search_type = search_type
        self.searcher = searcher
        self.matcher = matcher
        self.highlight_items = highlight_items
        self.fragment_size = 200 / 2

    @LazyProperty
    def doc_ids(self):
        docs_id = []
        while self.matcher.is_active():
            docnum = self.matcher.id()
            chunks = [offsets for offsets in self.get_chunks()]
            docs_id.append([docnum, chunks])
            self.matcher.next()
        return docs_id

    def __str__(self):
        return '<%s at %s>' % (self.__class__.__name__, len(self.doc_ids))

    def __repr__(self):
        return self.__str__()

    def __len__(self):
        return len(self.doc_ids)

    def __iter__(self):
        """
        Allows iteration over results, and lazily generates content.

        *Requires* implementation of ``__getitem__`` method.
        """
        for docid in self.doc_ids:
            yield self.get_full_content(docid)

    def __getitem__(self, key):
        """
        Slicing of ResultWrapper (``key`` is expected to be a ``slice``)
        """
        i, j = key.start, key.stop

        slice = []
        for docid in self.doc_ids[i:j]:
            slice.append(self.get_full_content(docid))
        return slice

    def get_full_content(self, docid):
        res = self.searcher.stored_fields(docid[0])
        f_path = res['path'][res['path'].find(res['repository']) \
            + len(res['repository']):].lstrip('/')

        content_short = self.get_short_content(res, docid[1])
        res.update({'content_short': content_short,
                    'content_short_hl': self.highlight(content_short),
                    'f_path': f_path})

        return res

    def get_short_content(self, res, chunks):
        return ''.join([res['content'][chunk[0]:chunk[1]] for chunk in chunks])

    def get_chunks(self):
        """
        Smart function that implements chunking the content,
        but not overlapping chunks, so it doesn't highlight the same
        close occurrences twice.
        """
        memory = [(0, 0)]
        for span in self.matcher.spans():
            start = span.startchar or 0
            end = span.endchar or 0
            start_offseted = max(0, start - self.fragment_size)
            end_offseted = end + self.fragment_size

            # never start a chunk before the end of the previous one,
            # so overlapping matches are merged into a single fragment
            if start_offseted < memory[-1][1]:
                start_offseted = memory[-1][1]

            memory.append((start_offseted, end_offseted,))
            yield (start_offseted, end_offseted,)

    def highlight(self, content, top=5):
        if self.search_type != 'content':
            return ''
        hl = highlight(escape(content),
                       self.highlight_items,
                       analyzer=ANALYZER,
                       fragmenter=FRAGMENTER,
                       formatter=FORMATTER,
                       top=top)
        return hl
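

# Rough sketch of how ResultWrapper is meant to be driven; the real wiring
# lives in the search controller, and the names below are assumptions:
#
#   from whoosh.qparser import QueryParser
#
#   idx = open_dir(index_location, indexname=IDX_NAME)
#   searcher = idx.searcher()
#   query = QueryParser('content', schema=SCHEMA).parse(u'search terms')
#   matcher = query.matcher(searcher)
#   results = ResultWrapper('content', searcher, matcher, u'search terms')
#   for res in results[0:10]:
#       print res['f_path'], res['content_short_hl']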