revset: use phasecache.getrevset

This is part of a refactoring that moves some phase query optimization from
revset.py to phases.py. See the previous patch for motivation.

This patch changes revset code to use phasecache.getrevset so it no longer
accesses the private field _phasecache._phasesets directly.

For performance impact, this patch was tested using the following query on my
hg-committed repo:

  for i in 'public()' 'not public()' 'draft()' 'not draft()'; do
      echo $i
      hg perfrevset "$i"
      hg perfrevset "$i" --hidden
  done

For the CPython implementation, most operations are unchanged (within +/- 1%),
while "not public()" and "draft()" are noticeably faster on an unfiltered
repo. It may be because the new code avoids a set copy if filteredrevs is
empty.

  revset | public()      | not public()  | draft()       | not draft()
  hidden |  yes  |   no  |  yes  |   no  |  yes  |   no  |  yes  |   no
  ---------------------------------------------------------------------
  before | 19006 | 17352 |   239 |   286 |   180 |   228 |  7690 |  5745
  after  | 19137 | 17231 |   240 |   207 |   182 |   150 |  7687 |  5658
  delta  |       |       |       |  -38% |       |  -52% |       |

  (timed in microseconds)

For the pure Python implementation, some operations are faster while
"not draft()" is noticeably slower:

  revset | public()      | not public()  | draft()       | not draft()
  hidden |  yes  |   no  |  yes  |   no  |  yes  |   no  |  yes  |   no
  ---------------------------------------------------------------------
  before | 18852 | 17183 | 17758 | 15921 | 17505 | 15973 | 41521 | 39822
  after  | 18924 | 17380 | 17558 | 14545 | 16727 | 13593 | 48356 | 43992
  delta  |       |       |       |   -9% |   -5% |  -15% |  +16% |  +10%

That may be due to the different performance characteristics of generatorset
vs. filteredset. The "not draft()" query could be optimized for this case,
where both "public" and "secret" are passed to "getrevset", so it won't
iterate over the whole repo twice.
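As a rough sketch of the shape of the change (not the literal patch; the
variable names below are illustrative, and the exact getrevset signature is an
assumption), a phase predicate stops reaching into the private per-phase sets
and asks phasecache for the revset instead:

  # before: peek at the private per-phase sets and filter them by hand
  draft_revs = repo._phasecache._phasesets[phases.draft]

  # after: let phasecache build the (filtered) revset for the requested phases
  draft_revs = repo._phasecache.getrevset(repo, (phases.draft,))

  # the "not draft()" case could pass both remaining phases in one call,
  # avoiding two full iterations over the repo
  not_draft = repo._phasecache.getrevset(repo, (phases.public, phases.secret))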

# similar.py - mechanisms for finding similar files
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

import hashlib

from .i18n import _
from . import (
    bdiff,
    mdiff,
)

def _findexactmatches(repo, added, removed):
'''find renamed files that have no changes
Takes a list of new filectxs and a list of removed filectxs, and yields
(before, after) tuples of exact matches.
'''
numfiles = len(added) + len(removed)
# Get hashes of removed files.
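    # (Keying by SHA-1 digest means each added file below costs a single dict
    # lookup instead of a pairwise comparison against every removed file.)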
hashes = {}
for i, fctx in enumerate(removed):
repo.ui.progress(_('searching for exact renames'), i, total=numfiles,
unit=_('files'))
h = hashlib.sha1(fctx.data()).digest()
hashes[h] = fctx
# For each added file, see if it corresponds to a removed file.
for i, fctx in enumerate(added):
repo.ui.progress(_('searching for exact renames'), i + len(removed),
total=numfiles, unit=_('files'))
h = hashlib.sha1(fctx.data()).digest()
if h in hashes:
yield (hashes[h], fctx)
# Done
repo.ui.progress(_('searching for exact renames'), None)

def _ctxdata(fctx):
# lazily load text
orig = fctx.data()
return orig, mdiff.splitnewlines(orig)

def _score(fctx, otherdata):
orig, lines = otherdata
text = fctx.data()
# bdiff.blocks() returns blocks of matching lines
# count the number of bytes in each
equal = 0
matches = bdiff.blocks(text, orig)
for x1, x2, y1, y2 in matches:
for line in lines[y1:y2]:
equal += len(line)
lengths = len(text) + len(orig)
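    # The result is the fraction of shared content: 2 * matching bytes over
    # the combined size of both files, from 0.0 (nothing in common) to 1.0
    # (identical). E.g. two 30-byte files sharing one 12-byte line score
    # 2 * 12 / (30 + 30) = 0.4.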
return equal * 2.0 / lengths

def score(fctx1, fctx2):
return _score(fctx1, _ctxdata(fctx2))

def _findsimilarmatches(repo, added, removed, threshold):
'''find potentially renamed files based on similar file content
Takes a list of new filectxs and a list of removed filectxs, and yields
(before, after, score) tuples of partial matches.
'''
copies = {}
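    # copies maps each added filectx to the best (removed filectx, score)
    # candidate seen so far; seeding the score with the threshold means
    # matches below the threshold are never recorded.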
for i, r in enumerate(removed):
repo.ui.progress(_('searching for similar files'), i,
total=len(removed), unit=_('files'))
data = None
for a in added:
bestscore = copies.get(a, (None, threshold))[1]
if data is None:
data = _ctxdata(r)
myscore = _score(a, data)
if myscore >= bestscore:
copies[a] = (r, myscore)
    repo.ui.progress(_('searching for similar files'), None)
for dest, v in copies.iteritems():
source, bscore = v
yield source, dest, bscore

def findrenames(repo, added, removed, threshold):
'''find renamed files -- yields (before, after, score) tuples'''
parentctx = repo['.']
workingctx = repo[None]
# Zero length files will be frequently unrelated to each other, and
# tracking the deletion/addition of such a file will probably cause more
# harm than good. We strip them out here to avoid matching them later on.
addedfiles = set([workingctx[fp] for fp in added
if workingctx[fp].size() > 0])
removedfiles = set([parentctx[fp] for fp in removed
if fp in parentctx and parentctx[fp].size() > 0])
# Find exact matches.
for (a, b) in _findexactmatches(repo,
sorted(addedfiles), sorted(removedfiles)):
addedfiles.remove(b)
yield (a.path(), b.path(), 1.0)
# If the user requested similar files to be matched, search for them also.
if threshold < 1.0:
for (a, b, score) in _findsimilarmatches(repo,
sorted(addedfiles), sorted(removedfiles), threshold):
yield (a.path(), b.path(), score)
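
For context, a minimal usage sketch of this module's entry point; the repo
object, file lists, and threshold value below are illustrative assumptions,
not part of similar.py. In Mercurial itself, findrenames is driven by commands
such as 'hg addremove --similarity':

  # Hypothetical caller: 'added' and 'removed' are lists of file names present
  # in the working context and parent context respectively (assumed here).
  threshold = 0.75   # report pairs that are at least 75% similar
  for old, new, similarity in findrenames(repo, added, removed, threshold):
      repo.ui.status('%s -> %s (%d%% similar)\n' % (old, new, similarity * 100))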