largefiles: for update -C, only update largefiles when necessary

Before, a --clean update with largefiles would use the "optimization" that it didn't read hashes from standin files before and after the update. Instead of trusting the content of the standin files, it would rehash all the actual largefiles that lfdirstate reported clean and update the standins that didn't have the expected content. It could thus, in some "impossible" situations, automatically recover from some "largefile got out of sync with its standin" issues (even though there apparently still were weird corner cases where it could fail). This extra checking is similar to what core --clean intentionally does not do, and it made update --clean unbearably slow.

Usually in core Mercurial, --clean will rely on the dirstate to find the files it should update. (It is thus intentionally possible, when trying to trick the system or if there should be bugs, to end up in situations where --clean will not restore the working directory content correctly.) Checking every file when we "know" it is ok is, however, not an option - that would be too slow.

Instead, trust the content of the standin files. Use the same logic for --clean as for linear updates, and trust the dirstate and that our "logic" will keep them in sync. It is much cheaper to rehash only the largefiles reported dirty by a status walk and read all standins than to hash every largefile.

Most of the changes are just a change of indentation now that the different kinds of updates are no longer handled that differently. Standins for added files are, however, only written when doing a normal update, while deleted and removed files will only be updated for --clean updates.
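The gist of that change can be sketched in plain Python. This is only an illustration of the before/after logic; the helper names (clean_largefiles, dirty_largefiles, hash_largefile, read_standin, write_standin, update_largefile) are hypothetical parameters, not the actual largefiles API:

# Illustrative sketch only -- hypothetical helpers, not the largefiles API.

def clean_update_old(clean_largefiles, hash_largefile, read_standin,
                     write_standin):
    # Old --clean behaviour: rehash every largefile that lfdirstate reported
    # clean and rewrite any standin that does not match.  Self-healing in a
    # few "impossible" situations, but it hashes every largefile on every
    # 'update -C', which is unbearably slow.
    for lf in clean_largefiles:
        actual = hash_largefile(lf)
        if actual != read_standin(lf):
            write_standin(lf, actual)

def clean_update_new(dirty_largefiles, hash_largefile, read_standin,
                     update_largefile):
    # New behaviour: trust the standins, just as a linear update does.  Only
    # the files a status walk reports dirty are rehashed; anything that does
    # not match its standin is brought back to the recorded hash.
    for lf in dirty_largefiles:
        if hash_largefile(lf) != read_standin(lf):
            update_largefile(lf, read_standin(lf))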

File last commit:

r20045:b3684fd2 default
r24787:9d5c2789 default
test-filecache.py
184 lines | 4.2 KiB | text/x-python
import sys, os, subprocess

if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'],
                    'cacheable']):
    sys.exit(80)

from mercurial import util, scmutil, extensions, hg, ui

filecache = scmutil.filecache
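
# scmutil.filecache is a property-like decorator: the first access calls the
# decorated method and caches the result, and later accesses (roughly) reuse
# it until the stat data (size, mtime, inode) of the watched files -- here
# 'x' and 'y' -- changes.  fakerepo below provides just the minimal repo-like
# surface (join/sjoin/_filecache) that the decorator needs.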
class fakerepo(object):
    def __init__(self):
        self._filecache = {}

    def join(self, p):
        return p

    def sjoin(self, p):
        return p

    @filecache('x', 'y')
    def cached(self):
        print 'creating'
        return 'string from function'

    def invalidate(self):
        for k in self._filecache:
            try:
                delattr(self, k)
            except AttributeError:
                pass
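
# basic() walks the cached property through the changes filecache is meant to
# notice: watched files appearing, changing size, and changing inode via an
# atomic-temp rename.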
def basic(repo):
    print "* neither file exists"
    # calls function
    repo.cached

    repo.invalidate()
    print "* neither file still exists"
    # uses cache
    repo.cached

    # create empty file
    f = open('x', 'w')
    f.close()
    repo.invalidate()
    print "* empty file x created"
    # should recreate the object
    repo.cached

    f = open('x', 'w')
    f.write('a')
    f.close()
    repo.invalidate()
    print "* file x changed size"
    # should recreate the object
    repo.cached

    repo.invalidate()
    print "* nothing changed with either file"
    # stats file again, reuses object
    repo.cached
    # atomically replace the file; its size doesn't change, and hopefully
    # st_mtime doesn't change either, so a cache miss here comes from the
    # inode change alone
    f = scmutil.opener('.')('x', 'w', atomictemp=True)
    f.write('b')
    f.close()

    repo.invalidate()
    print "* file x changed inode"
    repo.cached

    # create empty file y
    f = open('y', 'w')
    f.close()
    repo.invalidate()
    print "* empty file y created"
    # should recreate the object
    repo.cached

    f = open('y', 'w')
    f.write('A')
    f.close()
    repo.invalidate()
    print "* file y changed size"
    # should recreate the object
    repo.cached

    f = scmutil.opener('.')('y', 'w', atomictemp=True)
    f.write('B')
    f.close()
    repo.invalidate()
    print "* file y changed inode"
    repo.cached

    f = scmutil.opener('.')('x', 'w', atomictemp=True)
    f.write('c')
    f.close()
    f = scmutil.opener('.')('y', 'w', atomictemp=True)
    f.write('C')
    f.close()
    repo.invalidate()
    print "* both files changed inode"
    repo.cached
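
# fakeuncacheable() stubs out util.cachestat so that no path is considered
# cacheable; rerunning basic() should then recreate the value on every access
# after an invalidate() instead of reusing the cache.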
def fakeuncacheable():
    def wrapcacheable(orig, *args, **kwargs):
        return False

    def wrapinit(orig, *args, **kwargs):
        pass

    originit = extensions.wrapfunction(util.cachestat, '__init__', wrapinit)
    origcacheable = extensions.wrapfunction(util.cachestat, 'cacheable',
                                            wrapcacheable)

    for fn in ['x', 'y']:
        try:
            os.remove(fn)
        except OSError:
            pass

    basic(fakerepo())

    util.cachestat.cacheable = origcacheable
    util.cachestat.__init__ = originit
def test_filecache_synced():
    # test old behaviour that caused filecached properties to go out of sync
    os.system('hg init && echo a >> a && hg ci -qAm.')
    repo = hg.repository(ui.ui())
    # the first rollback clears the filecache, but the changelog still stays
    # in __dict__
    repo.rollback()
    repo.commit('.')
    # a second rollback comes along and touches the changelog externally
    # (the file is moved)
    repo.rollback()
    # but since the changelog isn't under filecache control anymore, we don't
    # see that it changed, and we return the old changelog without
    # reconstructing it
    repo.commit('.')
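
# setbeforeget() assigns to the cached property directly, bypassing the
# getter; the externally set string is expected to survive invalidate() until
# one of the watched files actually appears or changes.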
def setbeforeget(repo):
    os.remove('x')
    os.remove('y')
    repo.cached = 'string set externally'
    repo.invalidate()
    print "* neither file exists"
    print repo.cached
    repo.invalidate()
    f = open('x', 'w')
    f.write('a')
    f.close()
    print "* file x created"
    print repo.cached
    repo.cached = 'string 2 set externally'
    repo.invalidate()
    print "* string set externally again"
    print repo.cached
    repo.invalidate()
    f = open('y', 'w')
    f.write('b')
    f.close()
    print "* file y created"
    print repo.cached

print 'basic:'
print
basic(fakerepo())
print
print 'fakeuncacheable:'
print
fakeuncacheable()
test_filecache_synced()
print
print 'setbeforeget:'
print
setbeforeget(fakerepo())