# upgrade.py - functions for in place upgrade of Mercurial repository
#
# Copyright (c) 2016-present, Gregory Szorc
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import stat

from ..i18n import _
from ..pycompat import getattr
from .. import (
    changelog,
    error,
    filelog,
    manifest,
    metadata,
    pycompat,
    requirements,
    scmutil,
    store,
    util,
    vfs as vfsmod,
)

from ..revlogutils import (
    constants as revlogconst,
    flagutil,
    nodemap,
    sidedata as sidedatamod,
)

from . import actions as upgrade_actions
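

# When a parallel (worker based) upgrade is possible, register the worker
# variant of the copy-tracing sidedata computer on the source repository,
# replacing the default one (`replace=True`), then return the sidedata helpers
# matching what the destination repository wants. On Windows, or when
# `experimental.worker.repository-upgrade` is disabled, the computation stays
# sequential.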
def get_sidedata_helpers(srcrepo, dstrepo):
    use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
    sequential = pycompat.iswindows or not use_w
    if not sequential:
        srcrepo.register_sidedata_computer(
            revlogconst.KIND_CHANGELOG,
            sidedatamod.SD_FILES,
            (sidedatamod.SD_FILES,),
            metadata._get_worker_sidedata_adder(srcrepo, dstrepo),
            flagutil.REVIDX_HASCOPIESINFO,
            replace=True,
        )
    return sidedatamod.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata)


def _revlogfrompath(repo, rl_type, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if rl_type & store.FILEFLAGS_CHANGELOG:
        return changelog.changelog(repo.svfs)
    elif rl_type & store.FILEFLAGS_MANIFESTLOG:
        mandir = b''
        if b'/' in path:
            mandir = path.rsplit(b'/', 1)[0]
        return manifest.manifestrevlog(
            repo.nodeconstants, repo.svfs, tree=mandir
        )
    else:
        # drop the extension and the `data/` prefix
        path_part = path.rsplit(b'.', 1)[0].split(b'/', 1)
        if len(path_part) < 2:
            msg = _(b'cannot recognize revlog from filename: %s')
            msg %= path
            raise error.Abort(msg)
        path = path_part[1]
        return filelog.filelog(repo.svfs, path)


def _copyrevlog(tr, destrepo, oldrl, rl_type, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with the format of the destination repository.
    """
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, rl_type, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    oldvfs = oldrl.opener
    newvfs = newrl.opener
    oldindex = oldvfs.join(oldrl._indexfile)
    newindex = newvfs.join(newrl._indexfile)
    olddata = oldvfs.join(oldrl._datafile)
    newdata = newvfs.join(newrl._datafile)

    with newvfs(newrl._indexfile, b'w'):
        pass  # create all the directories

    util.copyfile(oldindex, newindex)
    copydata = oldrl.opener.exists(oldrl._datafile)
    if copydata:
        util.copyfile(olddata, newdata)

    if rl_type & store.FILEFLAGS_FILELOG:
        destrepo.svfs.fncache.add(unencodedname)
        if copydata:
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
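

# Names of the revlog categories a user can select for full cloning (as
# opposed to a blind file copy); `matchrevlog` below checks a store entry's
# type against `upgrade_op.revlogs_to_process`, which holds these values.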
UPGRADE_CHANGELOG = b"changelog"
UPGRADE_MANIFEST = b"manifest"
UPGRADE_FILELOGS = b"all-filelogs"

UPGRADE_ALL_REVLOGS = frozenset(
    [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
)


def matchrevlog(revlogfilter, rl_type):
    """check if a revlog is selected for cloning.

    In other words, whether there are any updates which need to be done on the
    revlog, or whether it can be blindly copied.

    The store entry is checked against the passed filter"""
    if rl_type & store.FILEFLAGS_CHANGELOG:
        return UPGRADE_CHANGELOG in revlogfilter
    elif rl_type & store.FILEFLAGS_MANIFESTLOG:
        return UPGRADE_MANIFEST in revlogfilter
    assert rl_type & store.FILEFLAGS_FILELOG
    return UPGRADE_FILELOGS in revlogfilter
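

# A selected revlog is cloned revision by revision, so that the delta reuse,
# re-delta and sidedata rewriting policies carried by `upgrade_op` apply; a
# revlog that is not selected has its files copied verbatim via `_copyrevlog`.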
def _perform_clone(
    ui,
    dstrepo,
    tr,
    old_revlog,
    rl_type,
    unencoded,
    upgrade_op,
    sidedata_helpers,
    oncopiedrevision,
):
    """returns the new revlog object created"""
    newrl = None
    if matchrevlog(upgrade_op.revlogs_to_process, rl_type):
        ui.note(
            _(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded)
        )
        newrl = _revlogfrompath(dstrepo, rl_type, unencoded)
        old_revlog.clone(
            tr,
            newrl,
            addrevisioncb=oncopiedrevision,
            deltareuse=upgrade_op.delta_reuse_mode,
            forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
            sidedata_helpers=sidedata_helpers,
        )
    else:
        msg = _(b'blindly copying %s containing %i revisions\n')
        ui.note(msg % (unencoded, len(old_revlog)))
        _copyrevlog(tr, dstrepo, old_revlog, rl_type, unencoded)

        newrl = _revlogfrompath(dstrepo, rl_type, unencoded)
    return newrl


def _clonerevlogs(
    ui,
    srcrepo,
    dstrepo,
    tr,
    upgrade_op,
):
    """Copy revlogs between 2 repos."""
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())

    # mapping of data files which need to be cloned
    # key is unencoded filename
    # value is revlog_object_from_srcrepo
    manifests = {}
    changelogs = {}
    filelogs = {}

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for rl_type, unencoded, encoded, size in alldatafiles:
        if not rl_type & store.FILEFLAGS_REVLOG_MAIN:
            continue

        # the store.walk function will wrongly pick up transaction backups and
        # get confused. As a quick fix for the 5.9 release, we ignore those.
        # (this is not a module constant because it seems better to keep the
        # hack together)
        skip_undo = (
            b'undo.backup.00changelog.i',
            b'undo.backup.00manifest.i',
        )
        if unencoded in skip_undo:
            continue

        rl = _revlogfrompath(srcrepo, rl_type, unencoded)

        info = rl.storageinfo(
            exclusivefiles=True,
            revisionscount=True,
            trackedsize=True,
            storedsize=True,
        )

        revcount += info[b'revisionscount'] or 0
        datasize = info[b'storedsize'] or 0
        rawsize = info[b'trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if rl_type & store.FILEFLAGS_CHANGELOG:
            changelogs[unencoded] = (rl_type, rl)
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif rl_type & store.FILEFLAGS_MANIFESTLOG:
            manifests[unencoded] = (rl_type, rl)
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif rl_type & store.FILEFLAGS_FILELOG:
            filelogs[unencoded] = (rl_type, rl)
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            raise error.ProgrammingError(b'unknown revlog type')

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        progress.increment()

    sidedata_helpers = get_sidedata_helpers(srcrepo, dstrepo)

    # Migrating filelogs
    ui.status(
        _(
            b'migrating %d filelogs containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            fcount,
            frevcount,
            util.bytecount(fsrcsize),
            util.bytecount(frawsize),
        )
    )
    progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount)
    for unencoded, (rl_type, oldrl) in sorted(filelogs.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            rl_type,
            unencoded,
            upgrade_op,
            sidedata_helpers,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        fdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d filelog revisions across %d '
            b'filelogs; change in size: %s\n'
        )
        % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
    )

    # Migrating manifests
    ui.status(
        _(
            b'migrating %d manifests containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            mcount,
            mrevcount,
            util.bytecount(msrcsize),
            util.bytecount(mrawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'manifest revisions'), total=mrevcount
    )
    for unencoded, (rl_type, oldrl) in sorted(manifests.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            rl_type,
            unencoded,
            upgrade_op,
            sidedata_helpers,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        mdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d manifest revisions across %d '
            b'manifests; change in size: %s\n'
        )
        % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
    )

    # Migrating changelog
    ui.status(
        _(
            b'migrating changelog containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            crevcount,
            util.bytecount(csrcsize),
            util.bytecount(crawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'changelog revisions'), total=crevcount
    )
    for unencoded, (rl_type, oldrl) in sorted(changelogs.items()):
        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            rl_type,
            unencoded,
            upgrade_op,
            sidedata_helpers,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        cdstsize += info[b'storedsize'] or 0
    progress.complete()
    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    dstsize = fdstsize + mdstsize + cdstsize
    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )


def _files_to_copy_post_revlog_clone(srcrepo):
    """yields files which should be copied to destination after revlogs
    are cloned"""
    for path, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
        # don't copy revlogs as they are already cloned
        if store.revlog_type(path) is not None:
            continue
        # Skip transaction related files.
        if path.startswith(b'undo'):
            continue
        # Only copy regular files.
        if kind != stat.S_IFREG:
            continue
        # Skip other skipped files.
        if path in (b'lock', b'fncache'):
            continue
        # TODO: should we skip cache too?

        yield path


def _replacestores(currentrepo, upgradedrepo, backupvfs, upgrade_op):
    """Replace the stores after current repository is upgraded

    Creates a backup of current repository store at backup path
    Replaces upgraded store files in current repo from upgraded one

    Arguments:
      currentrepo: repo object of current repository
      upgradedrepo: repo object of the upgraded data
      backupvfs: vfs object for the backup path
      upgrade_op: upgrade operation object
                  to be used to decide what all is upgraded
    """
    # TODO: don't blindly rename everything in store
    # There can be upgrades where store is not touched at all
    if upgrade_op.backup_store:
        util.rename(currentrepo.spath, backupvfs.join(b'store'))
    else:
        currentrepo.vfs.rmtree(b'store', forcibly=True)

    util.rename(upgradedrepo.spath, currentrepo.spath)


def finishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.
    """
def upgrade(ui, srcrepo, dstrepo, upgrade_op):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.
    """
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()
    backuppath = None
    backupvfs = None

    ui.status(
        _(
            b'(it is safe to interrupt this process any time before '
            b'data migration completes)\n'
        )
    )

    if upgrade_actions.dirstatev2 in upgrade_op.upgrade_actions:
        ui.status(_(b'upgrading to dirstate-v2 from v1\n'))
        upgrade_dirstate(ui, srcrepo, upgrade_op, b'v1', b'v2')
        upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatev2)

    if upgrade_actions.dirstatev2 in upgrade_op.removed_actions:
        ui.status(_(b'downgrading from dirstate-v2 to v1\n'))
        upgrade_dirstate(ui, srcrepo, upgrade_op, b'v2', b'v1')
        upgrade_op.removed_actions.remove(upgrade_actions.dirstatev2)

    if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions):
        return

    if upgrade_op.requirements_only:
        ui.status(_(b'upgrading repository requirements\n'))
        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
    # if there is only one action and that is persistent nodemap upgrade,
    # directly write the nodemap file and update requirements instead of going
    # through the whole cloning process
    elif (
        len(upgrade_op.upgrade_actions) == 1
        and b'persistent-nodemap' in upgrade_op.upgrade_actions_names
        and not upgrade_op.removed_actions
    ):
        ui.status(
            _(b'upgrading repository to use persistent nodemap feature\n')
        )
        with srcrepo.transaction(b'upgrade') as tr:
            unfi = srcrepo.unfiltered()
            cl = unfi.changelog
            nodemap.persist_nodemap(tr, cl, force=True)
            # we want to directly operate on the underlying revlog to force
            # create a nodemap file. This is fine since this is upgrade code
            # and it heavily relies on the repository being revlog based,
            # hence accessing private attributes can be justified
            nodemap.persist_nodemap(
                tr, unfi.manifestlog._rootstore._revlog, force=True
            )
        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
    elif (
        len(upgrade_op.removed_actions) == 1
        and [
            x
            for x in upgrade_op.removed_actions
            if x.name == b'persistent-nodemap'
        ]
        and not upgrade_op.upgrade_actions
    ):
        ui.status(
            _(b'downgrading repository to not use persistent nodemap feature\n')
        )
        with srcrepo.transaction(b'upgrade') as tr:
            unfi = srcrepo.unfiltered()
            cl = unfi.changelog
            nodemap.delete_nodemap(tr, srcrepo, cl)
            # check comment 20 lines above for accessing private attributes
            nodemap.delete_nodemap(
                tr, srcrepo, unfi.manifestlog._rootstore._revlog
            )
        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
    else:
        with dstrepo.transaction(b'upgrade') as tr:
            _clonerevlogs(
                ui,
                srcrepo,
                dstrepo,
                tr,
                upgrade_op,
            )

        # Now copy other files in the store directory.
        for p in _files_to_copy_post_revlog_clone(srcrepo):
            srcrepo.ui.status(_(b'copying %s\n') % p)
            src = srcrepo.store.rawvfs.join(p)
            dst = dstrepo.store.rawvfs.join(p)
            util.copyfile(src, dst, copystat=True)

        finishdatamigration(ui, srcrepo, dstrepo, requirements)

        ui.status(_(b'data fully upgraded in a temporary repository\n'))

        if upgrade_op.backup_store:
            backuppath = pycompat.mkdtemp(
                prefix=b'upgradebackup.', dir=srcrepo.path
            )
            backupvfs = vfsmod.vfs(backuppath)

            # Make a backup of requires file first, as it is the first to be
            # modified.
            util.copyfile(
                srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
            )

        # We install an arbitrary requirement that clients must not support
        # as a mechanism to lock out new clients during the data swap. This is
        # better than allowing a client to continue while the repository is in
        # an inconsistent state.
        ui.status(
            _(
                b'marking source repository as being upgraded; clients will be '
                b'unable to read from repository\n'
            )
        )
        scmutil.writereporequirements(
            srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
        )

        ui.status(_(b'starting in-place swap of repository data\n'))
        if upgrade_op.backup_store:
            ui.status(
                _(b'replaced files will be backed up at %s\n') % backuppath
            )

        # Now swap in the new store directory. Doing it as a rename should
        # make the operation nearly instantaneous and atomic (at least in
        # well-behaved environments).
        ui.status(_(b'replacing store...\n'))
        tstart = util.timer()
        _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
        elapsed = util.timer() - tstart
        ui.status(
            _(
                b'store replacement complete; repository was inconsistent for '
                b'%0.1fs\n'
            )
            % elapsed
        )

        # We first write the requirements file. Any new requirements will lock
        # out legacy clients.
        ui.status(
            _(
                b'finalizing requirements file and making repository readable '
                b'again\n'
            )
        )
        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)

        if upgrade_op.backup_store:
            # The lock file from the old store won't be removed because
            # nothing has a reference to its new location. So clean it up
            # manually. Alternatively, we could update srcrepo.svfs and other
            # variables to point to the new location. This is simpler.
            assert backupvfs is not None  # help pytype
            backupvfs.unlink(b'store/lock')

    return backuppath
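

# Switch the dirstate between the v1 and v2 on-disk formats in place: the
# parsed dirstate is kept in memory, the format flag is flipped, the old file
# is removed and the dirstate is written back in the new format (backing up
# `requires` and `dirstate` first when a backup was requested).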
def upgrade_dirstate(ui, srcrepo, upgrade_op, old, new):
    if upgrade_op.backup_store:
        backuppath = pycompat.mkdtemp(
            prefix=b'upgradebackup.', dir=srcrepo.path
        )
        ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
        backupvfs = vfsmod.vfs(backuppath)
        util.copyfile(
            srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
        )
        util.copyfile(
            srcrepo.vfs.join(b'dirstate'), backupvfs.join(b'dirstate')
        )

    assert srcrepo.dirstate._use_dirstate_v2 == (old == b'v2')
    srcrepo.dirstate._map._use_dirstate_tree = True
    srcrepo.dirstate._map.preload()
    srcrepo.dirstate._use_dirstate_v2 = new == b'v2'
    srcrepo.dirstate._map._use_dirstate_v2 = srcrepo.dirstate._use_dirstate_v2
    srcrepo.dirstate._dirty = True
    srcrepo.vfs.unlink(b'dirstate')
    srcrepo.dirstate.write(None)

    scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)