# upgrade.py - functions for in place upgrade of Mercurial repository
#
# Copyright (c) 2016-present, Gregory Szorc
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import annotations

import stat

from ..i18n import _
from .. import (
    error,
    metadata,
    pycompat,
    requirements,
    scmutil,
    store,
    util,
    vfs as vfsmod,
)

from ..revlogutils import (
    constants as revlogconst,
    flagutil,
    nodemap,
    sidedata as sidedatamod,
)

from . import actions as upgrade_actions


def get_sidedata_helpers(srcrepo, dstrepo):
    use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')

    if use_w and pycompat.isdarwin:
        # Avoid a PicklingError on macOS in bundlerepository.
        use_w = False
        srcrepo.ui.debug(
            b'ignoring experimental.worker.repository-upgrade=True on darwin'
        )

    sequential = pycompat.iswindows or not use_w
    if not sequential:
        srcrepo.register_sidedata_computer(
            revlogconst.KIND_CHANGELOG,
            sidedatamod.SD_FILES,
            (sidedatamod.SD_FILES,),
            metadata._get_worker_sidedata_adder(srcrepo, dstrepo),
            flagutil.REVIDX_HASCOPIESINFO,
            replace=True,
        )

    return sidedatamod.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata)


def _copyrevlog(tr, destrepo, oldrl, entry):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with the format of the destination repository.
    """
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = entry.get_revlog_instance(destrepo)
    newrl = getattr(newrl, '_revlog', newrl)

    oldvfs = oldrl.opener
    newvfs = newrl.opener
    oldindex = oldvfs.join(oldrl._indexfile)
    newindex = newvfs.join(newrl._indexfile)
    olddata = oldvfs.join(oldrl._datafile)
    newdata = newvfs.join(newrl._datafile)

    with newvfs(newrl._indexfile, b'w'):
        pass  # create all the directories

    util.copyfile(oldindex, newindex)
    copydata = oldrl.opener.exists(oldrl._datafile)
    if copydata:
        util.copyfile(olddata, newdata)

    if entry.is_filelog:
        unencodedname = entry.main_file_path()
        destrepo.svfs.fncache.add(unencodedname)
        if copydata:
            # the data file name is derived from the index name (".i" -> ".d")
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')


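# Symbolic names for the revlog classes an upgrade can be restricted to;
# upgrade_op.revlogs_to_process is checked against these by matchrevlog()
# below.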
UPGRADE_CHANGELOG = b"changelog"
UPGRADE_MANIFEST = b"manifest"
UPGRADE_FILELOGS = b"all-filelogs"

UPGRADE_ALL_REVLOGS = frozenset(
    [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
)


def matchrevlog(revlogfilter, entry):
    """check if a revlog is selected for cloning.

    In other words, does the revlog need any updates applied, or can it be
    blindly copied?

    The store entry is checked against the passed filter."""
    if entry.is_changelog:
        return UPGRADE_CHANGELOG in revlogfilter
    elif entry.is_manifestlog:
        return UPGRADE_MANIFEST in revlogfilter
    assert entry.is_filelog
    return UPGRADE_FILELOGS in revlogfilter


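# _perform_clone() is invoked once per store entry by _clonerevlogs(): entries
# selected by upgrade_op.revlogs_to_process get a full revlog.clone() (which
# may recompute deltas and sidedata), while everything else is copied verbatim
# through _copyrevlog().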
def _perform_clone(
    ui,
    dstrepo,
    tr,
    old_revlog,
    entry,
    upgrade_op,
    sidedata_helpers,
    oncopiedrevision,
):
    """returns the new revlog object created"""
    newrl = None
    revlog_path = entry.main_file_path()
    if matchrevlog(upgrade_op.revlogs_to_process, entry):
        ui.note(
            _(b'cloning %d revisions from %s\n')
            % (len(old_revlog), revlog_path)
        )
        newrl = entry.get_revlog_instance(dstrepo)
        old_revlog.clone(
            tr,
            newrl,
            addrevisioncb=oncopiedrevision,
            deltareuse=upgrade_op.delta_reuse_mode,
            forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
            sidedata_helpers=sidedata_helpers,
        )
    else:
        msg = _(b'blindly copying %s containing %i revisions\n')
        ui.note(msg % (revlog_path, len(old_revlog)))
        _copyrevlog(tr, dstrepo, old_revlog, entry)

        # reopen the copied revlog so the caller can query its storage info
        newrl = entry.get_revlog_instance(dstrepo)
    return newrl


def _clonerevlogs(
    ui,
    srcrepo,
    dstrepo,
    tr,
    upgrade_op,
):
    """Copy revlogs between 2 repos."""
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())

    # mapping of store entries which need to be cloned
    # key is the entry's target_id
    # value is the store entry from the source repository
    manifests = {}
    changelogs = {}
    filelogs = {}

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for entry in alldatafiles:
        if not entry.is_revlog:
            continue

        rl = entry.get_revlog_instance(srcrepo)

        info = rl.storageinfo(
            exclusivefiles=True,
            revisionscount=True,
            trackedsize=True,
            storedsize=True,
        )

        revcount += info[b'revisionscount'] or 0
        datasize = info[b'storedsize'] or 0
        rawsize = info[b'trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if entry.is_changelog:
            changelogs[entry.target_id] = entry
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif entry.is_manifestlog:
            manifests[entry.target_id] = entry
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif entry.is_filelog:
            filelogs[entry.target_id] = entry
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            raise error.ProgrammingError(b'unknown revlog type')

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        progress.increment()

    sidedata_helpers = get_sidedata_helpers(srcrepo, dstrepo)

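    # Each revlog class is migrated separately so that each gets its own
    # progress bar and summary line: filelogs first, then manifests, then the
    # changelog, mirroring the order in which new revisions are normally
    # written.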
    # Migrating filelogs
    ui.status(
        _(
            b'migrating %d filelogs containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            fcount,
            frevcount,
            util.bytecount(fsrcsize),
            util.bytecount(frawsize),
        )
    )
    progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount)
    for target_id, entry in sorted(filelogs.items()):
        oldrl = entry.get_revlog_instance(srcrepo)

        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            entry,
            upgrade_op,
            sidedata_helpers,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        fdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d filelog revisions across %d '
            b'filelogs; change in size: %s\n'
        )
        % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
    )

    # Migrating manifests
    ui.status(
        _(
            b'migrating %d manifests containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            mcount,
            mrevcount,
            util.bytecount(msrcsize),
            util.bytecount(mrawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'manifest revisions'), total=mrevcount
    )
    for target_id, entry in sorted(manifests.items()):
        oldrl = entry.get_revlog_instance(srcrepo)

        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            entry,
            upgrade_op,
            sidedata_helpers,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        mdstsize += info[b'storedsize'] or 0
    ui.status(
        _(
            b'finished migrating %d manifest revisions across %d '
            b'manifests; change in size: %s\n'
        )
        % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
    )

    # Migrating changelog
    ui.status(
        _(
            b'migrating changelog containing %d revisions '
            b'(%s in store; %s tracked data)\n'
        )
        % (
            crevcount,
            util.bytecount(csrcsize),
            util.bytecount(crawsize),
        )
    )
    if progress:
        progress.complete()
    progress = srcrepo.ui.makeprogress(
        _(b'changelog revisions'), total=crevcount
    )
    for target_id, entry in sorted(changelogs.items()):
        oldrl = entry.get_revlog_instance(srcrepo)

        newrl = _perform_clone(
            ui,
            dstrepo,
            tr,
            oldrl,
            entry,
            upgrade_op,
            sidedata_helpers,
            oncopiedrevision,
        )
        info = newrl.storageinfo(storedsize=True)
        cdstsize += info[b'storedsize'] or 0
    progress.complete()
    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    dstsize = fdstsize + mdstsize + cdstsize
    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )


def _files_to_copy_post_revlog_clone(srcrepo):
    """yields files which should be copied to destination after revlogs
    are cloned"""
    for path, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
        # don't copy revlogs as they are already cloned
        if store.is_revlog_file(path):
            continue
        # Skip transaction related files.
        if path.startswith(b'undo'):
            continue
        # Only copy regular files.
        if kind != stat.S_IFREG:
            continue
        # Skip other skipped files.
        if path in (b'lock', b'fncache'):
            continue
        # TODO: should we skip cache too?

        yield path


def _replacestores(currentrepo, upgradedrepo, backupvfs, upgrade_op):
    """Replace the stores after current repository is upgraded

    Creates a backup of current repository store at backup path
    Replaces upgraded store files in current repo from upgraded one

    Arguments:
      currentrepo: repo object of current repository
      upgradedrepo: repo object of the upgraded data
      backupvfs: vfs object for the backup path
      upgrade_op: upgrade operation object
                  to be used to decide what all is upgraded
    """
    # TODO: don't blindly rename everything in store
    # There can be upgrades where store is not touched at all
    if upgrade_op.backup_store:
        util.rename(currentrepo.spath, backupvfs.join(b'store'))
    else:
        currentrepo.vfs.rmtree(b'store', forcibly=True)

    util.rename(upgradedrepo.spath, currentrepo.spath)


def finishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.
    """


def upgrade(ui, srcrepo, dstrepo, upgrade_op):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.
    """
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    backuppath = None
    backupvfs = None

    ui.status(
        _(
            b'(it is safe to interrupt this process any time before '
            b'data migration completes)\n'
        )
    )

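    # Dirstate-only changes do not require cloning the store, so they are
    # handled first and removed from the action sets; if nothing else remains
    # afterwards, the function returns early below.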
    if upgrade_actions.dirstatev2 in upgrade_op.upgrade_actions:
        ui.status(_(b'upgrading to dirstate-v2 from v1\n'))
        upgrade_dirstate(ui, srcrepo, upgrade_op, b'v1', b'v2')
        upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatev2)

    if upgrade_actions.dirstatev2 in upgrade_op.removed_actions:
        ui.status(_(b'downgrading from dirstate-v2 to v1\n'))
        upgrade_dirstate(ui, srcrepo, upgrade_op, b'v2', b'v1')
        upgrade_op.removed_actions.remove(upgrade_actions.dirstatev2)

    if upgrade_actions.dirstatetrackedkey in upgrade_op.upgrade_actions:
        ui.status(_(b'create dirstate-tracked-hint file\n'))
        upgrade_tracked_hint(ui, srcrepo, upgrade_op, add=True)
        upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatetrackedkey)
    elif upgrade_actions.dirstatetrackedkey in upgrade_op.removed_actions:
        ui.status(_(b'remove dirstate-tracked-hint file\n'))
        upgrade_tracked_hint(ui, srcrepo, upgrade_op, add=False)
        upgrade_op.removed_actions.remove(upgrade_actions.dirstatetrackedkey)

    if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions):
        return

    if upgrade_op.requirements_only:
        ui.status(_(b'upgrading repository requirements\n'))
        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
    # if there is only one action and that is persistent nodemap upgrade
    # directly write the nodemap file and update requirements instead of going
    # through the whole cloning process
    elif (
        len(upgrade_op.upgrade_actions) == 1
        and b'persistent-nodemap' in upgrade_op.upgrade_actions_names
        and not upgrade_op.removed_actions
    ):
        ui.status(
            _(b'upgrading repository to use persistent nodemap feature\n')
        )
        with srcrepo.transaction(b'upgrade') as tr:
            unfi = srcrepo.unfiltered()
            cl = unfi.changelog
            nodemap.persist_nodemap(tr, cl, force=True)
            # we want to directly operate on the underlying revlog to force
            # create a nodemap file. This is fine since this is upgrade code
            # and it heavily relies on the repository being revlog based,
            # hence accessing private attributes can be justified.
            nodemap.persist_nodemap(
                tr, unfi.manifestlog._rootstore._revlog, force=True
            )
        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
    elif (
        len(upgrade_op.removed_actions) == 1
        and [
            x
            for x in upgrade_op.removed_actions
            if x.name == b'persistent-nodemap'
        ]
        and not upgrade_op.upgrade_actions
    ):
        ui.status(
            _(b'downgrading repository to not use persistent nodemap feature\n')
        )
        with srcrepo.transaction(b'upgrade') as tr:
            unfi = srcrepo.unfiltered()
            cl = unfi.changelog
            nodemap.delete_nodemap(tr, srcrepo, cl)
            # check comment 20 lines above for accessing private attributes
            nodemap.delete_nodemap(
                tr, srcrepo, unfi.manifestlog._rootstore._revlog
            )
        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
    else:
        with dstrepo.transaction(b'upgrade') as tr:
            _clonerevlogs(
                ui,
                srcrepo,
                dstrepo,
                tr,
                upgrade_op,
            )

            # Now copy other files in the store directory.
            for p in _files_to_copy_post_revlog_clone(srcrepo):
                srcrepo.ui.status(_(b'copying %s\n') % p)
                src = srcrepo.store.rawvfs.join(p)
                dst = dstrepo.store.rawvfs.join(p)
                util.copyfile(src, dst, copystat=True)

            finishdatamigration(ui, srcrepo, dstrepo, requirements)

        ui.status(_(b'data fully upgraded in a temporary repository\n'))

        if upgrade_op.backup_store:
            backuppath = pycompat.mkdtemp(
                prefix=b'upgradebackup.', dir=srcrepo.path
            )
            backupvfs = vfsmod.vfs(backuppath)

            # Make a backup of the requires file first, as it is the first to
            # be modified.
            util.copyfile(
                srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
            )

        # We install an arbitrary requirement that clients must not support
        # as a mechanism to lock out new clients during the data swap. This is
        # better than allowing a client to continue while the repository is in
        # an inconsistent state.
        ui.status(
            _(
                b'marking source repository as being upgraded; clients will be '
                b'unable to read from repository\n'
            )
        )
        scmutil.writereporequirements(
            srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
        )

        ui.status(_(b'starting in-place swap of repository data\n'))
        if upgrade_op.backup_store:
            ui.status(
                _(b'replaced files will be backed up at %s\n') % backuppath
            )

        # Now swap in the new store directory. Doing it as a rename should
        # make the operation nearly instantaneous and atomic (at least in
        # well-behaved environments).
        ui.status(_(b'replacing store...\n'))
        tstart = util.timer()
        _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
        elapsed = util.timer() - tstart
        ui.status(
            _(
                b'store replacement complete; repository was inconsistent for '
                b'%0.1fs\n'
            )
            % elapsed
        )

        # We first write the requirements file. Any new requirements will lock
        # out legacy clients.
        ui.status(
            _(
                b'finalizing requirements file and making repository readable '
                b'again\n'
            )
        )
        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)

        if upgrade_op.backup_store:
            # The lock file from the old store won't be removed because
            # nothing has a reference to its new location. So clean it up
            # manually. Alternatively, we could update srcrepo.svfs and other
            # variables to point to the new location. This is simpler.
            assert backupvfs is not None  # help pytype
            backupvfs.unlink(b'store/lock')

    return backuppath


def upgrade_dirstate(ui, srcrepo, upgrade_op, old, new):
    if upgrade_op.backup_store:
        backuppath = pycompat.mkdtemp(
            prefix=b'upgradebackup.', dir=srcrepo.path
        )
        ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
        backupvfs = vfsmod.vfs(backuppath)
        util.copyfile(
            srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
        )
        try:
            util.copyfile(
                srcrepo.vfs.join(b'dirstate'), backupvfs.join(b'dirstate')
            )
        except FileNotFoundError:
            # The dirstate does not exist on an empty repo or a repo with no
            # revision checked out
            pass

    assert srcrepo.dirstate._use_dirstate_v2 == (old == b'v2')

    use_v2 = new == b'v2'
    if use_v2:
        # Write the requirements *before* upgrading
        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)

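    # Load the whole dirstate map into memory, then flip the format flags and
    # mark the dirstate dirty so that write() below serializes it in the new
    # format.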
    srcrepo.dirstate._map.preload()
    srcrepo.dirstate._use_dirstate_v2 = use_v2
    srcrepo.dirstate._map._use_dirstate_v2 = use_v2
    srcrepo.dirstate._dirty = True
    try:
        srcrepo.vfs.unlink(b'dirstate')
    except FileNotFoundError:
        # The dirstate does not exist on an empty repo or a repo with no
        # revision checked out
        pass

    srcrepo.dirstate.write(None)

    if not use_v2:
        # Remove the v2 requirement *after* downgrading
        scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)


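# The tracked-hint file is a small marker whose content changes whenever the
# set of tracked files changes, letting external tools cheaply detect such
# changes. This helper only needs to create or delete that file and update the
# repository requirements accordingly.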
def upgrade_tracked_hint(ui, srcrepo, upgrade_op, add):
    if add:
        srcrepo.dirstate._use_tracked_hint = True
        srcrepo.dirstate._dirty = True
        srcrepo.dirstate._dirty_tracked_set = True
        srcrepo.dirstate.write(None)
    else:
        srcrepo.dirstate.delete_tracked_hint()

    scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)