diff --git a/contrib/automation/hgautomation/aws.py b/contrib/automation/hgautomation/aws.py
--- a/contrib/automation/hgautomation/aws.py
+++ b/contrib/automation/hgautomation/aws.py
@@ -925,10 +925,15 @@ def ensure_linux_dev_ami(c: AWSConnectio
requirements3_path = (
pathlib.Path(__file__).parent.parent / 'linux-requirements-py3.txt'
)
+ requirements35_path = (
+ pathlib.Path(__file__).parent.parent / 'linux-requirements-py3.5.txt'
+ )
with requirements2_path.open('r', encoding='utf-8') as fh:
requirements2 = fh.read()
with requirements3_path.open('r', encoding='utf-8') as fh:
requirements3 = fh.read()
+ with requirements35_path.open('r', encoding='utf-8') as fh:
+ requirements35 = fh.read()
# Compute a deterministic fingerprint to determine whether image needs to
# be regenerated.
@@ -938,6 +943,7 @@ def ensure_linux_dev_ami(c: AWSConnectio
'bootstrap_script': BOOTSTRAP_DEBIAN,
'requirements_py2': requirements2,
'requirements_py3': requirements3,
+ 'requirements_py35': requirements35,
}
)
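
The dict above feeds the deterministic fingerprint mentioned in the comment: if the bootstrap script or any requirements file (including the new py3.5 one) changes, the fingerprint changes and the dev AMI is rebuilt. A minimal, illustrative sketch of that idea (compute_fingerprint is a made-up name, not the helper hgautomation actually uses):

    import hashlib
    import json

    def compute_fingerprint(inputs):
        # json.dumps with sort_keys=True gives a stable serialization, so the
        # same inputs always produce the same digest.
        serialized = json.dumps(inputs, sort_keys=True)
        return hashlib.sha256(serialized.encode('utf-8')).hexdigest()

    # Any edit to a requirements file changes the digest and forces a rebuild.
    fingerprint = compute_fingerprint({
        'bootstrap_script': '#!/bin/bash\necho placeholder',
        'requirements_py3': 'pylint==2.8.2\n',
        'requirements_py35': 'pylint==2.6.2\n',
    })
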
@@ -979,6 +985,10 @@ def ensure_linux_dev_ami(c: AWSConnectio
fh.write(requirements3)
fh.chmod(0o0700)
+ with sftp.open('%s/requirements-py3.5.txt' % home, 'wb') as fh:
+ fh.write(requirements35)
+ fh.chmod(0o0700)
+
print('executing bootstrap')
chan, stdin, stdout = ssh_exec_command(
client, '%s/bootstrap' % home
diff --git a/contrib/automation/hgautomation/linux.py b/contrib/automation/hgautomation/linux.py
--- a/contrib/automation/hgautomation/linux.py
+++ b/contrib/automation/hgautomation/linux.py
@@ -26,11 +26,11 @@ DISTROS = {
INSTALL_PYTHONS = r'''
PYENV2_VERSIONS="2.7.17 pypy2.7-7.2.0"
-PYENV3_VERSIONS="3.5.10 3.6.12 3.7.9 3.8.6 3.9.0 pypy3.5-7.0.0 pypy3.6-7.3.0"
+PYENV3_VERSIONS="3.5.10 3.6.13 3.7.10 3.8.10 3.9.5 pypy3.5-7.0.0 pypy3.6-7.3.3 pypy3.7-7.3.3"
git clone https://github.com/pyenv/pyenv.git /hgdev/pyenv
pushd /hgdev/pyenv
-git checkout 8ac91b4fd678a8c04356f5ec85cfcd565c265e9a
+git checkout 328fd42c3a2fbf14ae46dae2021a087fe27ba7e2
popd
export PYENV_ROOT="/hgdev/pyenv"
@@ -56,7 +56,20 @@ done
for v in ${PYENV3_VERSIONS}; do
pyenv install -v ${v}
${PYENV_ROOT}/versions/${v}/bin/python get-pip.py
- ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/requirements-py3.txt
+
+ case ${v} in
+ 3.5.*)
+ REQUIREMENTS=requirements-py3.5.txt
+ ;;
+ pypy3.5*)
+ REQUIREMENTS=requirements-py3.5.txt
+ ;;
+ *)
+ REQUIREMENTS=requirements-py3.txt
+ ;;
+ esac
+
+ ${PYENV_ROOT}/versions/${v}/bin/pip install -r /hgdev/${REQUIREMENTS}
done
pyenv global ${PYENV2_VERSIONS} ${PYENV3_VERSIONS} system
@@ -64,6 +77,18 @@ pyenv global ${PYENV2_VERSIONS} ${PYENV3
'\r\n', '\n'
)
+INSTALL_PYOXIDIZER = r'''
+PYOXIDIZER_VERSION=0.16.0
+PYOXIDIZER_SHA256=8875471c270312fbb934007fd30f65f1904cc0f5da6188d61c90ed2129b9f9c1
+PYOXIDIZER_URL=https://github.com/indygreg/PyOxidizer/releases/download/pyoxidizer%2F${PYOXIDIZER_VERSION}/pyoxidizer-${PYOXIDIZER_VERSION}-linux_x86_64.zip
+
+wget -O pyoxidizer.zip --progress dot:mega ${PYOXIDIZER_URL}
+echo "${PYOXIDIZER_SHA256} pyoxidizer.zip" | sha256sum --check -
+
+unzip pyoxidizer.zip
+chmod +x pyoxidizer
+sudo mv pyoxidizer /usr/local/bin/pyoxidizer
+'''
INSTALL_RUST = r'''
RUSTUP_INIT_SHA256=a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076
@@ -72,10 +97,8 @@ echo "${RUSTUP_INIT_SHA256} rustup-init"
chmod +x rustup-init
sudo -H -u hg -g hg ./rustup-init -y
-sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.31.1 1.46.0
+sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup install 1.41.1 1.52.0
sudo -H -u hg -g hg /home/hg/.cargo/bin/rustup component add clippy
-
-sudo -H -u hg -g hg /home/hg/.cargo/bin/cargo install --version 0.10.3 pyoxidizer
'''
@@ -306,9 +329,9 @@ sudo mkdir /hgdev
sudo chown `whoami` /hgdev
{install_rust}
+{install_pyoxidizer}
-cp requirements-py2.txt /hgdev/requirements-py2.txt
-cp requirements-py3.txt /hgdev/requirements-py3.txt
+cp requirements-*.txt /hgdev/
# Disable the pip version check because it uses the network and can
# be annoying.
@@ -332,6 +355,7 @@ sudo chown -R hg:hg /hgdev
'''.lstrip()
.format(
install_rust=INSTALL_RUST,
+ install_pyoxidizer=INSTALL_PYOXIDIZER,
install_pythons=INSTALL_PYTHONS,
bootstrap_virtualenv=BOOTSTRAP_VIRTUALENV,
)
diff --git a/contrib/automation/linux-requirements-py3.5.txt b/contrib/automation/linux-requirements-py3.5.txt
new file mode 100644
--- /dev/null
+++ b/contrib/automation/linux-requirements-py3.5.txt
@@ -0,0 +1,194 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+# pip-compile --generate-hashes --output-file=contrib/automation/linux-requirements-py3.5.txt contrib/automation/linux-requirements.txt.in
+#
+astroid==2.4.2 \
+ --hash=sha256:2f4078c2a41bf377eea06d71c9d2ba4eb8f6b1af2135bec27bbbb7d8f12bb703 \
+ --hash=sha256:bc58d83eb610252fd8de6363e39d4f1d0619c894b0ed24603b881c02e64c7386
+ # via pylint
+docutils==0.17.1 \
+ --hash=sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125 \
+ --hash=sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61
+ # via -r contrib/automation/linux-requirements.txt.in
+fuzzywuzzy==0.18.0 \
+ --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
+ --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993
+ # via -r contrib/automation/linux-requirements.txt.in
+idna==3.1 \
+ --hash=sha256:5205d03e7bcbb919cc9c19885f9920d622ca52448306f2377daede5cf3faac16 \
+ --hash=sha256:c5b02147e01ea9920e6b0a3f1f7bb833612d507592c837a6c49552768f4054e1
+ # via yarl
+isort==4.3.21 \
+ --hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \
+ --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd
+ # via
+ # -r contrib/automation/linux-requirements.txt.in
+ # pylint
+lazy-object-proxy==1.4.3 \
+ --hash=sha256:0c4b206227a8097f05c4dbdd323c50edf81f15db3b8dc064d08c62d37e1a504d \
+ --hash=sha256:194d092e6f246b906e8f70884e620e459fc54db3259e60cf69a4d66c3fda3449 \
+ --hash=sha256:1be7e4c9f96948003609aa6c974ae59830a6baecc5376c25c92d7d697e684c08 \
+ --hash=sha256:4677f594e474c91da97f489fea5b7daa17b5517190899cf213697e48d3902f5a \
+ --hash=sha256:48dab84ebd4831077b150572aec802f303117c8cc5c871e182447281ebf3ac50 \
+ --hash=sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd \
+ --hash=sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239 \
+ --hash=sha256:8d859b89baf8ef7f8bc6b00aa20316483d67f0b1cbf422f5b4dc56701c8f2ffb \
+ --hash=sha256:9254f4358b9b541e3441b007a0ea0764b9d056afdeafc1a5569eee1cc6c1b9ea \
+ --hash=sha256:9651375199045a358eb6741df3e02a651e0330be090b3bc79f6d0de31a80ec3e \
+ --hash=sha256:97bb5884f6f1cdce0099f86b907aa41c970c3c672ac8b9c8352789e103cf3156 \
+ --hash=sha256:9b15f3f4c0f35727d3a0fba4b770b3c4ebbb1fa907dbcc046a1d2799f3edd142 \
+ --hash=sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442 \
+ --hash=sha256:a6ae12d08c0bf9909ce12385803a543bfe99b95fe01e752536a60af2b7797c62 \
+ --hash=sha256:ca0a928a3ddbc5725be2dd1cf895ec0a254798915fb3a36af0964a0a4149e3db \
+ --hash=sha256:cb2c7c57005a6804ab66f106ceb8482da55f5314b7fcb06551db1edae4ad1531 \
+ --hash=sha256:d74bb8693bf9cf75ac3b47a54d716bbb1a92648d5f781fc799347cfc95952383 \
+ --hash=sha256:d945239a5639b3ff35b70a88c5f2f491913eb94871780ebfabb2568bd58afc5a \
+ --hash=sha256:eba7011090323c1dadf18b3b689845fd96a61ba0a1dfbd7f24b921398affc357 \
+ --hash=sha256:efa1909120ce98bbb3777e8b6f92237f5d5c8ea6758efea36a473e1d38f7d3e4 \
+ --hash=sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0
+ # via astroid
+mccabe==0.6.1 \
+ --hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
+ --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f
+ # via pylint
+multidict==5.0.2 \
+ --hash=sha256:060d68ae3e674c913ec41a464916f12c4d7ff17a3a9ebbf37ba7f2c681c2b33e \
+ --hash=sha256:06f39f0ddc308dab4e5fa282d145f90cd38d7ed75390fc83335636909a9ec191 \
+ --hash=sha256:17847fede1aafdb7e74e01bb34ab47a1a1ea726e8184c623c45d7e428d2d5d34 \
+ --hash=sha256:1cd102057b09223b919f9447c669cf2efabeefb42a42ae6233f25ffd7ee31a79 \
+ --hash=sha256:20cc9b2dd31761990abff7d0e63cd14dbfca4ebb52a77afc917b603473951a38 \
+ --hash=sha256:2576e30bbec004e863d87216bc34abe24962cc2e964613241a1c01c7681092ab \
+ --hash=sha256:2ab9cad4c5ef5c41e1123ed1f89f555aabefb9391d4e01fd6182de970b7267ed \
+ --hash=sha256:359ea00e1b53ceef282232308da9d9a3f60d645868a97f64df19485c7f9ef628 \
+ --hash=sha256:3e61cc244fd30bd9fdfae13bdd0c5ec65da51a86575ff1191255cae677045ffe \
+ --hash=sha256:43c7a87d8c31913311a1ab24b138254a0ee89142983b327a2c2eab7a7d10fea9 \
+ --hash=sha256:4a3f19da871befa53b48dd81ee48542f519beffa13090dc135fffc18d8fe36db \
+ --hash=sha256:4df708ef412fd9b59b7e6c77857e64c1f6b4c0116b751cb399384ec9a28baa66 \
+ --hash=sha256:59182e975b8c197d0146a003d0f0d5dc5487ce4899502061d8df585b0f51fba2 \
+ --hash=sha256:6128d2c0956fd60e39ec7d1c8f79426f0c915d36458df59ddd1f0cff0340305f \
+ --hash=sha256:6168839491a533fa75f3f5d48acbb829475e6c7d9fa5c6e245153b5f79b986a3 \
+ --hash=sha256:62abab8088704121297d39c8f47156cb8fab1da731f513e59ba73946b22cf3d0 \
+ --hash=sha256:653b2bbb0bbf282c37279dd04f429947ac92713049e1efc615f68d4e64b1dbc2 \
+ --hash=sha256:6566749cd78cb37cbf8e8171b5cd2cbfc03c99f0891de12255cf17a11c07b1a3 \
+ --hash=sha256:76cbdb22f48de64811f9ce1dd4dee09665f84f32d6a26de249a50c1e90e244e0 \
+ --hash=sha256:8efcf070d60fd497db771429b1c769a3783e3a0dd96c78c027e676990176adc5 \
+ --hash=sha256:8fa4549f341a057feec4c3139056ba73e17ed03a506469f447797a51f85081b5 \
+ --hash=sha256:9380b3f2b00b23a4106ba9dd022df3e6e2e84e1788acdbdd27603b621b3288df \
+ --hash=sha256:9ed9b280f7778ad6f71826b38a73c2fdca4077817c64bc1102fdada58e75c03c \
+ --hash=sha256:a7b8b5bd16376c8ac2977748bd978a200326af5145d8d0e7f799e2b355d425b6 \
+ --hash=sha256:af271c2540d1cd2a137bef8d95a8052230aa1cda26dd3b2c73d858d89993d518 \
+ --hash=sha256:b561e76c9e21402d9a446cdae13398f9942388b9bff529f32dfa46220af54d00 \
+ --hash=sha256:b82400ef848bbac6b9035a105ac6acaa1fb3eea0d164e35bbb21619b88e49fed \
+ --hash=sha256:b98af08d7bb37d3456a22f689819ea793e8d6961b9629322d7728c4039071641 \
+ --hash=sha256:c58e53e1c73109fdf4b759db9f2939325f510a8a5215135330fe6755921e4886 \
+ --hash=sha256:cbabfc12b401d074298bfda099c58dfa5348415ae2e4ec841290627cb7cb6b2e \
+ --hash=sha256:d4a6fb98e9e9be3f7d70fd3e852369c00a027bd5ed0f3e8ade3821bcad257408 \
+ --hash=sha256:d99da85d6890267292065e654a329e1d2f483a5d2485e347383800e616a8c0b1 \
+ --hash=sha256:e58db0e0d60029915f7fc95a8683fa815e204f2e1990f1fb46a7778d57ca8c35 \
+ --hash=sha256:e5bf89fe57f702a046c7ec718fe330ed50efd4bcf74722940db2eb0919cddb1c \
+ --hash=sha256:f612e8ef8408391a4a3366e3508bab8ef97b063b4918a317cb6e6de4415f01af \
+ --hash=sha256:f65a2442c113afde52fb09f9a6276bbc31da71add99dc76c3adf6083234e07c6 \
+ --hash=sha256:fa0503947a99a1be94f799fac89d67a5e20c333e78ddae16e8534b151cdc588a
+ # via yarl
+pyflakes==2.3.1 \
+ --hash=sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3 \
+ --hash=sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db
+ # via -r contrib/automation/linux-requirements.txt.in
+pygments==2.9.0 \
+ --hash=sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f \
+ --hash=sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e
+ # via -r contrib/automation/linux-requirements.txt.in
+pylint==2.6.2 \
+ --hash=sha256:718b74786ea7ed07aa0c58bf572154d4679f960d26e9641cc1de204a30b87fc9 \
+ --hash=sha256:e71c2e9614a4f06e36498f310027942b0f4f2fde20aebb01655b31edc63b9eaf
+ # via -r contrib/automation/linux-requirements.txt.in
+python-levenshtein==0.12.2 \
+ --hash=sha256:dc2395fbd148a1ab31090dd113c366695934b9e85fe5a4b2a032745efd0346f6
+ # via -r contrib/automation/linux-requirements.txt.in
+pyyaml==5.3.1 \
+ --hash=sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97 \
+ --hash=sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76 \
+ --hash=sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2 \
+ --hash=sha256:6034f55dab5fea9e53f436aa68fa3ace2634918e8b5994d82f3621c04ff5ed2e \
+ --hash=sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648 \
+ --hash=sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf \
+ --hash=sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f \
+ --hash=sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2 \
+ --hash=sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee \
+ --hash=sha256:ad9c67312c84def58f3c04504727ca879cb0013b2517c85a9a253f0cb6380c0a \
+ --hash=sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d \
+ --hash=sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c \
+ --hash=sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a
+ # via vcrpy
+six==1.16.0 \
+ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
+ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
+ # via
+ # astroid
+ # vcrpy
+toml==0.10.2 \
+ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
+ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f
+ # via pylint
+typed-ast==1.4.3 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
+ --hash=sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace \
+ --hash=sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff \
+ --hash=sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266 \
+ --hash=sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528 \
+ --hash=sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6 \
+ --hash=sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808 \
+ --hash=sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4 \
+ --hash=sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363 \
+ --hash=sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341 \
+ --hash=sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04 \
+ --hash=sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41 \
+ --hash=sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e \
+ --hash=sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3 \
+ --hash=sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899 \
+ --hash=sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805 \
+ --hash=sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c \
+ --hash=sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c \
+ --hash=sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39 \
+ --hash=sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a \
+ --hash=sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3 \
+ --hash=sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7 \
+ --hash=sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f \
+ --hash=sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075 \
+ --hash=sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0 \
+ --hash=sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40 \
+ --hash=sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428 \
+ --hash=sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927 \
+ --hash=sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3 \
+ --hash=sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f \
+ --hash=sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65
+ # via
+ # -r contrib/automation/linux-requirements.txt.in
+ # astroid
+vcrpy==4.1.1 \
+ --hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \
+ --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599
+ # via -r contrib/automation/linux-requirements.txt.in
+wrapt==1.12.1 \
+ --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7
+ # via
+ # astroid
+ # vcrpy
+yarl==1.3.0 \
+ --hash=sha256:024ecdc12bc02b321bc66b41327f930d1c2c543fa9a561b39861da9388ba7aa9 \
+ --hash=sha256:2f3010703295fbe1aec51023740871e64bb9664c789cba5a6bdf404e93f7568f \
+ --hash=sha256:3890ab952d508523ef4881457c4099056546593fa05e93da84c7250516e632eb \
+ --hash=sha256:3e2724eb9af5dc41648e5bb304fcf4891adc33258c6e14e2a7414ea32541e320 \
+ --hash=sha256:5badb97dd0abf26623a9982cd448ff12cb39b8e4c94032ccdedf22ce01a64842 \
+ --hash=sha256:73f447d11b530d860ca1e6b582f947688286ad16ca42256413083d13f260b7a0 \
+ --hash=sha256:7ab825726f2940c16d92aaec7d204cfc34ac26c0040da727cf8ba87255a33829 \
+ --hash=sha256:b25de84a8c20540531526dfbb0e2d2b648c13fd5dd126728c496d7c3fea33310 \
+ --hash=sha256:c6e341f5a6562af74ba55205dbd56d248daf1b5748ec48a0200ba227bb9e33f4 \
+ --hash=sha256:c9bb7c249c4432cd47e75af3864bc02d26c9594f49c82e2a28624417f0ae63b8 \
+ --hash=sha256:e060906c0c585565c718d1c3841747b61c5439af2211e185f6739a9412dfbde1
+ # via vcrpy
+
+# WARNING: The following packages were not pinned, but pip requires them to be
+# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
+# setuptools
diff --git a/contrib/automation/linux-requirements-py3.txt b/contrib/automation/linux-requirements-py3.txt
--- a/contrib/automation/linux-requirements-py3.txt
+++ b/contrib/automation/linux-requirements-py3.txt
@@ -6,208 +6,299 @@
#
appdirs==1.4.4 \
--hash=sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41 \
- --hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128 \
+ --hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128
# via black
-astroid==2.4.2 \
- --hash=sha256:2f4078c2a41bf377eea06d71c9d2ba4eb8f6b1af2135bec27bbbb7d8f12bb703 \
- --hash=sha256:bc58d83eb610252fd8de6363e39d4f1d0619c894b0ed24603b881c02e64c7386 \
+astroid==2.5.6 \
+ --hash=sha256:4db03ab5fc3340cf619dbc25e42c2cc3755154ce6009469766d7143d1fc2ee4e \
+ --hash=sha256:8a398dfce302c13f14bab13e2b14fe385d32b73f4e4853b9bdfb64598baa1975
# via pylint
-attrs==20.2.0 \
- --hash=sha256:26b54ddbbb9ee1d34d5d3668dd37d6cf74990ab23c828c2888dccdceee395594 \
- --hash=sha256:fce7fc47dfc976152e82d53ff92fa0407700c21acd20886a13777a0d20e655dc \
+attrs==21.1.0 \
+ --hash=sha256:3901be1cb7c2a780f14668691474d9252c070a756be0a9ead98cfeabfa11aeb8 \
+ --hash=sha256:8ee1e5f5a1afc5b19bdfae4fdf0c35ed324074bdce3500c939842c8f818645d9
# via black
black==19.10b0 ; python_version >= "3.6" and platform_python_implementation != "PyPy" \
--hash=sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b \
- --hash=sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539 \
+ --hash=sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539
# via -r contrib/automation/linux-requirements.txt.in
click==7.1.2 \
--hash=sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a \
- --hash=sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc \
+ --hash=sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc
# via black
-docutils==0.16 \
- --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
- --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \
+docutils==0.17.1 \
+ --hash=sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125 \
+ --hash=sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61
# via -r contrib/automation/linux-requirements.txt.in
fuzzywuzzy==0.18.0 \
--hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
- --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993 \
+ --hash=sha256:928244b28db720d1e0ee7587acf660ea49d7e4c632569cad4f1cd7e68a5f0993
# via -r contrib/automation/linux-requirements.txt.in
-idna==2.10 \
- --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \
- --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 \
+idna==3.1 \
+ --hash=sha256:5205d03e7bcbb919cc9c19885f9920d622ca52448306f2377daede5cf3faac16 \
+ --hash=sha256:c5b02147e01ea9920e6b0a3f1f7bb833612d507592c837a6c49552768f4054e1
# via yarl
isort==4.3.21 \
--hash=sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1 \
- --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd \
- # via -r contrib/automation/linux-requirements.txt.in, pylint
-lazy-object-proxy==1.4.3 \
- --hash=sha256:0c4b206227a8097f05c4dbdd323c50edf81f15db3b8dc064d08c62d37e1a504d \
- --hash=sha256:194d092e6f246b906e8f70884e620e459fc54db3259e60cf69a4d66c3fda3449 \
- --hash=sha256:1be7e4c9f96948003609aa6c974ae59830a6baecc5376c25c92d7d697e684c08 \
- --hash=sha256:4677f594e474c91da97f489fea5b7daa17b5517190899cf213697e48d3902f5a \
- --hash=sha256:48dab84ebd4831077b150572aec802f303117c8cc5c871e182447281ebf3ac50 \
- --hash=sha256:5541cada25cd173702dbd99f8e22434105456314462326f06dba3e180f203dfd \
- --hash=sha256:59f79fef100b09564bc2df42ea2d8d21a64fdcda64979c0fa3db7bdaabaf6239 \
- --hash=sha256:8d859b89baf8ef7f8bc6b00aa20316483d67f0b1cbf422f5b4dc56701c8f2ffb \
- --hash=sha256:9254f4358b9b541e3441b007a0ea0764b9d056afdeafc1a5569eee1cc6c1b9ea \
- --hash=sha256:9651375199045a358eb6741df3e02a651e0330be090b3bc79f6d0de31a80ec3e \
- --hash=sha256:97bb5884f6f1cdce0099f86b907aa41c970c3c672ac8b9c8352789e103cf3156 \
- --hash=sha256:9b15f3f4c0f35727d3a0fba4b770b3c4ebbb1fa907dbcc046a1d2799f3edd142 \
- --hash=sha256:a2238e9d1bb71a56cd710611a1614d1194dc10a175c1e08d75e1a7bcc250d442 \
- --hash=sha256:a6ae12d08c0bf9909ce12385803a543bfe99b95fe01e752536a60af2b7797c62 \
- --hash=sha256:ca0a928a3ddbc5725be2dd1cf895ec0a254798915fb3a36af0964a0a4149e3db \
- --hash=sha256:cb2c7c57005a6804ab66f106ceb8482da55f5314b7fcb06551db1edae4ad1531 \
- --hash=sha256:d74bb8693bf9cf75ac3b47a54d716bbb1a92648d5f781fc799347cfc95952383 \
- --hash=sha256:d945239a5639b3ff35b70a88c5f2f491913eb94871780ebfabb2568bd58afc5a \
- --hash=sha256:eba7011090323c1dadf18b3b689845fd96a61ba0a1dfbd7f24b921398affc357 \
- --hash=sha256:efa1909120ce98bbb3777e8b6f92237f5d5c8ea6758efea36a473e1d38f7d3e4 \
- --hash=sha256:f3900e8a5de27447acbf900b4750b0ddfd7ec1ea7fbaf11dfa911141bc522af0 \
+ --hash=sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd
+ # via
+ # -r contrib/automation/linux-requirements.txt.in
+ # pylint
+lazy-object-proxy==1.6.0 \
+ --hash=sha256:17e0967ba374fc24141738c69736da90e94419338fd4c7c7bef01ee26b339653 \
+ --hash=sha256:1fee665d2638491f4d6e55bd483e15ef21f6c8c2095f235fef72601021e64f61 \
+ --hash=sha256:22ddd618cefe54305df49e4c069fa65715be4ad0e78e8d252a33debf00f6ede2 \
+ --hash=sha256:24a5045889cc2729033b3e604d496c2b6f588c754f7a62027ad4437a7ecc4837 \
+ --hash=sha256:410283732af311b51b837894fa2f24f2c0039aa7f220135192b38fcc42bd43d3 \
+ --hash=sha256:4732c765372bd78a2d6b2150a6e99d00a78ec963375f236979c0626b97ed8e43 \
+ --hash=sha256:489000d368377571c6f982fba6497f2aa13c6d1facc40660963da62f5c379726 \
+ --hash=sha256:4f60460e9f1eb632584c9685bccea152f4ac2130e299784dbaf9fae9f49891b3 \
+ --hash=sha256:5743a5ab42ae40caa8421b320ebf3a998f89c85cdc8376d6b2e00bd12bd1b587 \
+ --hash=sha256:85fb7608121fd5621cc4377a8961d0b32ccf84a7285b4f1d21988b2eae2868e8 \
+ --hash=sha256:9698110e36e2df951c7c36b6729e96429c9c32b3331989ef19976592c5f3c77a \
+ --hash=sha256:9d397bf41caad3f489e10774667310d73cb9c4258e9aed94b9ec734b34b495fd \
+ --hash=sha256:b579f8acbf2bdd9ea200b1d5dea36abd93cabf56cf626ab9c744a432e15c815f \
+ --hash=sha256:b865b01a2e7f96db0c5d12cfea590f98d8c5ba64ad222300d93ce6ff9138bcad \
+ --hash=sha256:bf34e368e8dd976423396555078def5cfc3039ebc6fc06d1ae2c5a65eebbcde4 \
+ --hash=sha256:c6938967f8528b3668622a9ed3b31d145fab161a32f5891ea7b84f6b790be05b \
+ --hash=sha256:d1c2676e3d840852a2de7c7d5d76407c772927addff8d742b9808fe0afccebdf \
+ --hash=sha256:d7124f52f3bd259f510651450e18e0fd081ed82f3c08541dffc7b94b883aa981 \
+ --hash=sha256:d900d949b707778696fdf01036f58c9876a0d8bfe116e8d220cfd4b15f14e741 \
+ --hash=sha256:ebfd274dcd5133e0afae738e6d9da4323c3eb021b3e13052d8cbd0e457b1256e \
+ --hash=sha256:ed361bb83436f117f9917d282a456f9e5009ea12fd6de8742d1a4752c3017e93 \
+ --hash=sha256:f5144c75445ae3ca2057faac03fda5a902eff196702b0a24daf1d6ce0650514b
# via astroid
mccabe==0.6.1 \
--hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
- --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f \
+ --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f
# via pylint
-multidict==4.7.6 \
- --hash=sha256:1ece5a3369835c20ed57adadc663400b5525904e53bae59ec854a5d36b39b21a \
- --hash=sha256:275ca32383bc5d1894b6975bb4ca6a7ff16ab76fa622967625baeebcf8079000 \
- --hash=sha256:3750f2205b800aac4bb03b5ae48025a64e474d2c6cc79547988ba1d4122a09e2 \
- --hash=sha256:4538273208e7294b2659b1602490f4ed3ab1c8cf9dbdd817e0e9db8e64be2507 \
- --hash=sha256:5141c13374e6b25fe6bf092052ab55c0c03d21bd66c94a0e3ae371d3e4d865a5 \
- --hash=sha256:51a4d210404ac61d32dada00a50ea7ba412e6ea945bbe992e4d7a595276d2ec7 \
- --hash=sha256:5cf311a0f5ef80fe73e4f4c0f0998ec08f954a6ec72b746f3c179e37de1d210d \
- --hash=sha256:6513728873f4326999429a8b00fc7ceddb2509b01d5fd3f3be7881a257b8d463 \
- --hash=sha256:7388d2ef3c55a8ba80da62ecfafa06a1c097c18032a501ffd4cabbc52d7f2b19 \
- --hash=sha256:9456e90649005ad40558f4cf51dbb842e32807df75146c6d940b6f5abb4a78f3 \
- --hash=sha256:c026fe9a05130e44157b98fea3ab12969e5b60691a276150db9eda71710cd10b \
- --hash=sha256:d14842362ed4cf63751648e7672f7174c9818459d169231d03c56e84daf90b7c \
- --hash=sha256:e0d072ae0f2a179c375f67e3da300b47e1a83293c554450b29c900e50afaae87 \
- --hash=sha256:f07acae137b71af3bb548bd8da720956a3bc9f9a0b87733e0899226a2317aeb7 \
- --hash=sha256:fbb77a75e529021e7c4a8d4e823d88ef4d23674a202be4f5addffc72cbb91430 \
- --hash=sha256:fcfbb44c59af3f8ea984de67ec7c306f618a3ec771c2843804069917a8f2e255 \
- --hash=sha256:feed85993dbdb1dbc29102f50bca65bdc68f2c0c8d352468c25b54874f23c39d \
+multidict==5.1.0 \
+ --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \
+ --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \
+ --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \
+ --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \
+ --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \
+ --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \
+ --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \
+ --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 \
+ --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \
+ --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \
+ --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \
+ --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \
+ --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \
+ --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \
+ --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \
+ --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \
+ --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \
+ --hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \
+ --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \
+ --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \
+ --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \
+ --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \
+ --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \
+ --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \
+ --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \
+ --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \
+ --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \
+ --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \
+ --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \
+ --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \
+ --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \
+ --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \
+ --hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \
+ --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \
+ --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \
+ --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \
+ --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80
# via yarl
-pathspec==0.8.0 \
- --hash=sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0 \
- --hash=sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061 \
+pathspec==0.8.1 \
+ --hash=sha256:86379d6b86d75816baba717e64b1a3a3469deb93bb76d613c9ce79edc5cb68fd \
+ --hash=sha256:aa0cb481c4041bf52ffa7b0d8fa6cd3e88a2ca4879c533c9153882ee2556790d
# via black
-pyflakes==2.2.0 \
- --hash=sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92 \
- --hash=sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8 \
+pyflakes==2.3.1 \
+ --hash=sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3 \
+ --hash=sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db
# via -r contrib/automation/linux-requirements.txt.in
-pygments==2.7.1 \
- --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \
- --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7 \
+pygments==2.9.0 \
+ --hash=sha256:a18f47b506a429f6f4b9df81bb02beab9ca21d0a5fee38ed15aef65f0545519f \
+ --hash=sha256:d66e804411278594d764fc69ec36ec13d9ae9147193a1740cd34d272ca383b8e
# via -r contrib/automation/linux-requirements.txt.in
-pylint==2.6.0 \
- --hash=sha256:bb4a908c9dadbc3aac18860550e870f58e1a02c9f2c204fdf5693d73be061210 \
- --hash=sha256:bfe68f020f8a0fece830a22dd4d5dddb4ecc6137db04face4c3420a46a52239f \
+pylint==2.8.2 \
+ --hash=sha256:586d8fa9b1891f4b725f587ef267abe2a1bad89d6b184520c7f07a253dd6e217 \
+ --hash=sha256:f7e2072654a6b6afdf5e2fb38147d3e2d2d43c89f648637baab63e026481279b
+ # via -r contrib/automation/linux-requirements.txt.in
+python-levenshtein==0.12.2 \
+ --hash=sha256:dc2395fbd148a1ab31090dd113c366695934b9e85fe5a4b2a032745efd0346f6
# via -r contrib/automation/linux-requirements.txt.in
-python-levenshtein==0.12.0 \
- --hash=sha256:033a11de5e3d19ea25c9302d11224e1a1898fe5abd23c61c7c360c25195e3eb1 \
- # via -r contrib/automation/linux-requirements.txt.in
-pyyaml==5.3.1 \
- --hash=sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97 \
- --hash=sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76 \
- --hash=sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2 \
- --hash=sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648 \
- --hash=sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf \
- --hash=sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f \
- --hash=sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2 \
- --hash=sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee \
- --hash=sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d \
- --hash=sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c \
- --hash=sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a \
+pyyaml==5.4.1 \
+ --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \
+ --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \
+ --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \
+ --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \
+ --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \
+ --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \
+ --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \
+ --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \
+ --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \
+ --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \
+ --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \
+ --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \
+ --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \
+ --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \
+ --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \
+ --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \
+ --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \
+ --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \
+ --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \
+ --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \
+ --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \
+ --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \
+ --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \
+ --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \
+ --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \
+ --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \
+ --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \
+ --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \
+ --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0
# via vcrpy
-regex==2020.9.27 \
- --hash=sha256:088afc8c63e7bd187a3c70a94b9e50ab3f17e1d3f52a32750b5b77dbe99ef5ef \
- --hash=sha256:1fe0a41437bbd06063aa184c34804efa886bcc128222e9916310c92cd54c3b4c \
- --hash=sha256:3d20024a70b97b4f9546696cbf2fd30bae5f42229fbddf8661261b1eaff0deb7 \
- --hash=sha256:41bb65f54bba392643557e617316d0d899ed5b4946dccee1cb6696152b29844b \
- --hash=sha256:4318d56bccfe7d43e5addb272406ade7a2274da4b70eb15922a071c58ab0108c \
- --hash=sha256:4707f3695b34335afdfb09be3802c87fa0bc27030471dbc082f815f23688bc63 \
- --hash=sha256:49f23ebd5ac073765ecbcf046edc10d63dcab2f4ae2bce160982cb30df0c0302 \
- --hash=sha256:5533a959a1748a5c042a6da71fe9267a908e21eded7a4f373efd23a2cbdb0ecc \
- --hash=sha256:5d892a4f1c999834eaa3c32bc9e8b976c5825116cde553928c4c8e7e48ebda67 \
- --hash=sha256:5f18875ac23d9aa2f060838e8b79093e8bb2313dbaaa9f54c6d8e52a5df097be \
- --hash=sha256:60b0e9e6dc45683e569ec37c55ac20c582973841927a85f2d8a7d20ee80216ab \
- --hash=sha256:816064fc915796ea1f26966163f6845de5af78923dfcecf6551e095f00983650 \
- --hash=sha256:84cada8effefe9a9f53f9b0d2ba9b7b6f5edf8d2155f9fdbe34616e06ececf81 \
- --hash=sha256:84e9407db1b2eb368b7ecc283121b5e592c9aaedbe8c78b1a2f1102eb2e21d19 \
- --hash=sha256:8d69cef61fa50c8133382e61fd97439de1ae623fe943578e477e76a9d9471637 \
- --hash=sha256:9a02d0ae31d35e1ec12a4ea4d4cca990800f66a917d0fb997b20fbc13f5321fc \
- --hash=sha256:9bc13e0d20b97ffb07821aa3e113f9998e84994fe4d159ffa3d3a9d1b805043b \
- --hash=sha256:a6f32aea4260dfe0e55dc9733ea162ea38f0ea86aa7d0f77b15beac5bf7b369d \
- --hash=sha256:ae91972f8ac958039920ef6e8769277c084971a142ce2b660691793ae44aae6b \
- --hash=sha256:c570f6fa14b9c4c8a4924aaad354652366577b4f98213cf76305067144f7b100 \
- --hash=sha256:c9443124c67b1515e4fe0bb0aa18df640965e1030f468a2a5dc2589b26d130ad \
- --hash=sha256:d23a18037313714fb3bb5a94434d3151ee4300bae631894b1ac08111abeaa4a3 \
- --hash=sha256:eaf548d117b6737df379fdd53bdde4f08870e66d7ea653e230477f071f861121 \
- --hash=sha256:ebbe29186a3d9b0c591e71b7393f1ae08c83cb2d8e517d2a822b8f7ec99dfd8b \
- --hash=sha256:eda4771e0ace7f67f58bc5b560e27fb20f32a148cbc993b0c3835970935c2707 \
- --hash=sha256:f1b3afc574a3db3b25c89161059d857bd4909a1269b0b3cb3c904677c8c4a3f7 \
- --hash=sha256:f2388013e68e750eaa16ccbea62d4130180c26abb1d8e5d584b9baf69672b30f \
+regex==2021.4.4 \
+ --hash=sha256:01afaf2ec48e196ba91b37451aa353cb7eda77efe518e481707e0515025f0cd5 \
+ --hash=sha256:11d773d75fa650cd36f68d7ca936e3c7afaae41b863b8c387a22aaa78d3c5c79 \
+ --hash=sha256:18c071c3eb09c30a264879f0d310d37fe5d3a3111662438889ae2eb6fc570c31 \
+ --hash=sha256:1e1c20e29358165242928c2de1482fb2cf4ea54a6a6dea2bd7a0e0d8ee321500 \
+ --hash=sha256:281d2fd05555079448537fe108d79eb031b403dac622621c78944c235f3fcf11 \
+ --hash=sha256:314d66636c494ed9c148a42731b3834496cc9a2c4251b1661e40936814542b14 \
+ --hash=sha256:32e65442138b7b76dd8173ffa2cf67356b7bc1768851dded39a7a13bf9223da3 \
+ --hash=sha256:339456e7d8c06dd36a22e451d58ef72cef293112b559010db3d054d5560ef439 \
+ --hash=sha256:3916d08be28a1149fb97f7728fca1f7c15d309a9f9682d89d79db75d5e52091c \
+ --hash=sha256:3a9cd17e6e5c7eb328517969e0cb0c3d31fd329298dd0c04af99ebf42e904f82 \
+ --hash=sha256:47bf5bf60cf04d72bf6055ae5927a0bd9016096bf3d742fa50d9bf9f45aa0711 \
+ --hash=sha256:4c46e22a0933dd783467cf32b3516299fb98cfebd895817d685130cc50cd1093 \
+ --hash=sha256:4c557a7b470908b1712fe27fb1ef20772b78079808c87d20a90d051660b1d69a \
+ --hash=sha256:52ba3d3f9b942c49d7e4bc105bb28551c44065f139a65062ab7912bef10c9afb \
+ --hash=sha256:563085e55b0d4fb8f746f6a335893bda5c2cef43b2f0258fe1020ab1dd874df8 \
+ --hash=sha256:598585c9f0af8374c28edd609eb291b5726d7cbce16be6a8b95aa074d252ee17 \
+ --hash=sha256:619d71c59a78b84d7f18891fe914446d07edd48dc8328c8e149cbe0929b4e000 \
+ --hash=sha256:67bdb9702427ceddc6ef3dc382455e90f785af4c13d495f9626861763ee13f9d \
+ --hash=sha256:6d1b01031dedf2503631d0903cb563743f397ccaf6607a5e3b19a3d76fc10480 \
+ --hash=sha256:741a9647fcf2e45f3a1cf0e24f5e17febf3efe8d4ba1281dcc3aa0459ef424dc \
+ --hash=sha256:7c2a1af393fcc09e898beba5dd59196edaa3116191cc7257f9224beaed3e1aa0 \
+ --hash=sha256:7d9884d86dd4dd489e981d94a65cd30d6f07203d90e98f6f657f05170f6324c9 \
+ --hash=sha256:90f11ff637fe8798933fb29f5ae1148c978cccb0452005bf4c69e13db951e765 \
+ --hash=sha256:919859aa909429fb5aa9cf8807f6045592c85ef56fdd30a9a3747e513db2536e \
+ --hash=sha256:96fcd1888ab4d03adfc9303a7b3c0bd78c5412b2bfbe76db5b56d9eae004907a \
+ --hash=sha256:97f29f57d5b84e73fbaf99ab3e26134e6687348e95ef6b48cfd2c06807005a07 \
+ --hash=sha256:980d7be47c84979d9136328d882f67ec5e50008681d94ecc8afa8a65ed1f4a6f \
+ --hash=sha256:a91aa8619b23b79bcbeb37abe286f2f408d2f2d6f29a17237afda55bb54e7aac \
+ --hash=sha256:ade17eb5d643b7fead300a1641e9f45401c98eee23763e9ed66a43f92f20b4a7 \
+ --hash=sha256:b9c3db21af35e3b3c05764461b262d6f05bbca08a71a7849fd79d47ba7bc33ed \
+ --hash=sha256:bd28bc2e3a772acbb07787c6308e00d9626ff89e3bfcdebe87fa5afbfdedf968 \
+ --hash=sha256:bf5824bfac591ddb2c1f0a5f4ab72da28994548c708d2191e3b87dd207eb3ad7 \
+ --hash=sha256:c0502c0fadef0d23b128605d69b58edb2c681c25d44574fc673b0e52dce71ee2 \
+ --hash=sha256:c38c71df845e2aabb7fb0b920d11a1b5ac8526005e533a8920aea97efb8ec6a4 \
+ --hash=sha256:ce15b6d103daff8e9fee13cf7f0add05245a05d866e73926c358e871221eae87 \
+ --hash=sha256:d3029c340cfbb3ac0a71798100ccc13b97dddf373a4ae56b6a72cf70dfd53bc8 \
+ --hash=sha256:e512d8ef5ad7b898cdb2d8ee1cb09a8339e4f8be706d27eaa180c2f177248a10 \
+ --hash=sha256:e8e5b509d5c2ff12f8418006d5a90e9436766133b564db0abaec92fd27fcee29 \
+ --hash=sha256:ee54ff27bf0afaf4c3b3a62bcd016c12c3fdb4ec4f413391a90bd38bc3624605 \
+ --hash=sha256:fa4537fb4a98fe8fde99626e4681cc644bdcf2a795038533f9f711513a862ae6 \
+ --hash=sha256:fd45ff9293d9274c5008a2054ecef86a9bfe819a67c7be1afb65e69b405b3042
# via black
-six==1.15.0 \
- --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \
- --hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced \
- # via astroid, vcrpy
-toml==0.10.1 \
- --hash=sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f \
- --hash=sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88 \
- # via black, pylint
-typed-ast==1.4.1 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
- --hash=sha256:0666aa36131496aed8f7be0410ff974562ab7eeac11ef351def9ea6fa28f6355 \
- --hash=sha256:0c2c07682d61a629b68433afb159376e24e5b2fd4641d35424e462169c0a7919 \
- --hash=sha256:249862707802d40f7f29f6e1aad8d84b5aa9e44552d2cc17384b209f091276aa \
- --hash=sha256:24995c843eb0ad11a4527b026b4dde3da70e1f2d8806c99b7b4a7cf491612652 \
- --hash=sha256:269151951236b0f9a6f04015a9004084a5ab0d5f19b57de779f908621e7d8b75 \
- --hash=sha256:4083861b0aa07990b619bd7ddc365eb7fa4b817e99cf5f8d9cf21a42780f6e01 \
- --hash=sha256:498b0f36cc7054c1fead3d7fc59d2150f4d5c6c56ba7fb150c013fbc683a8d2d \
- --hash=sha256:4e3e5da80ccbebfff202a67bf900d081906c358ccc3d5e3c8aea42fdfdfd51c1 \
- --hash=sha256:6daac9731f172c2a22ade6ed0c00197ee7cc1221aa84cfdf9c31defeb059a907 \
- --hash=sha256:715ff2f2df46121071622063fc7543d9b1fd19ebfc4f5c8895af64a77a8c852c \
- --hash=sha256:73d785a950fc82dd2a25897d525d003f6378d1cb23ab305578394694202a58c3 \
- --hash=sha256:8c8aaad94455178e3187ab22c8b01a3837f8ee50e09cf31f1ba129eb293ec30b \
- --hash=sha256:8ce678dbaf790dbdb3eba24056d5364fb45944f33553dd5869b7580cdbb83614 \
- --hash=sha256:aaee9905aee35ba5905cfb3c62f3e83b3bec7b39413f0a7f19be4e547ea01ebb \
- --hash=sha256:bcd3b13b56ea479b3650b82cabd6b5343a625b0ced5429e4ccad28a8973f301b \
- --hash=sha256:c9e348e02e4d2b4a8b2eedb48210430658df6951fa484e59de33ff773fbd4b41 \
- --hash=sha256:d205b1b46085271b4e15f670058ce182bd1199e56b317bf2ec004b6a44f911f6 \
- --hash=sha256:d43943ef777f9a1c42bf4e552ba23ac77a6351de620aa9acf64ad54933ad4d34 \
- --hash=sha256:d5d33e9e7af3b34a40dc05f498939f0ebf187f07c385fd58d591c533ad8562fe \
- --hash=sha256:fc0fea399acb12edbf8a628ba8d2312f583bdbdb3335635db062fa98cf71fca4 \
- --hash=sha256:fe460b922ec15dd205595c9b5b99e2f056fd98ae8f9f56b888e7a17dc2b757e7 \
- # via -r contrib/automation/linux-requirements.txt.in, astroid, black
-typing-extensions==3.7.4.3 \
- --hash=sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918 \
- --hash=sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c \
- --hash=sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f \
+six==1.16.0 \
+ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
+ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
+ # via vcrpy
+toml==0.10.2 \
+ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
+ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f
+ # via
+ # black
+ # pylint
+typed-ast==1.4.3 ; python_version >= "3.0" and platform_python_implementation != "PyPy" \
+ --hash=sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace \
+ --hash=sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff \
+ --hash=sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266 \
+ --hash=sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528 \
+ --hash=sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6 \
+ --hash=sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808 \
+ --hash=sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4 \
+ --hash=sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363 \
+ --hash=sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341 \
+ --hash=sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04 \
+ --hash=sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41 \
+ --hash=sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e \
+ --hash=sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3 \
+ --hash=sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899 \
+ --hash=sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805 \
+ --hash=sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c \
+ --hash=sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c \
+ --hash=sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39 \
+ --hash=sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a \
+ --hash=sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3 \
+ --hash=sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7 \
+ --hash=sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f \
+ --hash=sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075 \
+ --hash=sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0 \
+ --hash=sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40 \
+ --hash=sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428 \
+ --hash=sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927 \
+ --hash=sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3 \
+ --hash=sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f \
+ --hash=sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65
+ # via
+ # -r contrib/automation/linux-requirements.txt.in
+ # astroid
+ # black
+typing-extensions==3.10.0.0 \
+ --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \
+ --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \
+ --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84
# via yarl
-vcrpy==4.1.0 \
- --hash=sha256:4138e79eb35981ad391406cbb7227bce7eba8bad788dcf1a89c2e4a8b740debe \
- --hash=sha256:d833248442bbc560599add895c9ab0ef518676579e8dc72d8b0933bdb3880253 \
+vcrpy==4.1.1 \
+ --hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \
+ --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599
# via -r contrib/automation/linux-requirements.txt.in
wrapt==1.12.1 \
- --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 \
- # via astroid, vcrpy
-yarl==1.6.0 \
- --hash=sha256:04a54f126a0732af75e5edc9addeaa2113e2ca7c6fce8974a63549a70a25e50e \
- --hash=sha256:3cc860d72ed989f3b1f3abbd6ecf38e412de722fb38b8f1b1a086315cf0d69c5 \
- --hash=sha256:5d84cc36981eb5a8533be79d6c43454c8e6a39ee3118ceaadbd3c029ab2ee580 \
- --hash=sha256:5e447e7f3780f44f890360ea973418025e8c0cdcd7d6a1b221d952600fd945dc \
- --hash=sha256:61d3ea3c175fe45f1498af868879c6ffeb989d4143ac542163c45538ba5ec21b \
- --hash=sha256:67c5ea0970da882eaf9efcf65b66792557c526f8e55f752194eff8ec722c75c2 \
- --hash=sha256:6f6898429ec3c4cfbef12907047136fd7b9e81a6ee9f105b45505e633427330a \
- --hash=sha256:7ce35944e8e61927a8f4eb78f5bc5d1e6da6d40eadd77e3f79d4e9399e263921 \
- --hash=sha256:b7c199d2cbaf892ba0f91ed36d12ff41ecd0dde46cbf64ff4bfe997a3ebc925e \
- --hash=sha256:c15d71a640fb1f8e98a1423f9c64d7f1f6a3a168f803042eaf3a5b5022fde0c1 \
- --hash=sha256:c22607421f49c0cb6ff3ed593a49b6a99c6ffdeaaa6c944cdda83c2393c8864d \
- --hash=sha256:c604998ab8115db802cc55cb1b91619b2831a6128a62ca7eea577fc8ea4d3131 \
- --hash=sha256:d088ea9319e49273f25b1c96a3763bf19a882cff774d1792ae6fba34bd40550a \
- --hash=sha256:db9eb8307219d7e09b33bcb43287222ef35cbcf1586ba9472b0a4b833666ada1 \
- --hash=sha256:e31fef4e7b68184545c3d68baec7074532e077bd1906b040ecfba659737df188 \
- --hash=sha256:e32f0fb443afcfe7f01f95172b66f279938fbc6bdaebe294b0ff6747fb6db020 \
- --hash=sha256:fcbe419805c9b20db9a51d33b942feddbf6e7fb468cb20686fd7089d4164c12a \
+ --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7
+ # via
+ # astroid
+ # vcrpy
+yarl==1.6.3 \
+ --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \
+ --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \
+ --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \
+ --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \
+ --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \
+ --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \
+ --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \
+ --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \
+ --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \
+ --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \
+ --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \
+ --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \
+ --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \
+ --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \
+ --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \
+ --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \
+ --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \
+ --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \
+ --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \
+ --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 \
+ --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \
+ --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \
+ --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \
+ --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \
+ --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \
+ --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \
+ --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \
+ --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \
+ --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \
+ --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \
+ --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \
+ --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \
+ --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \
+ --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \
+ --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \
+ --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \
+ --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71
# via vcrpy
# WARNING: The following packages were not pinned, but pip requires them to be
diff --git a/contrib/check-code.py b/contrib/check-code.py
--- a/contrib/check-code.py
+++ b/contrib/check-code.py
@@ -215,7 +215,6 @@ utestpats = [
"use regex test output patterns instead of sed",
),
(uprefix + r'(true|exit 0)', "explicit zero exit unnecessary"),
- (uprefix + r'.*(?<!\[)\$\?', "explicit exit code checks unnecessary"),
diff --git a/contrib/chg/chg.c b/contrib/chg/chg.c
--- a/contrib/chg/chg.c
+++ b/contrib/chg/chg.c
 const char *baseargv[] = {
- hgcmd, "serve", "--cmdserver",
- "chgunix", "--address", opts->initsockname,
- "--daemon-postexec",
+ hgcmd, "serve", "--no-profile", "--cmdserver",
+ "chgunix", "--address", opts->initsockname, "--daemon-postexec",
"chdir:/",
};
size_t baseargvsize = sizeof(baseargv) / sizeof(baseargv[0]);
diff --git a/contrib/dirstatenonnormalcheck.py b/contrib/dirstatenonnormalcheck.py
--- a/contrib/dirstatenonnormalcheck.py
+++ b/contrib/dirstatenonnormalcheck.py
@@ -11,6 +11,7 @@ from __future__ import absolute_import
from mercurial import (
dirstate,
extensions,
+ pycompat,
)
@@ -18,7 +19,7 @@ def nonnormalentries(dmap):
"""Compute nonnormal entries from dirstate's dmap"""
res = set()
for f, e in dmap.iteritems():
- if e[0] != b'n' or e[3] == -1:
+ if e.state != b'n' or e.mtime == -1:
res.add(f)
return res
@@ -27,18 +28,21 @@ def checkconsistency(ui, orig, dmap, _no
"""Compute nonnormalset from dmap, check that it matches _nonnormalset"""
nonnormalcomputedmap = nonnormalentries(dmap)
if _nonnormalset != nonnormalcomputedmap:
- ui.develwarn(b"%s call to %s\n" % (label, orig), config=b'dirstate')
+ b_orig = pycompat.sysbytes(repr(orig))
+ ui.develwarn(b"%s call to %s\n" % (label, b_orig), config=b'dirstate')
ui.develwarn(b"inconsistency in nonnormalset\n", config=b'dirstate')
- ui.develwarn(b"[nonnormalset] %s\n" % _nonnormalset, config=b'dirstate')
- ui.develwarn(b"[map] %s\n" % nonnormalcomputedmap, config=b'dirstate')
+ b_nonnormal = pycompat.sysbytes(repr(_nonnormalset))
+ ui.develwarn(b"[nonnormalset] %s\n" % b_nonnormal, config=b'dirstate')
+ b_nonnormalcomputed = pycompat.sysbytes(repr(nonnormalcomputedmap))
+ ui.develwarn(b"[map] %s\n" % b_nonnormalcomputed, config=b'dirstate')
-def _checkdirstate(orig, self, arg):
+def _checkdirstate(orig, self, *args, **kwargs):
"""Check nonnormal set consistency before and after the call to orig"""
checkconsistency(
self._ui, orig, self._map, self._map.nonnormalset, b"before"
)
- r = orig(self, arg)
+ r = orig(self, *args, **kwargs)
checkconsistency(
self._ui, orig, self._map, self._map.nonnormalset, b"after"
)
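
The develwarn() changes above all apply the same Python 3 fix: ui.develwarn() formats a bytes string, while repr() returns str, so each value is converted with pycompat.sysbytes() first. A small sketch of the conversion, assuming only that mercurial.pycompat is importable:

    from mercurial import pycompat

    nonnormalset = {b'modified-file', b'removed-file'}
    # On Python 3, b"%s" % nonnormalset raises TypeError because a set is not
    # bytes-like and has no __bytes__; re-encoding repr() avoids that.
    message = b"[nonnormalset] %s\n" % pycompat.sysbytes(repr(nonnormalset))
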
diff --git a/contrib/dumprevlog b/contrib/dumprevlog
--- a/contrib/dumprevlog
+++ b/contrib/dumprevlog
@@ -13,6 +13,10 @@ from mercurial import (
)
from mercurial.utils import procutil
+from mercurial.revlogutils import (
+ constants as revlog_constants,
+)
+
for fp in (sys.stdin, sys.stdout, sys.stderr):
procutil.setbinary(fp)
@@ -32,7 +36,16 @@ def printb(data, end=b'\n'):
for f in sys.argv[1:]:
- r = revlog.revlog(binopen, encoding.strtolocal(f))
+ localf = encoding.strtolocal(f)
+ if not localf.endswith(b'.i'):
+ print("file:", f, file=sys.stderr)
+ print(" invalid filename", file=sys.stderr)
+
+ r = revlog.revlog(
+ binopen,
+ target=(revlog_constants.KIND_OTHER, b'dump-revlog'),
+ radix=localf[:-2],
+ )
print("file:", f)
for i in r:
n = r.node(i)
diff --git a/contrib/fuzz/mpatch_corpus.py b/contrib/fuzz/mpatch_corpus.py
--- a/contrib/fuzz/mpatch_corpus.py
+++ b/contrib/fuzz/mpatch_corpus.py
@@ -1,10 +1,15 @@
from __future__ import absolute_import, print_function
import argparse
+import os
import struct
import sys
import zipfile
+# Add ../.. to sys.path as an absolute path so we can import hg modules
+hgloc = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
+sys.path[0:0] = [hgloc]
+
from mercurial import (
hg,
ui as uimod,
diff --git a/contrib/heptapod-ci.yml b/contrib/heptapod-ci.yml
--- a/contrib/heptapod-ci.yml
+++ b/contrib/heptapod-ci.yml
@@ -139,3 +139,36 @@ check-pytype-py3:
RUNTEST_ARGS: " --allow-slow-tests tests/test-check-pytype.t"
PYTHON: python3
TEST_HGMODULEPOLICY: "c"
+
+# `sh.exe --login` sets a couple of extra environment variables that are defined
+# in the MinGW shell, but switches CWD to /home/$username. The previous value
+# is stored in OLDPWD. Of the added variables, MSYSTEM is crucial to running
+# run-tests.py- it is needed to make run-tests.py generate a `python3` script
+# that satisfies the various shebang lines and delegates to `py -3`.
+.window_runtests_template: &windows_runtests
+ stage: tests
+ before_script:
+ # Temporary until this is adjusted in the environment
+ - $Env:TEMP="C:/Temp"
+ - $Env:TMP="C:/Temp"
+ # TODO: find/install cvs, bzr, perforce, gpg, sqlite3
+
+ script:
+ - echo "Entering script section"
+ - echo "python used, $Env:PYTHON"
+ - Invoke-Expression "$Env:PYTHON -V"
+ - Invoke-Expression "$Env:PYTHON -m black --version"
+ - echo "$Env:RUNTEST_ARGS"
+
+ - C:/MinGW/msys/1.0/bin/sh.exe --login -c 'cd "$OLDPWD" && HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO" HGMODULEPOLICY="$TEST_HGMODULEPOLICY" $PYTHON tests/run-tests.py --color=always $RUNTEST_ARGS'
+
+windows-py3:
+ <<: *windows_runtests
+ when: manual
+ tags:
+ - windows
+ timeout: 2h
+ variables:
+ TEST_HGMODULEPOLICY: "c"
+ RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt"
+ PYTHON: py -3
diff --git a/contrib/hg-ssh b/contrib/hg-ssh
--- a/contrib/hg-ssh
+++ b/contrib/hg-ssh
@@ -31,6 +31,7 @@ command="hg-ssh --read-only repos/*"
from __future__ import absolute_import
import os
+import re
import shlex
import sys
@@ -51,6 +52,12 @@ def main():
dispatch.initstdio()
cwd = os.getcwd()
+ if os.name == 'nt':
+ # os.getcwd() is inconsistent on the capitalization of the drive
+ # letter, so adjust it. see https://bugs.python.org/issue40368
+ if re.match('^[a-z]:', cwd):
+ cwd = cwd[0:1].upper() + cwd[1:]
+
readonly = False
args = sys.argv[1:]
while len(args):
diff --git a/contrib/import-checker.py b/contrib/import-checker.py
--- a/contrib/import-checker.py
+++ b/contrib/import-checker.py
@@ -23,7 +23,7 @@ import testparseutil
# Whitelist of modules that symbols can be directly imported from.
allowsymbolimports = (
'__future__',
- 'bzrlib',
+ 'breezy',
'hgclient',
'mercurial',
'mercurial.hgweb.common',
diff --git a/contrib/install-windows-dependencies.ps1 b/contrib/install-windows-dependencies.ps1
--- a/contrib/install-windows-dependencies.ps1
+++ b/contrib/install-windows-dependencies.ps1
@@ -32,15 +32,15 @@
$PYTHON37_X64_URL = "https://www.python.org/ftp/python/3.7.9/python-3.7.9-amd64.exe"
$PYTHON37_x64_SHA256 = "e69ed52afb5a722e5c56f6c21d594e85c17cb29f12f18bb69751cf1714e0f987"
-$PYTHON38_x86_URL = "https://www.python.org/ftp/python/3.8.6/python-3.8.6.exe"
-$PYTHON38_x86_SHA256 = "287d5df01ff22ff09e6a487ae018603ee19eade71d462ec703850c96f1d5e8a0"
-$PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.6/python-3.8.6-amd64.exe"
-$PYTHON38_x64_SHA256 = "328a257f189cb500606bb26ab0fbdd298ed0e05d8c36540a322a1744f489a0a0"
+$PYTHON38_x86_URL = "https://www.python.org/ftp/python/3.8.10/python-3.8.10.exe"
+$PYTHON38_x86_SHA256 = "ad07633a1f0cd795f3bf9da33729f662281df196b4567fa795829f3bb38a30ac"
+$PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.10/python-3.8.10-amd64.exe"
+$PYTHON38_x64_SHA256 = "7628244cb53408b50639d2c1287c659f4e29d3dfdb9084b11aed5870c0c6a48a"
-$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.0/python-3.9.0.exe"
-$PYTHON39_x86_SHA256 = "a4c65917f4225d1543959342f0615c813a4e9e7ff1137c4394ff6a5290ac1913"
-$PYTHON39_x64_URL = "https://www.python.org/ftp/python/3.9.0/python-3.9.0-amd64.exe"
-$PYTHON39_x64_SHA256 = "fd2e2c6612d43bb6b213b72fc53f07d73d99059fa72c96e44bde12e7815073ae"
+$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.5/python-3.9.5.exe"
+$PYTHON39_x86_SHA256 = "505129081a839b699a6ab9064b441ad922ef03767b5dd4241fd0c2166baf64de"
+$PYTHON39_x64_URL = "https://www.python.org/ftp/python/3.9.5/python-3.9.5-amd64.exe"
+$PYTHON39_x64_SHA256 = "84d5243088ba00c11e51905c704dbe041040dfff044f4e1ce5476844ee2e6eac"
# PIP 19.2.3.
$PIP_URL = "https://github.com/pypa/get-pip/raw/309a56c5fd94bd1134053a541cb4657a4e47e09d/get-pip.py"
@@ -62,6 +62,9 @@
$RUSTUP_INIT_URL = "https://static.rust-lang.org/rustup/archive/1.21.1/x86_64-pc-windows-gnu/rustup-init.exe"
$RUSTUP_INIT_SHA256 = "d17df34ba974b9b19cf5c75883a95475aa22ddc364591d75d174090d55711c72"
+$PYOXIDIZER_URL = "https://github.com/indygreg/PyOxidizer/releases/download/pyoxidizer%2F0.16.0/PyOxidizer-0.16.0-x64.msi"
+$PYOXIDIZER_SHA256 = "2a9c58add9161c272c418d5e6dec13fbe648f624b5d26770190357e4d664f24e"
+
# Writing progress slows down downloads substantially. So disable it.
$progressPreference = 'silentlyContinue'
@@ -121,11 +124,8 @@ function Install-Rust($prefix) {
Invoke-Process "${prefix}\assets\rustup-init.exe" "-y --default-host x86_64-pc-windows-msvc"
Invoke-Process "${prefix}\cargo\bin\rustup.exe" "target add i686-pc-windows-msvc"
- Invoke-Process "${prefix}\cargo\bin\rustup.exe" "install 1.46.0"
+ Invoke-Process "${prefix}\cargo\bin\rustup.exe" "install 1.52.0"
Invoke-Process "${prefix}\cargo\bin\rustup.exe" "component add clippy"
-
- # Install PyOxidizer for packaging.
- Invoke-Process "${prefix}\cargo\bin\cargo.exe" "install --version 0.10.3 pyoxidizer"
}
function Install-Dependencies($prefix) {
@@ -151,6 +151,7 @@ function Install-Dependencies($prefix) {
Secure-Download $MINGW_BIN_URL ${prefix}\assets\mingw-get-bin.zip $MINGW_BIN_SHA256
Secure-Download $MERCURIAL_WHEEL_URL ${prefix}\assets\${MERCURIAL_WHEEL_FILENAME} $MERCURIAL_WHEEL_SHA256
Secure-Download $RUSTUP_INIT_URL ${prefix}\assets\rustup-init.exe $RUSTUP_INIT_SHA256
+ Secure-Download $PYOXIDIZER_URL ${prefix}\assets\PyOxidizer.msi $PYOXIDIZER_SHA256
Write-Output "installing Python 2.7 32-bit"
Invoke-Process msiexec.exe "/i ${prefix}\assets\python27-x86.msi /l* ${prefix}\assets\python27-x86.log /q TARGETDIR=${prefix}\python27-x86 ALLUSERS="
@@ -172,6 +173,9 @@ function Install-Dependencies($prefix) {
Write-Output "installing Visual Studio 2017 Build Tools and SDKs"
Invoke-Process ${prefix}\assets\vs_buildtools.exe "--quiet --wait --norestart --nocache --channelUri https://aka.ms/vs/15/release/channel --add Microsoft.VisualStudio.Workload.MSBuildTools --add Microsoft.VisualStudio.Component.Windows10SDK.17763 --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Component.Windows10SDK --add Microsoft.VisualStudio.Component.VC.140"
+ Write-Output "installing PyOxidizer"
+ Invoke-Process msiexec.exe "/i ${prefix}\assets\PyOxidizer.msi /l* ${prefix}\assets\PyOxidizer.log /quiet"
+
Install-Rust ${prefix}
Write-Output "installing Visual C++ 9.0 for Python 2.7"
diff --git a/contrib/packaging/hgpackaging/cli.py b/contrib/packaging/hgpackaging/cli.py
--- a/contrib/packaging/hgpackaging/cli.py
+++ b/contrib/packaging/hgpackaging/cli.py
@@ -64,6 +64,7 @@ def build_wix(
extra_packages_script=None,
extra_wxs=None,
extra_features=None,
+ extra_pyoxidizer_vars=None,
):
if not pyoxidizer_target and not python:
raise Exception("--python required unless building with PyOxidizer")
@@ -105,7 +106,7 @@ def build_wix(
"timestamp_url": sign_timestamp_url,
}
- fn(**kwargs)
+ fn(**kwargs, extra_pyoxidizer_vars=extra_pyoxidizer_vars)
def get_parser():
@@ -168,6 +169,12 @@ def get_parser():
"in the installer from the extra wxs files"
),
)
+
+ sp.add_argument(
+ "--extra-pyoxidizer-vars",
+ help="json map of extra variables to pass to pyoxidizer",
+ )
+
sp.set_defaults(func=build_wix)
return parser
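
The new flag is plumbed straight through to pyoxidizer: the string is a JSON object, and each key/value pair becomes a `--var NAME VALUE` argument on the `pyoxidizer build` command line (see the wix.py hunks below). A minimal sketch of that round trip; the variable names and values here are examples only:

    import json

    extra_pyoxidizer_vars = '{"MSI_NAME": "mercurial", "TIME_STAMP_SERVER_URL": "http://timestamp.example"}'

    build_vars = {}
    if extra_pyoxidizer_vars:
        build_vars.update(json.loads(extra_pyoxidizer_vars))

    args = ["pyoxidizer", "build"]
    for k, v in sorted(build_vars.items()):
        args.extend(["--var", k, v])
    # -> pyoxidizer build --var MSI_NAME mercurial --var TIME_STAMP_SERVER_URL ...
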
diff --git a/contrib/packaging/hgpackaging/inno.py b/contrib/packaging/hgpackaging/inno.py
--- a/contrib/packaging/hgpackaging/inno.py
+++ b/contrib/packaging/hgpackaging/inno.py
@@ -18,7 +18,7 @@ from .py2exe import (
build_py2exe,
stage_install,
)
-from .pyoxidizer import run_pyoxidizer
+from .pyoxidizer import create_pyoxidizer_install_layout
from .util import (
find_legacy_vc_runtime_files,
normalize_windows_version,
@@ -136,7 +136,9 @@ def build_with_pyoxidizer(
staging_dir = inno_build_dir / "stage"
inno_build_dir.mkdir(parents=True, exist_ok=True)
- run_pyoxidizer(source_dir, inno_build_dir, staging_dir, target_triple)
+ create_pyoxidizer_install_layout(
+ source_dir, inno_build_dir, staging_dir, target_triple
+ )
process_install_rules(EXTRA_INSTALL_RULES, source_dir, staging_dir)
diff --git a/contrib/packaging/hgpackaging/pyoxidizer.py b/contrib/packaging/hgpackaging/pyoxidizer.py
--- a/contrib/packaging/hgpackaging/pyoxidizer.py
+++ b/contrib/packaging/hgpackaging/pyoxidizer.py
@@ -12,6 +12,7 @@ import pathlib
import shutil
import subprocess
import sys
+import typing
from .downloads import download_entry
from .util import (
@@ -53,17 +54,36 @@ STAGING_EXCLUDES_WINDOWS = [
]
+def build_docs_html(source_dir: pathlib.Path):
+ """Ensures HTML documentation is built.
+
+ This will fail if docutils isn't available.
+
+ (The HTML docs aren't built as part of `pip install` so we need to build them
+ out of band.)
+ """
+ subprocess.run(
+ [sys.executable, str(source_dir / "setup.py"), "build_doc", "--html"],
+ cwd=str(source_dir),
+ check=True,
+ )
+
+
def run_pyoxidizer(
source_dir: pathlib.Path,
build_dir: pathlib.Path,
- out_dir: pathlib.Path,
target_triple: str,
-):
- """Build Mercurial with PyOxidizer and copy additional files into place.
+ build_vars: typing.Optional[typing.Dict[str, str]] = None,
+ target: typing.Optional[str] = None,
+) -> pathlib.Path:
+ """Run `pyoxidizer` in an environment with access to build dependencies.
- After successful completion, ``out_dir`` contains files constituting a
- Mercurial install.
+ Returns the output directory that pyoxidizer would have used for build
+ artifacts. Actual build artifacts are likely in a sub-directory with the
+ name of the pyoxidizer build target that was built.
"""
+ build_vars = build_vars or {}
+
# We need to make gettext binaries available for compiling i18n files.
gettext_pkg, gettext_entry = download_entry('gettext', build_dir)
gettext_dep_pkg = download_entry('gettext-dep', build_dir)[0]
@@ -91,8 +111,31 @@ def run_pyoxidizer(
target_triple,
]
+ for k, v in sorted(build_vars.items()):
+ args.extend(["--var", k, v])
+
+ if target:
+ args.append(target)
+
subprocess.run(args, env=env, check=True)
+ return source_dir / "build" / "pyoxidizer" / target_triple / "release"
+
+
+def create_pyoxidizer_install_layout(
+ source_dir: pathlib.Path,
+ build_dir: pathlib.Path,
+ out_dir: pathlib.Path,
+ target_triple: str,
+):
+ """Build Mercurial with PyOxidizer and copy additional files into place.
+
+ After successful completion, ``out_dir`` contains files constituting a
+ Mercurial install.
+ """
+
+ run_pyoxidizer(source_dir, build_dir, target_triple)
+
if "windows" in target_triple:
target = "app_windows"
else:
@@ -113,14 +156,7 @@ def run_pyoxidizer(
# is taught to use the importlib APIs for reading resources.
process_install_rules(STAGING_RULES_APP, build_dir, out_dir)
- # We also need to run setup.py build_doc to produce html files,
- # as they aren't built as part of ``pip install``.
- # This will fail if docutils isn't installed.
- subprocess.run(
- [sys.executable, str(source_dir / "setup.py"), "build_doc", "--html"],
- cwd=str(source_dir),
- check=True,
- )
+ build_docs_html(source_dir)
if "windows" in target_triple:
process_install_rules(STAGING_RULES_WINDOWS, source_dir, out_dir)
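
After this refactor there are two entry points: `run_pyoxidizer()` only drives the build (optionally with `--var` pairs and an explicit target) and returns the release directory, while `create_pyoxidizer_install_layout()` keeps the old behaviour of staging a complete install tree. A rough usage sketch, with placeholder paths and an example build variable:

    import pathlib

    source_dir = pathlib.Path(r"C:\src\mercurial")        # placeholder
    target_triple = "x86_64-pc-windows-msvc"
    build_dir = source_dir / "build" / ("wix-%s" % target_triple)

    # WiX/MSI flow: pyoxidizer produces the artifact, the caller fishes it out
    out_dir = run_pyoxidizer(
        source_dir,
        build_dir,
        target_triple,
        build_vars={"VERSION": "5.9"},
        target="msi",
    )

    # Inno flow: materialize a staged install layout for the packager
    staging_dir = build_dir / "stage"
    create_pyoxidizer_install_layout(
        source_dir, build_dir, staging_dir, target_triple
    )
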
diff --git a/contrib/packaging/hgpackaging/wix.py b/contrib/packaging/hgpackaging/wix.py
--- a/contrib/packaging/hgpackaging/wix.py
+++ b/contrib/packaging/hgpackaging/wix.py
@@ -8,6 +8,7 @@
# no-check-code because Python 3 native.
import collections
+import json
import os
import pathlib
import re
@@ -22,7 +23,11 @@ from .py2exe import (
build_py2exe,
stage_install,
)
-from .pyoxidizer import run_pyoxidizer
+from .pyoxidizer import (
+ build_docs_html,
+ create_pyoxidizer_install_layout,
+ run_pyoxidizer,
+)
from .util import (
extract_zip_to_directory,
normalize_windows_version,
@@ -382,40 +387,74 @@ def build_installer_pyoxidizer(
extra_wxs: typing.Optional[typing.Dict[str, str]] = None,
extra_features: typing.Optional[typing.List[str]] = None,
signing_info: typing.Optional[typing.Dict[str, str]] = None,
+ extra_pyoxidizer_vars=None,
):
"""Build a WiX MSI installer using PyOxidizer."""
hg_build_dir = source_dir / "build"
build_dir = hg_build_dir / ("wix-%s" % target_triple)
- staging_dir = build_dir / "stage"
-
- arch = "x64" if "x86_64" in target_triple else "x86"
build_dir.mkdir(parents=True, exist_ok=True)
- run_pyoxidizer(source_dir, build_dir, staging_dir, target_triple)
+
+ # Need to ensure docs HTML is built because this isn't done as part of
+ # `pip install Mercurial`.
+ build_docs_html(source_dir)
+
+ build_vars = {}
- # We also install some extra files.
- process_install_rules(EXTRA_INSTALL_RULES, source_dir, staging_dir)
+ if msi_name:
+ build_vars["MSI_NAME"] = msi_name
+
+ if version:
+ build_vars["VERSION"] = version
+
+ if extra_features:
+ build_vars["EXTRA_MSI_FEATURES"] = ";".join(extra_features)
- # And remove some files we don't want.
- for f in STAGING_REMOVE_FILES:
- p = staging_dir / f
- if p.exists():
- print('removing %s' % p)
- p.unlink()
+ if signing_info:
+ if signing_info["cert_path"]:
+ build_vars["SIGNING_PFX_PATH"] = signing_info["cert_path"]
+ if signing_info["cert_password"]:
+ build_vars["SIGNING_PFX_PASSWORD"] = signing_info["cert_password"]
+ if signing_info["subject_name"]:
+ build_vars["SIGNING_SUBJECT_NAME"] = signing_info["subject_name"]
+ if signing_info["timestamp_url"]:
+ build_vars["TIME_STAMP_SERVER_URL"] = signing_info["timestamp_url"]
- return run_wix_packaging(
+ if extra_pyoxidizer_vars:
+ build_vars.update(json.loads(extra_pyoxidizer_vars))
+
+ if extra_wxs:
+ raise Exception(
+ "support for extra .wxs files has been temporarily dropped"
+ )
+
+ out_dir = run_pyoxidizer(
source_dir,
build_dir,
- staging_dir,
- arch,
- version,
- python2=False,
- msi_name=msi_name,
- extra_wxs=extra_wxs,
- extra_features=extra_features,
- signing_info=signing_info,
+ target_triple,
+ build_vars=build_vars,
+ target="msi",
)
+ msi_dir = out_dir / "msi"
+ msi_files = [f for f in os.listdir(msi_dir) if f.endswith(".msi")]
+
+ if len(msi_files) != 1:
+ raise Exception("expected exactly 1 .msi file; got %d" % len(msi_files))
+
+ msi_filename = msi_files[0]
+
+ msi_path = msi_dir / msi_filename
+ dist_path = source_dir / "dist" / msi_filename
+
+ dist_path.parent.mkdir(parents=True, exist_ok=True)
+
+ shutil.copyfile(msi_path, dist_path)
+
+ return {
+ "msi_path": dist_path,
+ }
+
def run_wix_packaging(
source_dir: pathlib.Path,
diff --git a/contrib/packaging/wix/mercurial.wxs b/contrib/packaging/wix/mercurial.wxs
--- a/contrib/packaging/wix/mercurial.wxs
+++ b/contrib/packaging/wix/mercurial.wxs
@@ -135,9 +135,13 @@
+
+
+
+
-
+
diff --git a/contrib/perf.py b/contrib/perf.py
--- a/contrib/perf.py
+++ b/contrib/perf.py
header = struct.unpack(b'>I', data[0:4])[0]
version = header & 0xFFFF
if version == 1:
- revlogio = revlog.revlogio()
inline = header & (1 << 16)
else:
raise error.Abort(b'unsupported revlog version: %d' % version)
+ parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
+ if parse_index_v1 is None:
+ parse_index_v1 = mercurial.revlog.revlogio().parseindex
+
rllen = len(rl)
node0 = rl.node(0)
@@ -2617,33 +2654,35 @@ def perfrevlogindex(ui, repo, file_=None
allnodesrev = list(reversed(allnodes))
def constructor():
- revlog.revlog(opener, indexfile)
+ if radix is not None:
+ revlog(opener, radix=radix)
+ else:
+ # hg <= 5.8
+ revlog(opener, indexfile=indexfile)
def read():
with opener(indexfile) as fh:
fh.read()
def parseindex():
- revlogio.parseindex(data, inline)
+ parse_index_v1(data, inline)
def getentry(revornode):
- index = revlogio.parseindex(data, inline)[0]
+ index = parse_index_v1(data, inline)[0]
index[revornode]
def getentries(revs, count=1):
- index = revlogio.parseindex(data, inline)[0]
+ index = parse_index_v1(data, inline)[0]
for i in range(count):
for rev in revs:
index[rev]
def resolvenode(node):
- index = revlogio.parseindex(data, inline)[0]
+ index = parse_index_v1(data, inline)[0]
rev = getattr(index, 'rev', None)
if rev is None:
- nodemap = getattr(
- revlogio.parseindex(data, inline)[0], 'nodemap', None
- )
+ nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
# This only works for the C code.
if nodemap is None:
return
@@ -2655,12 +2694,10 @@ def perfrevlogindex(ui, repo, file_=None
pass
def resolvenodes(nodes, count=1):
- index = revlogio.parseindex(data, inline)[0]
+ index = parse_index_v1(data, inline)[0]
rev = getattr(index, 'rev', None)
if rev is None:
- nodemap = getattr(
- revlogio.parseindex(data, inline)[0], 'nodemap', None
- )
+ nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
# This only works for the C code.
if nodemap is None:
return
@@ -3015,10 +3052,17 @@ def _temprevlog(ui, orig, truncaterev):
if util.safehasattr(orig, k):
revlogkwargs[k] = getattr(orig, k)
- origindexpath = orig.opener.join(orig.indexfile)
- origdatapath = orig.opener.join(orig.datafile)
- indexname = 'revlog.i'
- dataname = 'revlog.d'
+ indexfile = getattr(orig, '_indexfile', None)
+ if indexfile is None:
+ # compatibility with <= hg-5.8
+ indexfile = getattr(orig, 'indexfile')
+ origindexpath = orig.opener.join(indexfile)
+
+ datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
+ origdatapath = orig.opener.join(datafile)
+ radix = b'revlog'
+ indexname = b'revlog.i'
+ dataname = b'revlog.d'
tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
try:
@@ -3043,9 +3087,12 @@ def _temprevlog(ui, orig, truncaterev):
vfs = vfsmod.vfs(tmpdir)
vfs.options = getattr(orig.opener, 'options', None)
- dest = revlog.revlog(
- vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
- )
+ try:
+ dest = revlog(vfs, radix=radix, **revlogkwargs)
+ except TypeError:
+ dest = revlog(
+ vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
+ )
if dest._inline:
raise error.Abort('not supporting inline revlog (yet)')
# make sure internals are initialized
@@ -3111,9 +3158,14 @@ def perfrevlogchunks(ui, repo, file_=Non
def rlfh(rl):
if rl._inline:
- return getsvfs(repo)(rl.indexfile)
+ indexfile = getattr(rl, '_indexfile', None)
+ if indexfile is None:
+ # compatibility with <= hg-5.8
+ indexfile = getattr(rl, 'indexfile')
+ return getsvfs(repo)(indexfile)
else:
- return getsvfs(repo)(rl.datafile)
+ datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
+ return getsvfs(repo)(datafile)
def doread():
rl.clearcaches()
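
All of the perf.py hunks follow one pattern: probe for the 5.9 API first and fall back to the older spelling so the benchmark script keeps working against both. A condensed sketch of the attribute probes, assuming `rl` is an existing revlog; the constructor fallback in `_temprevlog` uses the same idea via `try/except TypeError`:

    import mercurial.revlog

    # index parsing: module-level function on hg >= 5.9, revlogio method before
    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    # attribute names grew a leading underscore in 5.9
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        indexfile = getattr(rl, 'indexfile')  # hg <= 5.8
    datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
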
diff --git a/contrib/undumprevlog b/contrib/undumprevlog
--- a/contrib/undumprevlog
+++ b/contrib/undumprevlog
@@ -15,6 +15,10 @@ from mercurial import (
)
from mercurial.utils import procutil
+from mercurial.revlogutils import (
+ constants as revlog_constants,
+)
+
for fp in (sys.stdin, sys.stdout, sys.stderr):
procutil.setbinary(fp)
@@ -28,7 +32,12 @@ while True:
break
if l.startswith("file:"):
f = encoding.strtolocal(l[6:-1])
- r = revlog.revlog(opener, f)
+ assert f.endswith(b'.i')
+ r = revlog.revlog(
+ opener,
+ target=(revlog_constants.KIND_OTHER, b'undump-revlog'),
+ radix=f[:-2],
+ )
procutil.stdout.write(b'%s\n' % f)
elif l.startswith("node:"):
n = bin(l[6:-1])
diff --git a/hgext/absorb.py b/hgext/absorb.py
--- a/hgext/absorb.py
+++ b/hgext/absorb.py
@@ -38,7 +38,6 @@ import collections
from mercurial.i18n import _
from mercurial.node import (
hex,
- nullid,
short,
)
from mercurial import (
@@ -109,7 +108,7 @@ class emptyfilecontext(object):
return b''
def node(self):
- return nullid
+ return self._repo.nullid
def uniq(lst):
@@ -927,7 +926,7 @@ class fixupstate(object):
the commit is a clone from ctx, with a (optionally) different p1, and
different file contents replaced by memworkingcopy.
"""
- parents = p1 and (p1, nullid)
+ parents = p1 and (p1, self.repo.nullid)
extra = ctx.extra()
if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'):
extra[b'absorb_source'] = ctx.hex()
diff --git a/hgext/amend.py b/hgext/amend.py
--- a/hgext/amend.py
+++ b/hgext/amend.py
@@ -16,7 +16,6 @@ from mercurial.i18n import _
from mercurial import (
cmdutil,
commands,
- pycompat,
registrar,
)
@@ -66,11 +65,10 @@ def amend(ui, repo, *pats, **opts):
See :hg:`help commit` for more details.
"""
- opts = pycompat.byteskwargs(opts)
- cmdutil.checknotesize(ui, opts)
+ cmdutil.check_note_size(opts)
with repo.wlock(), repo.lock():
- if not opts.get(b'logfile'):
- opts[b'message'] = opts.get(b'message') or repo[b'.'].description()
- opts[b'amend'] = True
- return commands._docommit(ui, repo, *pats, **pycompat.strkwargs(opts))
+ if not opts.get('logfile'):
+ opts['message'] = opts.get('message') or repo[b'.'].description()
+ opts['amend'] = True
+ return commands._docommit(ui, repo, *pats, **opts)
diff --git a/hgext/convert/bzr.py b/hgext/convert/bzr.py
--- a/hgext/convert/bzr.py
+++ b/hgext/convert/bzr.py
@@ -5,8 +5,9 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
-# This module is for handling 'bzr', that was formerly known as Bazaar-NG;
-# it cannot access 'bar' repositories, but they were never used very much
+# This module is for handling Breezy imports or `brz`, but it's also compatible
+# with Bazaar or `bzr`, which was formerly known as Bazaar-NG;
+# it cannot access `baz` repositories, but they were never used very much.
from __future__ import absolute_import
import os
@@ -16,34 +17,36 @@ from mercurial import (
demandimport,
error,
pycompat,
+ util,
)
from . import common
+
# these do not work with demandimport, blacklist
demandimport.IGNORES.update(
[
- b'bzrlib.transactions',
- b'bzrlib.urlutils',
+ b'breezy.transactions',
+ b'breezy.urlutils',
b'ElementPath',
]
)
try:
# bazaar imports
- import bzrlib.bzrdir
- import bzrlib.errors
- import bzrlib.revision
- import bzrlib.revisionspec
+ import breezy.bzr.bzrdir
+ import breezy.errors
+ import breezy.revision
+ import breezy.revisionspec
- bzrdir = bzrlib.bzrdir
- errors = bzrlib.errors
- revision = bzrlib.revision
- revisionspec = bzrlib.revisionspec
+ bzrdir = breezy.bzr.bzrdir
+ errors = breezy.errors
+ revision = breezy.revision
+ revisionspec = breezy.revisionspec
revisionspec.RevisionSpec
except ImportError:
pass
-supportedkinds = (b'file', b'symlink')
+supportedkinds = ('file', 'symlink')
class bzr_source(common.converter_source):
@@ -58,15 +61,16 @@ class bzr_source(common.converter_source
)
try:
- # access bzrlib stuff
+ # access breezy stuff
bzrdir
except NameError:
raise common.NoRepo(_(b'Bazaar modules could not be loaded'))
- path = os.path.abspath(path)
+ path = util.abspath(path)
self._checkrepotype(path)
try:
- self.sourcerepo = bzrdir.BzrDir.open(path).open_repository()
+ bzr_dir = bzrdir.BzrDir.open(path.decode())
+ self.sourcerepo = bzr_dir.open_repository()
except errors.NoRepositoryPresent:
raise common.NoRepo(
_(b'%s does not look like a Bazaar repository') % path
@@ -78,7 +82,7 @@ class bzr_source(common.converter_source
# Lightweight checkouts detection is informational but probably
# fragile at API level. It should not terminate the conversion.
try:
- dir = bzrdir.BzrDir.open_containing(path)[0]
+ dir = bzrdir.BzrDir.open_containing(path.decode())[0]
try:
tree = dir.open_workingtree(recommend_upgrade=False)
branch = tree.branch
@@ -87,8 +91,8 @@ class bzr_source(common.converter_source
branch = dir.open_branch()
if (
tree is not None
- and tree.bzrdir.root_transport.base
- != branch.bzrdir.root_transport.base
+ and tree.controldir.root_transport.base
+ != branch.controldir.root_transport.base
):
self.ui.warn(
_(
@@ -127,7 +131,8 @@ class bzr_source(common.converter_source
revid = None
for branch in self._bzrbranches():
try:
- r = revisionspec.RevisionSpec.from_string(self.revs[0])
+ revspec = self.revs[0].decode()
+ r = revisionspec.RevisionSpec.from_string(revspec)
info = r.in_history(branch)
except errors.BzrError:
pass
@@ -142,24 +147,26 @@ class bzr_source(common.converter_source
return heads
def getfile(self, name, rev):
+ name = name.decode()
revtree = self.sourcerepo.revision_tree(rev)
- fileid = revtree.path2id(name.decode(self.encoding or b'utf-8'))
- kind = None
- if fileid is not None:
- kind = revtree.kind(fileid)
+
+ try:
+ kind = revtree.kind(name)
+ except breezy.errors.NoSuchFile:
+ return None, None
if kind not in supportedkinds:
# the file is not available anymore - was deleted
return None, None
- mode = self._modecache[(name, rev)]
- if kind == b'symlink':
- target = revtree.get_symlink_target(fileid)
+ mode = self._modecache[(name.encode(), rev)]
+ if kind == 'symlink':
+ target = revtree.get_symlink_target(name)
if target is None:
raise error.Abort(
_(b'%s.%s symlink has no target') % (name, rev)
)
- return target, mode
+ return target.encode(), mode
else:
- sio = revtree.get_file(fileid)
+ sio = revtree.get_file(name)
return sio.read(), mode
def getchanges(self, version, full):
@@ -184,15 +191,15 @@ class bzr_source(common.converter_source
parents = self._filterghosts(rev.parent_ids)
self._parentids[version] = parents
- branch = self.recode(rev.properties.get(b'branch-nick', u'default'))
- if branch == b'trunk':
- branch = b'default'
+ branch = rev.properties.get('branch-nick', 'default')
+ if branch == 'trunk':
+ branch = 'default'
return common.commit(
parents=parents,
date=b'%d %d' % (rev.timestamp, -rev.timezone),
author=self.recode(rev.committer),
desc=self.recode(rev.message),
- branch=branch,
+ branch=branch.encode('utf8'),
rev=version,
saverev=self._saverev,
)
@@ -234,35 +241,32 @@ class bzr_source(common.converter_source
# Process the entries by reverse lexicographic name order to
# handle nested renames correctly, most specific first.
+
+ def key(c):
+ return c.path[0] or c.path[1] or ""
+
curchanges = sorted(
current.iter_changes(origin),
- key=lambda c: c[1][0] or c[1][1],
+ key=key,
reverse=True,
)
- for (
- fileid,
- paths,
- changed_content,
- versioned,
- parent,
- name,
- kind,
- executable,
- ) in curchanges:
-
+ for change in curchanges:
+ paths = change.path
+ kind = change.kind
+ executable = change.executable
if paths[0] == u'' or paths[1] == u'':
# ignore changes to tree root
continue
# bazaar tracks directories, mercurial does not, so
# we have to rename the directory contents
- if kind[1] == b'directory':
- if kind[0] not in (None, b'directory'):
+ if kind[1] == 'directory':
+ if kind[0] not in (None, 'directory'):
# Replacing 'something' with a directory, record it
# so it can be removed.
changes.append((self.recode(paths[0]), revid))
- if kind[0] == b'directory' and None not in paths:
+ if kind[0] == 'directory' and None not in paths:
renaming = paths[0] != paths[1]
# neither an add nor an delete - a move
# rename all directory contents manually
@@ -270,9 +274,9 @@ class bzr_source(common.converter_source
# get all child-entries of the directory
for name, entry in inventory.iter_entries(subdir):
# hg does not track directory renames
- if entry.kind == b'directory':
+ if entry.kind == 'directory':
continue
- frompath = self.recode(paths[0] + b'/' + name)
+ frompath = self.recode(paths[0] + '/' + name)
if frompath in seen:
# Already handled by a more specific change entry
# This is important when you have:
@@ -283,14 +287,14 @@ class bzr_source(common.converter_source
seen.add(frompath)
if not renaming:
continue
- topath = self.recode(paths[1] + b'/' + name)
+ topath = self.recode(paths[1] + '/' + name)
# register the files as changed
changes.append((frompath, revid))
changes.append((topath, revid))
# add to mode cache
mode = (
(entry.executable and b'x')
- or (entry.kind == b'symlink' and b's')
+ or (entry.kind == 'symlink' and b's')
or b''
)
self._modecache[(topath, revid)] = mode
@@ -320,7 +324,7 @@ class bzr_source(common.converter_source
# populate the mode cache
kind, executable = [e[1] for e in (kind, executable)]
- mode = (executable and b'x') or (kind == b'symlink' and b'l') or b''
+ mode = (executable and b'x') or (kind == 'symlink' and b'l') or b''
self._modecache[(topath, revid)] = mode
changes.append((topath, revid))
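
The common thread in the bzr/breezy hunks is the type boundary: Mercurial's converter passes `bytes`, breezy's API expects and returns `str`, so values are decoded on the way in and encoded on the way out, and kind/path literals become plain strings. A minimal sketch of that convention, assuming `revtree` is a breezy revision tree and `rev` a breezy revision:

    path_hg = b'dir/file.txt'                       # Mercurial side: bytes
    path_brz = path_hg.decode()                     # breezy side: str

    kind = revtree.kind(path_brz)                   # e.g. 'file', 'symlink' (str)
    branch = rev.properties.get('branch-nick', 'default')
    if branch == 'trunk':
        branch = 'default'
    branch_hg = branch.encode('utf8')               # back to bytes for commit()
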
diff --git a/hgext/convert/git.py b/hgext/convert/git.py
--- a/hgext/convert/git.py
+++ b/hgext/convert/git.py
@@ -9,11 +9,12 @@ from __future__ import absolute_import
import os
from mercurial.i18n import _
-from mercurial.node import nullhex
+from mercurial.node import sha1nodeconstants
from mercurial import (
config,
error,
pycompat,
+ util,
)
from . import common
@@ -74,7 +75,7 @@ class convert_git(common.converter_sourc
# Pass an absolute path to git to prevent from ever being interpreted
# as a URL
- path = os.path.abspath(path)
+ path = util.abspath(path)
if os.path.isdir(path + b"/.git"):
path += b"/.git"
@@ -192,7 +193,7 @@ class convert_git(common.converter_sourc
return heads
def catfile(self, rev, ftype):
- if rev == nullhex:
+ if rev == sha1nodeconstants.nullhex:
raise IOError
self.catfilepipe[0].write(rev + b'\n')
self.catfilepipe[0].flush()
@@ -214,7 +215,7 @@ class convert_git(common.converter_sourc
return data
def getfile(self, name, rev):
- if rev == nullhex:
+ if rev == sha1nodeconstants.nullhex:
return None, None
if name == b'.hgsub':
data = b'\n'.join([m.hgsub() for m in self.submoditer()])
@@ -228,7 +229,7 @@ class convert_git(common.converter_sourc
return data, mode
def submoditer(self):
- null = nullhex
+ null = sha1nodeconstants.nullhex
for m in sorted(self.submodules, key=lambda p: p.path):
if m.node != null:
yield m
@@ -317,7 +318,7 @@ class convert_git(common.converter_sourc
subexists[0] = True
if entry[4] == b'D' or renamesource:
subdeleted[0] = True
- changes.append((b'.hgsub', nullhex))
+ changes.append((b'.hgsub', sha1nodeconstants.nullhex))
else:
changes.append((b'.hgsub', b''))
elif entry[1] == b'160000' or entry[0] == b':160000':
@@ -325,7 +326,7 @@ class convert_git(common.converter_sourc
subexists[0] = True
else:
if renamesource:
- h = nullhex
+ h = sha1nodeconstants.nullhex
self.modecache[(f, h)] = (p and b"x") or (s and b"l") or b""
changes.append((f, h))
@@ -362,7 +363,7 @@ class convert_git(common.converter_sourc
if subexists[0]:
if subdeleted[0]:
- changes.append((b'.hgsubstate', nullhex))
+ changes.append((b'.hgsubstate', sha1nodeconstants.nullhex))
else:
self.retrievegitmodules(version)
changes.append((b'.hgsubstate', b''))
diff --git a/hgext/convert/hg.py b/hgext/convert/hg.py
--- a/hgext/convert/hg.py
+++ b/hgext/convert/hg.py
@@ -27,8 +27,7 @@ from mercurial.pycompat import open
from mercurial.node import (
bin,
hex,
- nullhex,
- nullid,
+ sha1nodeconstants,
)
from mercurial import (
bookmarks,
@@ -160,7 +159,7 @@ class mercurial_sink(common.converter_si
continue
revid = revmap.get(source.lookuprev(s[0]))
if not revid:
- if s[0] == nullhex:
+ if s[0] == sha1nodeconstants.nullhex:
revid = s[0]
else:
# missing, but keep for hash stability
@@ -179,7 +178,7 @@ class mercurial_sink(common.converter_si
revid = s[0]
subpath = s[1]
- if revid != nullhex:
+ if revid != sha1nodeconstants.nullhex:
revmap = self.subrevmaps.get(subpath)
if revmap is None:
revmap = mapfile(
@@ -304,9 +303,9 @@ class mercurial_sink(common.converter_si
parent = parents[0]
if len(parents) < 2:
- parents.append(nullid)
+ parents.append(self.repo.nullid)
if len(parents) < 2:
- parents.append(nullid)
+ parents.append(self.repo.nullid)
p2 = parents.pop(0)
text = commit.desc
@@ -356,7 +355,7 @@ class mercurial_sink(common.converter_si
p2 = parents.pop(0)
p1ctx = self.repo[p1]
p2ctx = None
- if p2 != nullid:
+ if p2 != self.repo.nullid:
p2ctx = self.repo[p2]
fileset = set(files)
if full:
@@ -421,7 +420,7 @@ class mercurial_sink(common.converter_si
def puttags(self, tags):
tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
- tagparent = tagparent or nullid
+ tagparent = tagparent or self.repo.nullid
oldlines = set()
for branch, heads in pycompat.iteritems(self.repo.branchmap()):
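
The nullid substitutions in these converter and extension hunks all follow the same recipe: the module-level `nullid`/`nullhex` constants are replaced either by the repository's own value or by `sha1nodeconstants` when no repo is in scope. A minimal before/after sketch, assuming `repo`, `node` and `hexnode` are in scope:

    # before (hg <= 5.8 style)
    #   from mercurial.node import nullid, nullhex
    #   if node == nullid: ...

    from mercurial.node import sha1nodeconstants

    if node == repo.nullid:                       # preferred when a repo is handy
        pass
    if hexnode == sha1nodeconstants.nullhex:      # hash-flavour constant otherwise
        pass
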
diff --git a/hgext/convert/subversion.py b/hgext/convert/subversion.py
--- a/hgext/convert/subversion.py
+++ b/hgext/convert/subversion.py
@@ -164,7 +164,7 @@ def geturl(path):
# svn.client.url_from_path() fails with local repositories
pass
if os.path.isdir(path):
- path = os.path.normpath(os.path.abspath(path))
+ path = os.path.normpath(util.abspath(path))
if pycompat.iswindows:
path = b'/' + util.normpath(path)
# Module URL is later compared with the repository URL returned
@@ -431,7 +431,7 @@ def issvnurl(ui, url):
path = unicodepath.encode(fsencoding)
except ValueError:
proto = b'file'
- path = os.path.abspath(url)
+ path = util.abspath(url)
try:
path.decode(fsencoding)
except UnicodeDecodeError:
diff --git a/hgext/eol.py b/hgext/eol.py
--- a/hgext/eol.py
+++ b/hgext/eol.py
@@ -442,7 +442,7 @@ def reposetup(ui, repo):
continue
# all normal files need to be looked at again since
# the new .hgeol file specify a different filter
- self.dirstate.normallookup(f)
+ self.dirstate.set_possibly_dirty(f)
# Write the cache to update mtime and cache .hgeol
with self.vfs(b"eol.cache", b"w") as f:
f.write(hgeoldata)
diff --git a/hgext/fix.py b/hgext/fix.py
--- a/hgext/fix.py
+++ b/hgext/fix.py
@@ -757,7 +757,7 @@ def writeworkingdir(repo, ctx, filedata,
fctx = ctx[path]
fctx.write(data, fctx.flags())
if repo.dirstate[path] == b'n':
- repo.dirstate.normallookup(path)
+ repo.dirstate.set_possibly_dirty(path)
oldparentnodes = repo.dirstate.parents()
newparentnodes = [replacements.get(n, n) for n in oldparentnodes]
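
The dirstate calls changed in eol.py and fix.py above (and throughout the largefiles hunks below) are part of one migration from state-mutating methods to intent-based ones. A rough mapping, assuming `ds` is a dirstate and `f` a tracked path; the `update_file()` calls in this series happen inside a `parentchange()` block:

    ds.set_possibly_dirty(f)   # was: ds.normallookup(f)
    ds.set_clean(f)            # was: ds.normal(f)
    ds.set_tracked(f)          # was: ds.add(f)
    ds.set_untracked(f)        # was: ds.remove(f) / ds.drop(f)

    # fine-grained control when rewriting parents:
    with ds.parentchange():
        ds.update_file(f, p1_tracked=True, wc_tracked=True, possibly_dirty=True)
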
diff --git a/hgext/git/__init__.py b/hgext/git/__init__.py
--- a/hgext/git/__init__.py
+++ b/hgext/git/__init__.py
@@ -284,7 +284,7 @@ class gitbmstore(object):
def init(orig, ui, dest=b'.', **opts):
if opts.get('git', False):
- path = os.path.abspath(dest)
+ path = util.abspath(dest)
# TODO: walk up looking for the git repo
_setupdothg(ui, path)
return 0
diff --git a/hgext/git/dirstate.py b/hgext/git/dirstate.py
--- a/hgext/git/dirstate.py
+++ b/hgext/git/dirstate.py
@@ -4,7 +4,7 @@ import contextlib
import errno
import os
-from mercurial.node import nullid
+from mercurial.node import sha1nodeconstants
from mercurial import (
error,
extensions,
@@ -81,14 +81,16 @@ class gitdirstate(object):
except pygit2.GitError:
# Typically happens when peeling HEAD fails, as in an
# empty repository.
- return nullid
+ return sha1nodeconstants.nullid
def p2(self):
# TODO: MERGE_HEAD? something like that, right?
- return nullid
+ return sha1nodeconstants.nullid
- def setparents(self, p1, p2=nullid):
- assert p2 == nullid, b'TODO merging support'
+ def setparents(self, p1, p2=None):
+ if p2 is None:
+ p2 = sha1nodeconstants.nullid
+ assert p2 == sha1nodeconstants.nullid, b'TODO merging support'
self.git.head.set_target(gitutil.togitnode(p1))
@util.propertycache
@@ -102,14 +104,14 @@ class gitdirstate(object):
def parents(self):
# TODO how on earth do we find p2 if a merge is in flight?
- return self.p1(), nullid
+ return self.p1(), sha1nodeconstants.nullid
def __iter__(self):
return (pycompat.fsencode(f.path) for f in self.git.index)
def items(self):
for ie in self.git.index:
- yield ie.path, None # value should be a dirstatetuple
+ yield ie.path, None # value should be a DirstateItem
# py2,3 compat forward
iteritems = items
diff --git a/hgext/git/gitlog.py b/hgext/git/gitlog.py
--- a/hgext/git/gitlog.py
+++ b/hgext/git/gitlog.py
@@ -5,11 +5,8 @@ from mercurial.i18n import _
from mercurial.node import (
bin,
hex,
- nullhex,
- nullid,
nullrev,
sha1nodeconstants,
- wdirhex,
)
from mercurial import (
ancestor,
@@ -47,7 +44,7 @@ class baselog(object): # revlog.revlog)
)
def rev(self, n):
- if n == nullid:
+ if n == sha1nodeconstants.nullid:
return -1
t = self._db.execute(
'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),)
@@ -58,7 +55,7 @@ class baselog(object): # revlog.revlog)
def node(self, r):
if r == nullrev:
- return nullid
+ return sha1nodeconstants.nullid
t = self._db.execute(
'SELECT node FROM changelog WHERE rev = ?', (r,)
).fetchone()
@@ -135,7 +132,7 @@ class changelog(baselog):
bin(v[0]): v[1]
for v in self._db.execute('SELECT node, rev FROM changelog')
}
- r[nullid] = nullrev
+ r[sha1nodeconstants.nullid] = nullrev
return r
def tip(self):
@@ -144,7 +141,7 @@ class changelog(baselog):
).fetchone()
if t:
return bin(t[0])
- return nullid
+ return sha1nodeconstants.nullid
def revs(self, start=0, stop=None):
if stop is None:
@@ -167,7 +164,7 @@ class changelog(baselog):
return -1
def _partialmatch(self, id):
- if wdirhex.startswith(id):
+ if sha1nodeconstants.wdirhex.startswith(id):
raise error.WdirUnsupported
candidates = [
bin(x[0])
@@ -176,8 +173,8 @@ class changelog(baselog):
(pycompat.sysstr(id + b'%'),),
)
]
- if nullhex.startswith(id):
- candidates.append(nullid)
+ if sha1nodeconstants.nullhex.startswith(id):
+ candidates.append(sha1nodeconstants.nullid)
if len(candidates) > 1:
raise error.AmbiguousPrefixLookupError(
id, b'00changelog.i', _(b'ambiguous identifier')
@@ -223,8 +220,10 @@ class changelog(baselog):
n = nodeorrev
extra = {b'branch': b'default'}
# handle looking up nullid
- if n == nullid:
- return hgchangelog._changelogrevision(extra=extra, manifest=nullid)
+ if n == sha1nodeconstants.nullid:
+ return hgchangelog._changelogrevision(
+ extra=extra, manifest=sha1nodeconstants.nullid
+ )
hn = gitutil.togitnode(n)
# We've got a real commit!
files = [
@@ -301,7 +300,7 @@ class changelog(baselog):
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
- common = [nullid]
+ common = [sha1nodeconstants.nullid]
if heads is None:
heads = self.heads()
@@ -400,9 +399,9 @@ class changelog(baselog):
):
parents = []
hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2)
- if p1 != nullid:
+ if p1 != sha1nodeconstants.nullid:
parents.append(hp1)
- if p2 and p2 != nullid:
+ if p2 and p2 != sha1nodeconstants.nullid:
parents.append(hp2)
assert date is not None
timestamp, tz = date
@@ -435,7 +434,7 @@ class manifestlog(baselog):
return self.get(b'', node)
def get(self, relpath, node):
- if node == nullid:
+ if node == sha1nodeconstants.nullid:
# TODO: this should almost certainly be a memgittreemanifestctx
return manifest.memtreemanifestctx(self, relpath)
commit = self.gitrepo[gitutil.togitnode(node)]
@@ -454,9 +453,10 @@ class filelog(baselog):
super(filelog, self).__init__(gr, db)
assert isinstance(path, bytes)
self.path = path
+ self.nullid = sha1nodeconstants.nullid
def read(self, node):
- if node == nullid:
+ if node == sha1nodeconstants.nullid:
return b''
return self.gitrepo[gitutil.togitnode(node)].data
diff --git a/hgext/git/gitutil.py b/hgext/git/gitutil.py
--- a/hgext/git/gitutil.py
+++ b/hgext/git/gitutil.py
@@ -1,7 +1,7 @@
"""utilities to assist in working with pygit2"""
from __future__ import absolute_import
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex, sha1nodeconstants
from mercurial import pycompat
@@ -50,4 +50,4 @@ def fromgitnode(n):
return bin(n)
-nullgit = togitnode(nullid)
+nullgit = togitnode(sha1nodeconstants.nullid)
diff --git a/hgext/git/index.py b/hgext/git/index.py
--- a/hgext/git/index.py
+++ b/hgext/git/index.py
@@ -5,9 +5,7 @@ import os
import sqlite3
from mercurial.i18n import _
-from mercurial.node import (
- nullid,
-)
+from mercurial.node import sha1nodeconstants
from mercurial import (
encoding,
@@ -317,7 +315,9 @@ def _index_repo(
)
new_files = (p.delta.new_file for p in patchgen)
files = {
- nf.path: nf.id.hex for nf in new_files if nf.id.raw != nullid
+ nf.path: nf.id.hex
+ for nf in new_files
+ if nf.id.raw != sha1nodeconstants.nullid
}
for p, n in files.items():
# We intentionally set NULLs for any file parentage
diff --git a/hgext/gpg.py b/hgext/gpg.py
--- a/hgext/gpg.py
+++ b/hgext/gpg.py
@@ -14,7 +14,6 @@ from mercurial.i18n import _
from mercurial.node import (
bin,
hex,
- nullid,
short,
)
from mercurial import (
@@ -314,7 +313,9 @@ def _dosign(ui, repo, *revs, **opts):
if revs:
nodes = [repo.lookup(n) for n in revs]
else:
- nodes = [node for node in repo.dirstate.parents() if node != nullid]
+ nodes = [
+ node for node in repo.dirstate.parents() if node != repo.nullid
+ ]
if len(nodes) > 1:
raise error.Abort(
_(b'uncommitted merge - please provide a specific revision')
diff --git a/hgext/hgk.py b/hgext/hgk.py
--- a/hgext/hgk.py
+++ b/hgext/hgk.py
@@ -40,7 +40,6 @@ import os
from mercurial.i18n import _
from mercurial.node import (
- nullid,
nullrev,
short,
)
@@ -95,7 +94,7 @@ def difftree(ui, repo, node1=None, node2
mmap2 = repo[node2].manifest()
m = scmutil.match(repo[node1], files)
st = repo.status(node1, node2, m)
- empty = short(nullid)
+ empty = short(repo.nullid)
for f in st.modified:
# TODO get file permissions
@@ -317,9 +316,9 @@ def revtree(ui, args, repo, full=b"tree"
parentstr = b""
if parents:
pp = repo.changelog.parents(n)
- if pp[0] != nullid:
+ if pp[0] != repo.nullid:
parentstr += b" " + short(pp[0])
- if pp[1] != nullid:
+ if pp[1] != repo.nullid:
parentstr += b" " + short(pp[1])
if not full:
ui.write(b"%s%s\n" % (short(n), parentstr))
diff --git a/hgext/histedit.py b/hgext/histedit.py
--- a/hgext/histedit.py
+++ b/hgext/histedit.py
@@ -575,9 +575,8 @@ class histeditaction(object):
parentctx, but does not commit them."""
repo = self.repo
rulectx = repo[self.node]
- repo.ui.pushbuffer(error=True, labeled=True)
- hg.update(repo, self.state.parentctxnode, quietempty=True)
- repo.ui.popbuffer()
+ with repo.ui.silent():
+ hg.update(repo, self.state.parentctxnode, quietempty=True)
stats = applychanges(repo.ui, repo, rulectx, {})
repo.dirstate.setbranch(rulectx.branch())
if stats.unresolvedcount:
@@ -654,10 +653,9 @@ def applychanges(ui, repo, ctx, opts):
if ctx.p1().node() == repo.dirstate.p1():
# edits are "in place" we do not need to make any merge,
# just applies changes on parent for editing
- ui.pushbuffer()
- cmdutil.revert(ui, repo, ctx, all=True)
- stats = mergemod.updateresult(0, 0, 0, 0)
- ui.popbuffer()
+ with ui.silent():
+ cmdutil.revert(ui, repo, ctx, all=True)
+ stats = mergemod.updateresult(0, 0, 0, 0)
else:
try:
# ui.forcemerge is an internal variable, do not document
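
Both histedit hunks trade a manual pushbuffer()/popbuffer() pair for the `ui.silent()` context manager, which scopes output suppression to a block. A minimal before/after sketch, with `parentctxnode` standing in for `self.state.parentctxnode` from the hunk above:

    # before: suppression has to be un-done by hand
    repo.ui.pushbuffer(error=True, labeled=True)
    hg.update(repo, parentctxnode, quietempty=True)
    repo.ui.popbuffer()

    # after: leaving the with-block restores normal output
    with repo.ui.silent():
        hg.update(repo, parentctxnode, quietempty=True)
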
diff --git a/hgext/journal.py b/hgext/journal.py
--- a/hgext/journal.py
+++ b/hgext/journal.py
@@ -22,7 +22,6 @@ from mercurial.i18n import _
from mercurial.node import (
bin,
hex,
- nullid,
)
from mercurial import (
@@ -117,8 +116,8 @@ def recorddirstateparents(dirstate, old,
new = list(new)
if util.safehasattr(dirstate, 'journalstorage'):
# only record two hashes if there was a merge
- oldhashes = old[:1] if old[1] == nullid else old
- newhashes = new[:1] if new[1] == nullid else new
+ oldhashes = old[:1] if old[1] == dirstate._nodeconstants.nullid else old
+ newhashes = new[:1] if new[1] == dirstate._nodeconstants.nullid else new
dirstate.journalstorage.record(
wdirparenttype, b'.', oldhashes, newhashes
)
@@ -131,7 +130,7 @@ def recordbookmarks(orig, store, fp):
if util.safehasattr(repo, 'journal'):
oldmarks = bookmarks.bmstore(repo)
for mark, value in pycompat.iteritems(store):
- oldvalue = oldmarks.get(mark, nullid)
+ oldvalue = oldmarks.get(mark, repo.nullid)
if value != oldvalue:
repo.journal.record(bookmarktype, mark, oldvalue, value)
return orig(store, fp)
diff --git a/hgext/keyword.py b/hgext/keyword.py
--- a/hgext/keyword.py
+++ b/hgext/keyword.py
@@ -356,9 +356,9 @@ class kwtemplater(object):
fp.write(data)
fp.close()
if kwcmd:
- self.repo.dirstate.normal(f)
+ self.repo.dirstate.set_clean(f)
elif self.postcommit:
- self.repo.dirstate.normallookup(f)
+ self.repo.dirstate.update_file_p1(f, p1_tracked=True)
def shrink(self, fname, text):
'''Returns text with all keyword substitutions removed.'''
@@ -691,7 +691,7 @@ def kw_amend(orig, ui, repo, old, extra,
kwt = getattr(repo, '_keywordkwt', None)
if kwt is None:
return orig(ui, repo, old, extra, pats, opts)
- with repo.wlock():
+ with repo.wlock(), repo.dirstate.parentchange():
kwt.postcommit = True
newid = orig(ui, repo, old, extra, pats, opts)
if newid != old.node():
@@ -757,8 +757,9 @@ def kw_dorecord(orig, ui, repo, commitfu
if ctx != recctx:
modified, added = _preselect(wstatus, recctx.files())
kwt.restrict = False
- kwt.overwrite(recctx, modified, False, True)
- kwt.overwrite(recctx, added, False, True, True)
+ with repo.dirstate.parentchange():
+ kwt.overwrite(recctx, modified, False, True)
+ kwt.overwrite(recctx, added, False, True, True)
kwt.restrict = True
return ret
diff --git a/hgext/largefiles/basestore.py b/hgext/largefiles/basestore.py
--- a/hgext/largefiles/basestore.py
+++ b/hgext/largefiles/basestore.py
@@ -11,7 +11,8 @@ from __future__ import absolute_import
from mercurial.i18n import _
-from mercurial import node, util
+from mercurial.node import short
+from mercurial import util
from mercurial.utils import (
urlutil,
)
@@ -137,7 +138,7 @@ class basestore(object):
filestocheck = [] # list of (cset, filename, expectedhash)
for rev in revs:
cctx = self.repo[rev]
- cset = b"%d:%s" % (cctx.rev(), node.short(cctx.node()))
+ cset = b"%d:%s" % (cctx.rev(), short(cctx.node()))
for standin in cctx:
filename = lfutil.splitstandin(standin)
diff --git a/hgext/largefiles/lfcommands.py b/hgext/largefiles/lfcommands.py
--- a/hgext/largefiles/lfcommands.py
+++ b/hgext/largefiles/lfcommands.py
@@ -17,7 +17,6 @@ from mercurial.i18n import _
from mercurial.node import (
bin,
hex,
- nullid,
)
from mercurial import (
@@ -115,7 +114,7 @@ def lfconvert(ui, src, dest, *pats, **op
rsrc[ctx]
for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
)
- revmap = {nullid: nullid}
+ revmap = {rsrc.nullid: rdst.nullid}
if tolfile:
# Lock destination to prevent modification while it is converted to.
# Don't need to lock src because we are just reading from its
@@ -340,7 +339,7 @@ def _commitcontext(rdst, parents, ctx, d
# Generate list of changed files
def _getchangedfiles(ctx, parents):
files = set(ctx.files())
- if nullid not in parents:
+ if ctx.repo().nullid not in parents:
mc = ctx.manifest()
for pctx in ctx.parents():
for fn in pctx.manifest().diff(mc):
@@ -354,7 +353,7 @@ def _convertparents(ctx, revmap):
for p in ctx.parents():
parents.append(revmap[p.node()])
while len(parents) < 2:
- parents.append(nullid)
+ parents.append(ctx.repo().nullid)
return parents
@@ -520,47 +519,53 @@ def updatelfiles(
filelist = set(filelist)
lfiles = [f for f in lfiles if f in filelist]
- update = {}
- dropped = set()
- updated, removed = 0, 0
- wvfs = repo.wvfs
- wctx = repo[None]
- for lfile in lfiles:
- lfileorig = os.path.relpath(
- scmutil.backuppath(ui, repo, lfile), start=repo.root
- )
- standin = lfutil.standin(lfile)
- standinorig = os.path.relpath(
- scmutil.backuppath(ui, repo, standin), start=repo.root
- )
- if wvfs.exists(standin):
- if wvfs.exists(standinorig) and wvfs.exists(lfile):
- shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
- wvfs.unlinkpath(standinorig)
- expecthash = lfutil.readasstandin(wctx[standin])
- if expecthash != b'':
- if lfile not in wctx: # not switched to normal file
- if repo.dirstate[standin] != b'?':
- wvfs.unlinkpath(lfile, ignoremissing=True)
- else:
- dropped.add(lfile)
+ with lfdirstate.parentchange():
+ update = {}
+ dropped = set()
+ updated, removed = 0, 0
+ wvfs = repo.wvfs
+ wctx = repo[None]
+ for lfile in lfiles:
+ lfileorig = os.path.relpath(
+ scmutil.backuppath(ui, repo, lfile), start=repo.root
+ )
+ standin = lfutil.standin(lfile)
+ standinorig = os.path.relpath(
+ scmutil.backuppath(ui, repo, standin), start=repo.root
+ )
+ if wvfs.exists(standin):
+ if wvfs.exists(standinorig) and wvfs.exists(lfile):
+ shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
+ wvfs.unlinkpath(standinorig)
+ expecthash = lfutil.readasstandin(wctx[standin])
+ if expecthash != b'':
+ if lfile not in wctx: # not switched to normal file
+ if repo.dirstate[standin] != b'?':
+ wvfs.unlinkpath(lfile, ignoremissing=True)
+ else:
+ dropped.add(lfile)
- # use normallookup() to allocate an entry in largefiles
- # dirstate to prevent lfilesrepo.status() from reporting
- # missing files as removed.
- lfdirstate.normallookup(lfile)
- update[lfile] = expecthash
- else:
- # Remove lfiles for which the standin is deleted, unless the
- # lfile is added to the repository again. This happens when a
- # largefile is converted back to a normal file: the standin
- # disappears, but a new (normal) file appears as the lfile.
- if (
- wvfs.exists(lfile)
- and repo.dirstate.normalize(lfile) not in wctx
- ):
- wvfs.unlinkpath(lfile)
- removed += 1
+ # allocate an entry in the largefiles dirstate (via update_file)
+ # to prevent lfilesrepo.status() from reporting missing files as
+ # removed.
+ lfdirstate.update_file(
+ lfile,
+ p1_tracked=True,
+ wc_tracked=True,
+ possibly_dirty=True,
+ )
+ update[lfile] = expecthash
+ else:
+ # Remove lfiles for which the standin is deleted, unless the
+ # lfile is added to the repository again. This happens when a
+ # largefile is converted back to a normal file: the standin
+ # disappears, but a new (normal) file appears as the lfile.
+ if (
+ wvfs.exists(lfile)
+ and repo.dirstate.normalize(lfile) not in wctx
+ ):
+ wvfs.unlinkpath(lfile)
+ removed += 1
# largefile processing might be slow and be interrupted - be prepared
lfdirstate.write()
@@ -570,46 +575,48 @@ def updatelfiles(
for f in dropped:
repo.wvfs.unlinkpath(lfutil.standin(f))
-
# This needs to happen for dropped files, otherwise they stay in
# the M state.
- lfutil.synclfdirstate(repo, lfdirstate, f, normallookup)
+ lfdirstate._drop(f)
statuswriter(_(b'getting changed largefiles\n'))
cachelfiles(ui, repo, None, lfiles)
- for lfile in lfiles:
- update1 = 0
-
- expecthash = update.get(lfile)
- if expecthash:
- if not lfutil.copyfromcache(repo, expecthash, lfile):
- # failed ... but already removed and set to normallookup
- continue
- # Synchronize largefile dirstate to the last modified
- # time of the file
- lfdirstate.normal(lfile)
- update1 = 1
+ with lfdirstate.parentchange():
+ for lfile in lfiles:
+ update1 = 0
- # copy the exec mode of largefile standin from the repository's
- # dirstate to its state in the lfdirstate.
- standin = lfutil.standin(lfile)
- if wvfs.exists(standin):
- # exec is decided by the users permissions using mask 0o100
- standinexec = wvfs.stat(standin).st_mode & 0o100
- st = wvfs.stat(lfile)
- mode = st.st_mode
- if standinexec != mode & 0o100:
- # first remove all X bits, then shift all R bits to X
- mode &= ~0o111
- if standinexec:
- mode |= (mode >> 2) & 0o111 & ~util.umask
- wvfs.chmod(lfile, mode)
+ expecthash = update.get(lfile)
+ if expecthash:
+ if not lfutil.copyfromcache(repo, expecthash, lfile):
+ # failed ... but already removed and set to normallookup
+ continue
+ # Synchronize largefile dirstate to the last modified
+ # time of the file
+ lfdirstate.update_file(
+ lfile, p1_tracked=True, wc_tracked=True
+ )
update1 = 1
- updated += update1
+ # copy the exec mode of largefile standin from the repository's
+ # dirstate to its state in the lfdirstate.
+ standin = lfutil.standin(lfile)
+ if wvfs.exists(standin):
+ # exec is decided by the users permissions using mask 0o100
+ standinexec = wvfs.stat(standin).st_mode & 0o100
+ st = wvfs.stat(lfile)
+ mode = st.st_mode
+ if standinexec != mode & 0o100:
+ # first remove all X bits, then shift all R bits to X
+ mode &= ~0o111
+ if standinexec:
+ mode |= (mode >> 2) & 0o111 & ~util.umask
+ wvfs.chmod(lfile, mode)
+ update1 = 1
- lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
+ updated += update1
+
+ lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
lfdirstate.write()
if lfiles:
diff --git a/hgext/largefiles/lfutil.py b/hgext/largefiles/lfutil.py
--- a/hgext/largefiles/lfutil.py
+++ b/hgext/largefiles/lfutil.py
@@ -15,10 +15,7 @@ import os
import stat
from mercurial.i18n import _
-from mercurial.node import (
- hex,
- nullid,
-)
+from mercurial.node import hex
from mercurial.pycompat import open
from mercurial import (
@@ -28,6 +25,7 @@ from mercurial import (
httpconnection,
match as matchmod,
pycompat,
+ requirements,
scmutil,
sparse,
util,
@@ -164,7 +162,15 @@ class largefilesdirstate(dirstate.dirsta
def __getitem__(self, key):
return super(largefilesdirstate, self).__getitem__(unixpath(key))
- def normal(self, f):
+ def set_tracked(self, f):
+ return super(largefilesdirstate, self).set_tracked(unixpath(f))
+
+ def set_untracked(self, f):
+ return super(largefilesdirstate, self).set_untracked(unixpath(f))
+
+ def normal(self, f, parentfiledata=None):
+ # not sure if we should pass the `parentfiledata` down or throw it
+ # away. So throwing it away to stay on the safe side.
return super(largefilesdirstate, self).normal(unixpath(f))
def remove(self, f):
@@ -200,6 +206,7 @@ def openlfdirstate(ui, repo, create=True
vfs = repo.vfs
lfstoredir = longname
opener = vfsmod.vfs(vfs.join(lfstoredir))
+ use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
lfdirstate = largefilesdirstate(
opener,
ui,
@@ -207,6 +214,7 @@ def openlfdirstate(ui, repo, create=True
repo.dirstate._validate,
lambda: sparse.matcher(repo),
repo.nodeconstants,
+ use_dirstate_v2,
)
# If the largefiles dirstate does not exist, populate and create
@@ -221,9 +229,12 @@ def openlfdirstate(ui, repo, create=True
if len(standins) > 0:
vfs.makedirs(lfstoredir)
- for standin in standins:
- lfile = splitstandin(standin)
- lfdirstate.normallookup(lfile)
+ with lfdirstate.parentchange():
+ for standin in standins:
+ lfile = splitstandin(standin)
+ lfdirstate.update_file(
+ lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
+ )
return lfdirstate
@@ -243,7 +254,7 @@ def lfdirstatestatus(lfdirstate, repo):
modified.append(lfile)
else:
clean.append(lfile)
- lfdirstate.normal(lfile)
+ lfdirstate.set_clean(lfile)
return s
@@ -544,46 +555,49 @@ def getstandinsstate(repo):
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
lfstandin = standin(lfile)
- if lfstandin in repo.dirstate:
- stat = repo.dirstate._map[lfstandin]
- state, mtime = stat[0], stat[3]
+ if lfstandin not in repo.dirstate:
+ lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
else:
- state, mtime = b'?', -1
- if state == b'n':
- if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
- # state 'n' doesn't ensure 'clean' in this case
- lfdirstate.normallookup(lfile)
- else:
- lfdirstate.normal(lfile)
- elif state == b'm':
- lfdirstate.normallookup(lfile)
- elif state == b'r':
- lfdirstate.remove(lfile)
- elif state == b'a':
- lfdirstate.add(lfile)
- elif state == b'?':
- lfdirstate.drop(lfile)
+ stat = repo.dirstate._map[lfstandin]
+ state, mtime = stat.state, stat.mtime
+ if state == b'n':
+ if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
+ # state 'n' doesn't ensure 'clean' in this case
+ lfdirstate.update_file(
+ lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
+ )
+ else:
+ lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
+ elif state == b'm':
+ lfdirstate.update_file(
+ lfile, p1_tracked=True, wc_tracked=True, merged=True
+ )
+ elif state == b'r':
+ lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
+ elif state == b'a':
+ lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
def markcommitted(orig, ctx, node):
repo = ctx.repo()
- orig(node)
+ lfdirstate = openlfdirstate(repo.ui, repo)
+ with lfdirstate.parentchange():
+ orig(node)
- # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
- # because files coming from the 2nd parent are omitted in the latter.
- #
- # The former should be used to get targets of "synclfdirstate",
- # because such files:
- # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
- # - have to be marked as "n" after commit, but
- # - aren't listed in "repo[node].files()"
+ # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
+ # because files coming from the 2nd parent are omitted in the latter.
+ #
+ # The former should be used to get targets of "synclfdirstate",
+ # because such files:
+ # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
+ # - have to be marked as "n" after commit, but
+ # - aren't listed in "repo[node].files()"
- lfdirstate = openlfdirstate(repo.ui, repo)
- for f in ctx.files():
- lfile = splitstandin(f)
- if lfile is not None:
- synclfdirstate(repo, lfdirstate, lfile, False)
+ for f in ctx.files():
+ lfile = splitstandin(f)
+ if lfile is not None:
+ synclfdirstate(repo, lfdirstate, lfile, False)
lfdirstate.write()
# As part of committing, copy all of the largefiles into the cache.
@@ -613,7 +627,7 @@ def getlfilestoupload(repo, missing, add
) as progress:
for i, n in enumerate(missing):
progress.update(i)
- parents = [p for p in repo[n].parents() if p != nullid]
+ parents = [p for p in repo[n].parents() if p != repo.nullid]
with lfstatus(repo, value=False):
ctx = repo[n]
diff --git a/hgext/largefiles/overrides.py b/hgext/largefiles/overrides.py
--- a/hgext/largefiles/overrides.py
+++ b/hgext/largefiles/overrides.py
@@ -150,10 +150,7 @@ def addlargefiles(ui, repo, isaddremove,
executable=lfutil.getexecutable(repo.wjoin(f)),
)
standins.append(standinname)
- if lfdirstate[f] == b'r':
- lfdirstate.normallookup(f)
- else:
- lfdirstate.add(f)
+ lfdirstate.set_tracked(f)
lfdirstate.write()
bad += [
lfutil.splitstandin(f)
@@ -230,9 +227,7 @@ def removelargefiles(ui, repo, isaddremo
repo[None].forget(remove)
for f in remove:
- lfutil.synclfdirstate(
- repo, lfdirstate, lfutil.splitstandin(f), False
- )
+ lfdirstate.set_untracked(lfutil.splitstandin(f))
lfdirstate.write()
@@ -653,12 +648,17 @@ def overridecalculateupdates(
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
- for lfile, args, msg in actions[MERGE_ACTION_LARGEFILE_MARK_REMOVED]:
- # this should be executed before 'orig', to execute 'remove'
- # before all other actions
- repo.dirstate.remove(lfile)
- # make sure lfile doesn't get synclfdirstate'd as normal
- lfdirstate.add(lfile)
+ with lfdirstate.parentchange():
+ for lfile, args, msg in actions[
+ MERGE_ACTION_LARGEFILE_MARK_REMOVED
+ ]:
+ # this should be executed before 'orig', to execute 'remove'
+ # before all other actions
+ repo.dirstate.update_file(
+ lfile, p1_tracked=True, wc_tracked=False
+ )
+ # make sure lfile doesn't get synclfdirstate'd as normal
+ lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
lfdirstate.write()
return orig(repo, actions, branchmerge, getfiledata)
@@ -859,11 +859,11 @@ def overridecopy(orig, ui, repo, pats, o
# The file is gone, but this deletes any empty parent
# directories as a side-effect.
repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
- lfdirstate.remove(srclfile)
+ lfdirstate.set_untracked(srclfile)
else:
util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))
- lfdirstate.add(destlfile)
+ lfdirstate.set_tracked(destlfile)
lfdirstate.write()
except error.Abort as e:
if e.message != _(b'no files to copy'):
@@ -1382,10 +1382,7 @@ def cmdutilforget(
with repo.wlock():
lfdirstate = lfutil.openlfdirstate(ui, repo)
for f in forget:
- if lfdirstate[f] == b'a':
- lfdirstate.drop(f)
- else:
- lfdirstate.remove(f)
+ lfdirstate.set_untracked(f)
lfdirstate.write()
standins = [lfutil.standin(f) for f in forget]
for f in standins:
@@ -1636,13 +1633,16 @@ def overriderollback(orig, ui, repo, **o
repo.wvfs.unlinkpath(standin, ignoremissing=True)
lfdirstate = lfutil.openlfdirstate(ui, repo)
- orphans = set(lfdirstate)
- lfiles = lfutil.listlfiles(repo)
- for file in lfiles:
- lfutil.synclfdirstate(repo, lfdirstate, file, True)
- orphans.discard(file)
- for lfile in orphans:
- lfdirstate.drop(lfile)
+ with lfdirstate.parentchange():
+ orphans = set(lfdirstate)
+ lfiles = lfutil.listlfiles(repo)
+ for file in lfiles:
+ lfutil.synclfdirstate(repo, lfdirstate, file, True)
+ orphans.discard(file)
+ for lfile in orphans:
+ lfdirstate.update_file(
+ lfile, p1_tracked=False, wc_tracked=False
+ )
lfdirstate.write()
return result
@@ -1787,7 +1787,9 @@ def mergeupdate(orig, repo, node, branch
# mark all clean largefiles as dirty, just in case the update gets
# interrupted before largefiles and lfdirstate are synchronized
for lfile in oldclean:
- lfdirstate.normallookup(lfile)
+ entry = lfdirstate._map.get(lfile)
+ assert not (entry.merged_removed or entry.from_p2_removed)
+ lfdirstate.set_possibly_dirty(lfile)
lfdirstate.write()
oldstandins = lfutil.getstandinsstate(repo)
@@ -1798,23 +1800,24 @@ def mergeupdate(orig, repo, node, branch
raise error.ProgrammingError(
b'largefiles is not compatible with in-memory merge'
)
- result = orig(repo, node, branchmerge, force, *args, **kwargs)
+ with lfdirstate.parentchange():
+ result = orig(repo, node, branchmerge, force, *args, **kwargs)
- newstandins = lfutil.getstandinsstate(repo)
- filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
+ newstandins = lfutil.getstandinsstate(repo)
+ filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
- # to avoid leaving all largefiles as dirty and thus rehash them, mark
- # all the ones that didn't change as clean
- for lfile in oldclean.difference(filelist):
- lfdirstate.normal(lfile)
- lfdirstate.write()
+ # to avoid leaving all largefiles as dirty and thus rehash them, mark
+ # all the ones that didn't change as clean
+ for lfile in oldclean.difference(filelist):
+ lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
+ lfdirstate.write()
- if branchmerge or force or partial:
- filelist.extend(s.deleted + s.removed)
+ if branchmerge or force or partial:
+ filelist.extend(s.deleted + s.removed)
- lfcommands.updatelfiles(
- repo.ui, repo, filelist=filelist, normallookup=partial
- )
+ lfcommands.updatelfiles(
+ repo.ui, repo, filelist=filelist, normallookup=partial
+ )
return result
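
The overrides.py hunks above all apply one rule: the state-dependent
dirstate calls (add, remove, drop, normal, normallookup) are replaced by
the intent-based API, which inspects the current entry itself. A rough
before/after sketch, assuming lfdirstate is the object returned by
lfutil.openlfdirstate(); callers still flush once with lfdirstate.write():

    def start_tracking(lfdirstate, f):
        # Old style: the caller had to check the current state first, e.g.
        #     if lfdirstate[f] == b'r':
        #         lfdirstate.normallookup(f)
        #     else:
        #         lfdirstate.add(f)
        # New style: one call, the dirstate works out the transition.
        lfdirstate.set_tracked(f)

    def stop_tracking(lfdirstate, f):
        # Likewise replaces the old drop()/remove() choice.
        lfdirstate.set_untracked(f)
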
diff --git a/hgext/largefiles/reposetup.py b/hgext/largefiles/reposetup.py
--- a/hgext/largefiles/reposetup.py
+++ b/hgext/largefiles/reposetup.py
@@ -222,7 +222,7 @@ def reposetup(ui, repo):
else:
if listclean:
clean.append(lfile)
- lfdirstate.normal(lfile)
+ lfdirstate.set_clean(lfile)
else:
tocheck = unsure + modified + added + clean
modified, added, clean = [], [], []
diff --git a/hgext/lfs/wrapper.py b/hgext/lfs/wrapper.py
--- a/hgext/lfs/wrapper.py
+++ b/hgext/lfs/wrapper.py
@@ -10,7 +10,7 @@ from __future__ import absolute_import
import hashlib
from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid, short
+from mercurial.node import bin, hex, short
from mercurial.pycompat import (
getattr,
setattr,
@@ -158,7 +158,7 @@ def _islfs(rlog, node=None, rev=None):
rev = rlog.rev(node)
else:
node = rlog.node(rev)
- if node == nullid:
+ if node == rlog.nullid:
return False
flags = rlog.flags(rev)
return bool(flags & revlog.REVIDX_EXTSTORED)
diff --git a/hgext/mq.py b/hgext/mq.py
--- a/hgext/mq.py
+++ b/hgext/mq.py
@@ -73,7 +73,6 @@ from mercurial.i18n import _
from mercurial.node import (
bin,
hex,
- nullid,
nullrev,
short,
)
@@ -908,13 +907,13 @@ class queue(object):
"""
if rev is None:
(p1, p2) = repo.dirstate.parents()
- if p2 == nullid:
+ if p2 == repo.nullid:
return p1
if not self.applied:
return None
return self.applied[-1].node
p1, p2 = repo.changelog.parents(rev)
- if p2 != nullid and p2 in [x.node for x in self.applied]:
+ if p2 != repo.nullid and p2 in [x.node for x in self.applied]:
return p2
return p1
@@ -1091,18 +1090,9 @@ class queue(object):
if merge and files:
# Mark as removed/merged and update dirstate parent info
- removed = []
- merged = []
- for f in files:
- if os.path.lexists(repo.wjoin(f)):
- merged.append(f)
- else:
- removed.append(f)
with repo.dirstate.parentchange():
- for f in removed:
- repo.dirstate.remove(f)
- for f in merged:
- repo.dirstate.merge(f)
+ for f in files:
+ repo.dirstate.update_file_p1(f, p1_tracked=True)
p1 = repo.dirstate.p1()
repo.setparents(p1, merge)
@@ -1591,7 +1581,7 @@ class queue(object):
for hs in repo.branchmap().iterheads():
heads.extend(hs)
if not heads:
- heads = [nullid]
+ heads = [repo.nullid]
if repo.dirstate.p1() not in heads and not exact:
self.ui.status(_(b"(working directory not at a head)\n"))
@@ -1852,12 +1842,16 @@ class queue(object):
with repo.dirstate.parentchange():
for f in a:
repo.wvfs.unlinkpath(f, ignoremissing=True)
- repo.dirstate.drop(f)
+ repo.dirstate.update_file(
+ f, p1_tracked=False, wc_tracked=False
+ )
for f in m + r:
fctx = ctx[f]
repo.wwrite(f, fctx.data(), fctx.flags())
- repo.dirstate.normal(f)
- repo.setparents(qp, nullid)
+ repo.dirstate.update_file(
+ f, p1_tracked=True, wc_tracked=True
+ )
+ repo.setparents(qp, repo.nullid)
for patch in reversed(self.applied[start:end]):
self.ui.status(_(b"popping %s\n") % patch.name)
del self.applied[start:end]
@@ -2003,67 +1997,73 @@ class queue(object):
bmlist = repo[top].bookmarks()
- dsguard = None
- try:
- dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh')
- if diffopts.git or diffopts.upgrade:
- copies = {}
- for dst in a:
- src = repo.dirstate.copied(dst)
- # during qfold, the source file for copies may
- # be removed. Treat this as a simple add.
- if src is not None and src in repo.dirstate:
- copies.setdefault(src, []).append(dst)
- repo.dirstate.add(dst)
- # remember the copies between patchparent and qtip
- for dst in aaa:
- src = ctx[dst].copysource()
- if src:
- copies.setdefault(src, []).extend(
- copies.get(dst, [])
+ with repo.dirstate.parentchange():
+ # XXX do we actually need the dirstateguard
+ dsguard = None
+ try:
+ dsguard = dirstateguard.dirstateguard(repo, b'mq.refresh')
+ if diffopts.git or diffopts.upgrade:
+ copies = {}
+ for dst in a:
+ src = repo.dirstate.copied(dst)
+ # during qfold, the source file for copies may
+ # be removed. Treat this as a simple add.
+ if src is not None and src in repo.dirstate:
+ copies.setdefault(src, []).append(dst)
+ repo.dirstate.update_file(
+ dst, p1_tracked=False, wc_tracked=True
)
- if dst in a:
- copies[src].append(dst)
- # we can't copy a file created by the patch itself
- if dst in copies:
- del copies[dst]
- for src, dsts in pycompat.iteritems(copies):
- for dst in dsts:
- repo.dirstate.copy(src, dst)
- else:
- for dst in a:
- repo.dirstate.add(dst)
- # Drop useless copy information
- for f in list(repo.dirstate.copies()):
- repo.dirstate.copy(None, f)
- for f in r:
- repo.dirstate.remove(f)
- # if the patch excludes a modified file, mark that
- # file with mtime=0 so status can see it.
- mm = []
- for i in pycompat.xrange(len(m) - 1, -1, -1):
- if not match1(m[i]):
- mm.append(m[i])
- del m[i]
- for f in m:
- repo.dirstate.normal(f)
- for f in mm:
- repo.dirstate.normallookup(f)
- for f in forget:
- repo.dirstate.drop(f)
-
- user = ph.user or ctx.user()
-
- oldphase = repo[top].phase()
-
- # assumes strip can roll itself back if interrupted
- repo.setparents(*cparents)
- self.applied.pop()
- self.applieddirty = True
- strip(self.ui, repo, [top], update=False, backup=False)
- dsguard.close()
- finally:
- release(dsguard)
+ # remember the copies between patchparent and qtip
+ for dst in aaa:
+ src = ctx[dst].copysource()
+ if src:
+ copies.setdefault(src, []).extend(
+ copies.get(dst, [])
+ )
+ if dst in a:
+ copies[src].append(dst)
+ # we can't copy a file created by the patch itself
+ if dst in copies:
+ del copies[dst]
+ for src, dsts in pycompat.iteritems(copies):
+ for dst in dsts:
+ repo.dirstate.copy(src, dst)
+ else:
+ for dst in a:
+ repo.dirstate.update_file(
+ dst, p1_tracked=False, wc_tracked=True
+ )
+ # Drop useless copy information
+ for f in list(repo.dirstate.copies()):
+ repo.dirstate.copy(None, f)
+ for f in r:
+ repo.dirstate.update_file_p1(f, p1_tracked=True)
+ # if the patch excludes a modified file, mark that
+ # file with mtime=0 so status can see it.
+ mm = []
+ for i in pycompat.xrange(len(m) - 1, -1, -1):
+ if not match1(m[i]):
+ mm.append(m[i])
+ del m[i]
+ for f in m:
+ repo.dirstate.update_file_p1(f, p1_tracked=True)
+ for f in mm:
+ repo.dirstate.update_file_p1(f, p1_tracked=True)
+ for f in forget:
+ repo.dirstate.update_file_p1(f, p1_tracked=False)
+
+ user = ph.user or ctx.user()
+
+ oldphase = repo[top].phase()
+
+ # assumes strip can roll itself back if interrupted
+ repo.setparents(*cparents)
+ self.applied.pop()
+ self.applieddirty = True
+ strip(self.ui, repo, [top], update=False, backup=False)
+ dsguard.close()
+ finally:
+ release(dsguard)
try:
# might be nice to attempt to roll back strip after this
@@ -3639,8 +3639,8 @@ def rename(ui, repo, patch, name=None, *
wctx = r[None]
with r.wlock():
if r.dirstate[patch] == b'a':
- r.dirstate.drop(patch)
- r.dirstate.add(name)
+ r.dirstate.set_untracked(patch)
+ r.dirstate.set_tracked(name)
else:
wctx.copy(patch, name)
wctx.forget([patch])
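
The mq.py hunks funnel every dirstate mutation through update_file() or
update_file_p1() inside a parentchange() context, which states the
p1/working-copy tracking explicitly instead of encoding it in a method
name. A condensed sketch of the qpop idiom, assuming repo is a local
repository object; the file lists are placeholders:

    def restore_to_parent(repo, added, kept, new_parent):
        with repo.dirstate.parentchange():
            for f in added:
                # previously repo.dirstate.drop(f)
                repo.dirstate.update_file(
                    f, p1_tracked=False, wc_tracked=False
                )
            for f in kept:
                # previously repo.dirstate.normal(f)
                repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
            repo.setparents(new_parent, repo.nullid)
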
diff --git a/hgext/narrow/narrowbundle2.py b/hgext/narrow/narrowbundle2.py
--- a/hgext/narrow/narrowbundle2.py
+++ b/hgext/narrow/narrowbundle2.py
@@ -11,7 +11,6 @@ import errno
import struct
from mercurial.i18n import _
-from mercurial.node import nullid
from mercurial import (
bundle2,
changegroup,
@@ -94,7 +93,7 @@ def generateellipsesbundle2(
raise error.Abort(_(b'depth must be positive, got %d') % depth)
heads = set(heads or repo.heads())
- common = set(common or [nullid])
+ common = set(common or [repo.nullid])
visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
repo, common, heads, set(), match, depth=depth
@@ -128,7 +127,7 @@ def generate_ellipses_bundle2_for_wideni
common,
known,
):
- common = set(common or [nullid])
+ common = set(common or [repo.nullid])
# Steps:
# 1. Send kill for "$known & ::common"
#
@@ -282,10 +281,10 @@ def handlechangegroup_widen(op, inpart):
try:
gen = exchange.readbundle(ui, f, chgrpfile, vfs)
# silence internal shuffling chatter
- override = {(b'ui', b'quiet'): True}
- if ui.verbose:
- override = {}
- with ui.configoverride(override):
+ maybe_silent = (
+ ui.silent() if not ui.verbose else util.nullcontextmanager()
+ )
+ with maybe_silent:
if isinstance(gen, bundle2.unbundle20):
with repo.transaction(b'strip') as tr:
bundle2.processbundle(repo, gen, lambda: tr)
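
Silencing the unbundle chatter no longer needs a hand-rolled override of
ui.quiet: ui.silent() returns a context manager, and
util.nullcontextmanager() keeps the call site uniform when verbose output
is wanted. A small sketch of the same pattern, assuming the usual
mercurial imports; the helper is illustrative only:

    from mercurial import util

    def maybe_quiet(ui):
        # Both branches return a context manager, so callers can always
        # write "with maybe_quiet(ui): ...".
        if ui.verbose:
            return util.nullcontextmanager()
        return ui.silent()
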
diff --git a/hgext/narrow/narrowcommands.py b/hgext/narrow/narrowcommands.py
--- a/hgext/narrow/narrowcommands.py
+++ b/hgext/narrow/narrowcommands.py
@@ -12,7 +12,6 @@ import os
from mercurial.i18n import _
from mercurial.node import (
hex,
- nullid,
short,
)
from mercurial import (
@@ -193,7 +192,7 @@ def pullbundle2extraprepare(orig, pullop
kwargs[b'known'] = [
hex(ctx.node())
for ctx in repo.set(b'::%ln', pullop.common)
- if ctx.node() != nullid
+ if ctx.node() != repo.nullid
]
if not kwargs[b'known']:
# Mercurial serializes an empty list as '' and deserializes it as
@@ -228,10 +227,17 @@ def _narrow(
unfi = repo.unfiltered()
outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc)
ui.status(_(b'looking for local changes to affected paths\n'))
+ progress = ui.makeprogress(
+ topic=_(b'changesets'),
+ unit=_(b'changesets'),
+ total=len(outgoing.missing) + len(outgoing.excluded),
+ )
localnodes = []
- for n in itertools.chain(outgoing.missing, outgoing.excluded):
- if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
- localnodes.append(n)
+ with progress:
+ for n in itertools.chain(outgoing.missing, outgoing.excluded):
+ progress.increment()
+ if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
+ localnodes.append(n)
revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
hiddenrevs = repoview.filterrevs(repo, b'visible')
visibletostrip = list(
@@ -275,6 +281,10 @@ def _narrow(
)
hg.clean(repo, urev)
overrides = {(b'devel', b'strip-obsmarkers'): False}
+ if backup:
+ ui.status(_(b'moving unwanted changesets to backup\n'))
+ else:
+ ui.status(_(b'deleting unwanted changesets\n'))
with ui.configoverride(overrides, b'narrow'):
repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
@@ -310,8 +320,10 @@ def _narrow(
util.unlinkpath(repo.svfs.join(f))
repo.store.markremoved(f)
- narrowspec.updateworkingcopy(repo, assumeclean=True)
- narrowspec.copytoworkingcopy(repo)
+ ui.status(_(b'deleting unwanted files from working copy\n'))
+ with repo.dirstate.parentchange():
+ narrowspec.updateworkingcopy(repo, assumeclean=True)
+ narrowspec.copytoworkingcopy(repo)
repo.destroyed()
@@ -370,7 +382,7 @@ def _widen(
ds = repo.dirstate
p1, p2 = ds.p1(), ds.p2()
with ds.parentchange():
- ds.setparents(nullid, nullid)
+ ds.setparents(repo.nullid, repo.nullid)
if isoldellipses:
with wrappedextraprepare:
exchange.pull(repo, remote, heads=common)
@@ -380,7 +392,7 @@ def _widen(
known = [
ctx.node()
for ctx in repo.set(b'::%ln', common)
- if ctx.node() != nullid
+ if ctx.node() != repo.nullid
]
with remote.commandexecutor() as e:
bundle = e.callcommand(
@@ -411,7 +423,7 @@ def _widen(
with ds.parentchange():
ds.setparents(p1, p2)
- with repo.transaction(b'widening'):
+ with repo.transaction(b'widening'), repo.dirstate.parentchange():
repo.setnewnarrowpats()
narrowspec.updateworkingcopy(repo)
narrowspec.copytoworkingcopy(repo)
@@ -578,7 +590,9 @@ def trackedcmd(ui, repo, remotepath=None
return 0
if update_working_copy:
- with repo.wlock(), repo.lock(), repo.transaction(b'narrow-wc'):
+ with repo.wlock(), repo.lock(), repo.transaction(
+ b'narrow-wc'
+ ), repo.dirstate.parentchange():
narrowspec.updateworkingcopy(repo)
narrowspec.copytoworkingcopy(repo)
return 0
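
One of the narrowcommands.py hunks above adds progress reporting to the
scan over outgoing changesets in _narrow(). The general shape of
ui.makeprogress() is sketched below with a placeholder predicate; the
progress object is both a context manager and the thing you tick:

    from mercurial.i18n import _

    def scan_with_progress(ui, items, predicate):
        progress = ui.makeprogress(
            topic=_(b'changesets'), unit=_(b'changesets'), total=len(items)
        )
        hits = []
        with progress:
            for item in items:
                progress.increment()
                if predicate(item):
                    hits.append(item)
        return hits
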
diff --git a/hgext/narrow/narrowdirstate.py b/hgext/narrow/narrowdirstate.py
--- a/hgext/narrow/narrowdirstate.py
+++ b/hgext/narrow/narrowdirstate.py
@@ -38,6 +38,14 @@ def wrapdirstate(repo, dirstate):
return super(narrowdirstate, self).normal(*args, **kwargs)
@_editfunc
+ def set_tracked(self, *args):
+ return super(narrowdirstate, self).set_tracked(*args)
+
+ @_editfunc
+ def set_untracked(self, *args):
+ return super(narrowdirstate, self).set_untracked(*args)
+
+ @_editfunc
def add(self, *args):
return super(narrowdirstate, self).add(*args)
diff --git a/hgext/phabricator.py b/hgext/phabricator.py
--- a/hgext/phabricator.py
+++ b/hgext/phabricator.py
@@ -69,7 +69,7 @@ import operator
import re
import time
-from mercurial.node import bin, nullid, short
+from mercurial.node import bin, short
from mercurial.i18n import _
from mercurial.pycompat import getattr
from mercurial.thirdparty import attr
@@ -586,7 +586,7 @@ def getoldnodedrevmap(repo, nodelist):
tags.tag(
repo,
tagname,
- nullid,
+ repo.nullid,
message=None,
user=None,
date=None,
@@ -1606,7 +1606,7 @@ def phabsend(ui, repo, *revs, **opts):
tags.tag(
repo,
tagname,
- nullid,
+ repo.nullid,
message=None,
user=None,
date=None,
diff --git a/hgext/purge.py b/hgext/purge.py
--- a/hgext/purge.py
+++ b/hgext/purge.py
@@ -25,8 +25,15 @@
'''command to delete untracked files from the working directory (DEPRECATED)
The functionality of this extension has been included in core Mercurial since
-version 5.7. Please use :hg:`purge ...` instead. :hg:`purge --confirm` is now the default, unless the extension is enabled for backward compatibility.
+version 5.7. Please use :hg:`purge ...` instead. :hg:`purge --confirm` is now
+the default, unless the extension is enabled for backward compatibility.
'''
# This empty extension looks pointless, but core mercurial checks if it's loaded
# to implement the slightly different behavior documented above.
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = b'ships-with-hg-core'
diff --git a/hgext/rebase.py b/hgext/rebase.py
--- a/hgext/rebase.py
+++ b/hgext/rebase.py
@@ -190,18 +190,18 @@ class rebaseruntime(object):
self.destmap = {}
self.skipped = set()
- self.collapsef = opts.get(b'collapse', False)
- self.collapsemsg = cmdutil.logmessage(ui, opts)
- self.date = opts.get(b'date', None)
+ self.collapsef = opts.get('collapse', False)
+ self.collapsemsg = cmdutil.logmessage(ui, pycompat.byteskwargs(opts))
+ self.date = opts.get('date', None)
- e = opts.get(b'extrafn') # internal, used by e.g. hgsubversion
+ e = opts.get('extrafn') # internal, used by e.g. hgsubversion
self.extrafns = [_savegraft]
if e:
self.extrafns = [e]
self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
- self.keepf = opts.get(b'keep', False)
- self.keepbranchesf = opts.get(b'keepbranches', False)
+ self.keepf = opts.get('keep', False)
+ self.keepbranchesf = opts.get('keepbranches', False)
self.skipemptysuccessorf = rewriteutil.skip_empty_successor(
repo.ui, b'rebase'
)
@@ -446,8 +446,15 @@ class rebaseruntime(object):
rebaseset = set(destmap.keys())
rebaseset -= set(self.obsolete_with_successor_in_destination)
rebaseset -= self.obsolete_with_successor_in_rebase_set
+ # We have our own divergence-checking in the rebase extension
+ overrides = {}
+ if obsolete.isenabled(self.repo, obsolete.createmarkersopt):
+ overrides = {
+ (b'experimental', b'evolution.allowdivergence'): b'true'
+ }
try:
- rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
+ with self.ui.configoverride(overrides):
+ rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
except error.Abort as e:
if e.hint is None:
e.hint = _(b'use --keep to keep original changesets')
@@ -623,7 +630,7 @@ class rebaseruntime(object):
repo.ui.debug(b'resuming interrupted rebase\n')
self.resume = False
else:
- overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
+ overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')}
with ui.configoverride(overrides, b'rebase'):
try:
rebasenode(
@@ -670,9 +677,7 @@ class rebaseruntime(object):
if not self.collapsef:
merging = p2 != nullrev
editform = cmdutil.mergeeditform(merging, b'rebase')
- editor = cmdutil.getcommiteditor(
- editform=editform, **pycompat.strkwargs(opts)
- )
+ editor = cmdutil.getcommiteditor(editform=editform, **opts)
# We need to set parents again here just in case we're continuing
# a rebase started with an old hg version (before 9c9cfecd4600),
# because those old versions would have left us with two dirstate
@@ -720,7 +725,7 @@ class rebaseruntime(object):
def _finishrebase(self):
repo, ui, opts = self.repo, self.ui, self.opts
- fm = ui.formatter(b'rebase', opts)
+ fm = ui.formatter(b'rebase', pycompat.byteskwargs(opts))
fm.startitem()
if self.collapsef:
p1, p2, _base = defineparents(
@@ -731,7 +736,7 @@ class rebaseruntime(object):
self.skipped,
self.obsolete_with_successor_in_destination,
)
- editopt = opts.get(b'edit')
+ editopt = opts.get('edit')
editform = b'rebase.collapse'
if self.collapsemsg:
commitmsg = self.collapsemsg
@@ -755,7 +760,7 @@ class rebaseruntime(object):
self.state[oldrev] = newrev
if b'qtip' in repo.tags():
- updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))
+ updatemq(repo, self.state, self.skipped, **opts)
# restore original working directory
# (we do this before stripping)
@@ -1056,18 +1061,17 @@ def rebase(ui, repo, **opts):
unresolved conflicts.
"""
- opts = pycompat.byteskwargs(opts)
inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
- action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
+ action = cmdutil.check_at_most_one_arg(opts, 'abort', 'stop', 'continue')
if action:
cmdutil.check_incompatible_arguments(
- opts, action, [b'confirm', b'dry_run']
+ opts, action, ['confirm', 'dry_run']
)
cmdutil.check_incompatible_arguments(
- opts, action, [b'rev', b'source', b'base', b'dest']
+ opts, action, ['rev', 'source', 'base', 'dest']
)
- cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
- cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')
+ cmdutil.check_at_most_one_arg(opts, 'confirm', 'dry_run')
+ cmdutil.check_at_most_one_arg(opts, 'rev', 'source', 'base')
if action or repo.currenttransaction() is not None:
# in-memory rebase is not compatible with resuming rebases.
@@ -1075,19 +1079,19 @@ def rebase(ui, repo, **opts):
# fail the entire transaction.)
inmemory = False
- if opts.get(b'auto_orphans'):
- disallowed_opts = set(opts) - {b'auto_orphans'}
+ if opts.get('auto_orphans'):
+ disallowed_opts = set(opts) - {'auto_orphans'}
cmdutil.check_incompatible_arguments(
- opts, b'auto_orphans', disallowed_opts
+ opts, 'auto_orphans', disallowed_opts
)
- userrevs = list(repo.revs(opts.get(b'auto_orphans')))
- opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
- opts[b'dest'] = b'_destautoorphanrebase(SRC)'
+ userrevs = list(repo.revs(opts.get('auto_orphans')))
+ opts['rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
+ opts['dest'] = b'_destautoorphanrebase(SRC)'
- if opts.get(b'dry_run') or opts.get(b'confirm'):
+ if opts.get('dry_run') or opts.get('confirm'):
return _dryrunrebase(ui, repo, action, opts)
- elif action == b'stop':
+ elif action == 'stop':
rbsrt = rebaseruntime(repo, ui)
with repo.wlock(), repo.lock():
rbsrt.restorestatus()
@@ -1136,7 +1140,7 @@ def rebase(ui, repo, **opts):
def _dryrunrebase(ui, repo, action, opts):
rbsrt = rebaseruntime(repo, ui, inmemory=True, dryrun=True, opts=opts)
- confirm = opts.get(b'confirm')
+ confirm = opts.get('confirm')
if confirm:
ui.status(_(b'starting in-memory rebase\n'))
else:
@@ -1193,7 +1197,7 @@ def _dryrunrebase(ui, repo, action, opts
isabort=True,
backup=False,
suppwarns=True,
- dryrun=opts.get(b'dry_run'),
+ dryrun=opts.get('dry_run'),
)
@@ -1203,9 +1207,9 @@ def _dorebase(ui, repo, action, opts, in
def _origrebase(ui, repo, action, opts, rbsrt):
- assert action != b'stop'
+ assert action != 'stop'
with repo.wlock(), repo.lock():
- if opts.get(b'interactive'):
+ if opts.get('interactive'):
try:
if extensions.find(b'histedit'):
enablehistedit = b''
@@ -1231,29 +1235,27 @@ def _origrebase(ui, repo, action, opts,
raise error.InputError(
_(b'cannot use collapse with continue or abort')
)
- if action == b'abort' and opts.get(b'tool', False):
+ if action == 'abort' and opts.get('tool', False):
ui.warn(_(b'tool option will be ignored\n'))
- if action == b'continue':
+ if action == 'continue':
ms = mergestatemod.mergestate.read(repo)
mergeutil.checkunresolved(ms)
- retcode = rbsrt._prepareabortorcontinue(
- isabort=(action == b'abort')
- )
+ retcode = rbsrt._prepareabortorcontinue(isabort=(action == 'abort'))
if retcode is not None:
return retcode
else:
# search default destination in this space
# used in the 'hg pull --rebase' case, see issue 5214.
- destspace = opts.get(b'_destspace')
+ destspace = opts.get('_destspace')
destmap = _definedestmap(
ui,
repo,
rbsrt.inmemory,
- opts.get(b'dest', None),
- opts.get(b'source', []),
- opts.get(b'base', []),
- opts.get(b'rev', []),
+ opts.get('dest', None),
+ opts.get('source', []),
+ opts.get('base', []),
+ opts.get('rev', []),
destspace=destspace,
)
retcode = rbsrt._preparenewrebase(destmap)
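
The rebase changes drop the early pycompat.byteskwargs(opts) conversion:
**opts coming from the command layer keeps native-string keys, and the
conversion to a bytes-keyed dict now happens only at the call sites that
still require it (e.g. ui.formatter and cmdutil.logmessage). A sketch of
the convention with a made-up command and option name:

    from mercurial import pycompat

    def mycommand(ui, repo, **opts):
        # opts keys are native str ('dry_run'), not bytes (b'dry_run').
        if opts.get('dry_run'):
            ui.status(b'nothing will be changed\n')
        # Convert only where a bytes-keyed mapping is still expected.
        fm = ui.formatter(b'mycommand', pycompat.byteskwargs(opts))
        fm.startitem()
        fm.end()
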
diff --git a/hgext/remotefilelog/basestore.py b/hgext/remotefilelog/basestore.py
--- a/hgext/remotefilelog/basestore.py
+++ b/hgext/remotefilelog/basestore.py
@@ -308,7 +308,7 @@ class basestore(object):
# Content matches the intended path
return True
return False
- except (ValueError, RuntimeError):
+ except (ValueError, shallowutil.BadRemotefilelogHeader):
pass
return False
diff --git a/hgext/remotefilelog/contentstore.py b/hgext/remotefilelog/contentstore.py
--- a/hgext/remotefilelog/contentstore.py
+++ b/hgext/remotefilelog/contentstore.py
@@ -2,7 +2,10 @@ from __future__ import absolute_import
import threading
-from mercurial.node import hex, nullid
+from mercurial.node import (
+ hex,
+ sha1nodeconstants,
+)
from mercurial.pycompat import getattr
from mercurial import (
mdiff,
@@ -55,7 +58,7 @@ class unioncontentstore(basestore.baseun
"""
chain = self.getdeltachain(name, node)
- if chain[-1][ChainIndicies.BASENODE] != nullid:
+ if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
# If we didn't receive a full chain, throw
raise KeyError((name, hex(node)))
@@ -92,7 +95,7 @@ class unioncontentstore(basestore.baseun
deltabasenode.
"""
chain = self._getpartialchain(name, node)
- while chain[-1][ChainIndicies.BASENODE] != nullid:
+ while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
x, x, deltabasename, deltabasenode, x = chain[-1]
try:
morechain = self._getpartialchain(deltabasename, deltabasenode)
@@ -187,7 +190,12 @@ class remotefilelogcontentstore(basestor
# Since remotefilelog content stores only contain full texts, just
# return that.
revision = self.get(name, node)
- return revision, name, nullid, self.getmeta(name, node)
+ return (
+ revision,
+ name,
+ sha1nodeconstants.nullid,
+ self.getmeta(name, node),
+ )
def getdeltachain(self, name, node):
# Since remotefilelog content stores just contain full texts, we return
@@ -195,7 +203,7 @@ class remotefilelogcontentstore(basestor
# The nullid in the deltabasenode slot indicates that the revision is a
# fulltext.
revision = self.get(name, node)
- return [(name, node, None, nullid, revision)]
+ return [(name, node, None, sha1nodeconstants.nullid, revision)]
def getmeta(self, name, node):
self._sanitizemetacache()
@@ -237,7 +245,12 @@ class remotecontentstore(object):
def getdelta(self, name, node):
revision = self.get(name, node)
- return revision, name, nullid, self._shared.getmeta(name, node)
+ return (
+ revision,
+ name,
+ sha1nodeconstants.nullid,
+ self._shared.getmeta(name, node),
+ )
def getdeltachain(self, name, node):
# Since our remote content stores just contain full texts, we return a
@@ -245,7 +258,7 @@ class remotecontentstore(object):
# The nullid in the deltabasenode slot indicates that the revision is a
# fulltext.
revision = self.get(name, node)
- return [(name, node, None, nullid, revision)]
+ return [(name, node, None, sha1nodeconstants.nullid, revision)]
def getmeta(self, name, node):
self._fileservice.prefetch(
@@ -268,7 +281,7 @@ class manifestrevlogstore(object):
self._store = repo.store
self._svfs = repo.svfs
self._revlogs = dict()
- self._cl = revlog.revlog(self._svfs, b'00changelog.i')
+ self._cl = revlog.revlog(self._svfs, radix=b'00changelog.i')
self._repackstartlinkrev = 0
def get(self, name, node):
@@ -276,11 +289,11 @@ class manifestrevlogstore(object):
def getdelta(self, name, node):
revision = self.get(name, node)
- return revision, name, nullid, self.getmeta(name, node)
+ return revision, name, self._cl.nullid, self.getmeta(name, node)
def getdeltachain(self, name, node):
revision = self.get(name, node)
- return [(name, node, None, nullid, revision)]
+ return [(name, node, None, self._cl.nullid, revision)]
def getmeta(self, name, node):
rl = self._revlog(name)
@@ -304,9 +317,9 @@ class manifestrevlogstore(object):
missing.discard(ancnode)
p1, p2 = rl.parents(ancnode)
- if p1 != nullid and p1 not in known:
+ if p1 != self._cl.nullid and p1 not in known:
missing.add(p1)
- if p2 != nullid and p2 not in known:
+ if p2 != self._cl.nullid and p2 not in known:
missing.add(p2)
linknode = self._cl.node(rl.linkrev(ancrev))
@@ -328,10 +341,10 @@ class manifestrevlogstore(object):
def _revlog(self, name):
rl = self._revlogs.get(name)
if rl is None:
- revlogname = b'00manifesttree.i'
+ revlogname = b'00manifesttree'
if name != b'':
- revlogname = b'meta/%s/00manifest.i' % name
- rl = revlog.revlog(self._svfs, revlogname)
+ revlogname = b'meta/%s/00manifest' % name
+ rl = revlog.revlog(self._svfs, radix=revlogname)
self._revlogs[name] = rl
return rl
@@ -352,7 +365,7 @@ class manifestrevlogstore(object):
if options and options.get(constants.OPTION_PACKSONLY):
return
treename = b''
- rl = revlog.revlog(self._svfs, b'00manifesttree.i')
+ rl = revlog.revlog(self._svfs, radix=b'00manifesttree')
startlinkrev = self._repackstartlinkrev
endlinkrev = self._repackendlinkrev
for rev in pycompat.xrange(len(rl) - 1, -1, -1):
@@ -369,9 +382,9 @@ class manifestrevlogstore(object):
if path[:5] != b'meta/' or path[-2:] != b'.i':
continue
- treename = path[5 : -len(b'/00manifest.i')]
+ treename = path[5 : -len(b'/00manifest')]
- rl = revlog.revlog(self._svfs, path)
+ rl = revlog.revlog(self._svfs, indexfile=path[:-2])
for rev in pycompat.xrange(len(rl) - 1, -1, -1):
linkrev = rl.linkrev(rev)
if linkrev < startlinkrev:
diff --git a/hgext/remotefilelog/datapack.py b/hgext/remotefilelog/datapack.py
--- a/hgext/remotefilelog/datapack.py
+++ b/hgext/remotefilelog/datapack.py
@@ -3,7 +3,10 @@ from __future__ import absolute_import
import struct
import zlib
-from mercurial.node import hex, nullid
+from mercurial.node import (
+ hex,
+ sha1nodeconstants,
+)
from mercurial.i18n import _
from mercurial import (
pycompat,
@@ -458,7 +461,7 @@ class mutabledatapack(basepack.mutableba
rawindex = b''
fmt = self.INDEXFORMAT
for node, deltabase, offset, size in entries:
- if deltabase == nullid:
+ if deltabase == sha1nodeconstants.nullid:
deltabaselocation = FULLTEXTINDEXMARK
else:
# Instead of storing the deltabase node in the index, let's
diff --git a/hgext/remotefilelog/debugcommands.py b/hgext/remotefilelog/debugcommands.py
--- a/hgext/remotefilelog/debugcommands.py
+++ b/hgext/remotefilelog/debugcommands.py
@@ -12,7 +12,7 @@ import zlib
from mercurial.node import (
bin,
hex,
- nullid,
+ sha1nodeconstants,
short,
)
from mercurial.i18n import _
@@ -57,9 +57,9 @@ def debugremotefilelog(ui, path, **opts)
_(b"%s => %s %s %s %s\n")
% (short(node), short(p1), short(p2), short(linknode), copyfrom)
)
- if p1 != nullid:
+ if p1 != sha1nodeconstants.nullid:
queue.append(p1)
- if p2 != nullid:
+ if p2 != sha1nodeconstants.nullid:
queue.append(p2)
@@ -152,7 +152,7 @@ def debugindex(orig, ui, repo, file_=Non
try:
pp = r.parents(node)
except Exception:
- pp = [nullid, nullid]
+ pp = [repo.nullid, repo.nullid]
ui.write(
b"% 6d % 9d % 7d % 6d % 7d %s %s %s\n"
% (
@@ -197,7 +197,7 @@ def debugindexdot(orig, ui, repo, file_)
node = r.node(i)
pp = r.parents(node)
ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
- if pp[1] != nullid:
+ if pp[1] != repo.nullid:
ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
ui.write(b"}\n")
@@ -212,7 +212,7 @@ def verifyremotefilelog(ui, path, **opts
filepath = os.path.join(root, file)
size, firstnode, mapping = parsefileblob(filepath, decompress)
for p1, p2, linknode, copyfrom in pycompat.itervalues(mapping):
- if linknode == nullid:
+ if linknode == sha1nodeconstants.nullid:
actualpath = os.path.relpath(root, path)
key = fileserverclient.getcachekey(
b"reponame", actualpath, file
@@ -371,7 +371,7 @@ def _sanitycheck(ui, nodes, bases):
current = node
deltabase = bases[current]
- while deltabase != nullid:
+ while deltabase != sha1nodeconstants.nullid:
if deltabase not in nodes:
ui.warn(
(
@@ -397,7 +397,7 @@ def _sanitycheck(ui, nodes, bases):
deltabase = bases[current]
# Since ``node`` begins a valid chain, reset/memoize its base to nullid
# so we don't traverse it again.
- bases[node] = nullid
+ bases[node] = sha1nodeconstants.nullid
return failures
diff --git a/hgext/remotefilelog/fileserverclient.py b/hgext/remotefilelog/fileserverclient.py
--- a/hgext/remotefilelog/fileserverclient.py
+++ b/hgext/remotefilelog/fileserverclient.py
@@ -14,7 +14,7 @@ import time
import zlib
from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
from mercurial import (
error,
pycompat,
@@ -272,7 +272,7 @@ def _getfiles_optimistic(
def _getfiles_threaded(
remote, receivemissing, progresstick, missed, idmap, step
):
- remote._callstream(b"getfiles")
+ remote._callstream(b"x_rfl_getfiles")
pipeo = remote._pipeo
pipei = remote._pipei
@@ -599,9 +599,13 @@ class fileserverclient(object):
# partition missing nodes into nullid and not-nullid so we can
# warn about this filtering potentially shadowing bugs.
- nullids = len([None for unused, id in missingids if id == nullid])
+ nullids = len(
+ [None for unused, id in missingids if id == self.repo.nullid]
+ )
if nullids:
- missingids = [(f, id) for f, id in missingids if id != nullid]
+ missingids = [
+ (f, id) for f, id in missingids if id != self.repo.nullid
+ ]
repo.ui.develwarn(
(
b'remotefilelog not fetching %d null revs'
diff --git a/hgext/remotefilelog/historypack.py b/hgext/remotefilelog/historypack.py
--- a/hgext/remotefilelog/historypack.py
+++ b/hgext/remotefilelog/historypack.py
@@ -2,7 +2,10 @@ from __future__ import absolute_import
import struct
-from mercurial.node import hex, nullid
+from mercurial.node import (
+ hex,
+ sha1nodeconstants,
+)
from mercurial import (
pycompat,
util,
@@ -147,9 +150,9 @@ class historypack(basepack.basepack):
pending.remove(ancnode)
p1node = entry[ANC_P1NODE]
p2node = entry[ANC_P2NODE]
- if p1node != nullid and p1node not in known:
+ if p1node != sha1nodeconstants.nullid and p1node not in known:
pending.add(p1node)
- if p2node != nullid and p2node not in known:
+ if p2node != sha1nodeconstants.nullid and p2node not in known:
pending.add(p2node)
yield (ancnode, p1node, p2node, entry[ANC_LINKNODE], copyfrom)
@@ -457,9 +460,9 @@ class mutablehistorypack(basepack.mutabl
def parentfunc(node):
x, p1, p2, x, x, x = entrymap[node]
parents = []
- if p1 != nullid:
+ if p1 != sha1nodeconstants.nullid:
parents.append(p1)
- if p2 != nullid:
+ if p2 != sha1nodeconstants.nullid:
parents.append(p2)
return parents
diff --git a/hgext/remotefilelog/metadatastore.py b/hgext/remotefilelog/metadatastore.py
--- a/hgext/remotefilelog/metadatastore.py
+++ b/hgext/remotefilelog/metadatastore.py
@@ -1,6 +1,9 @@
from __future__ import absolute_import
-from mercurial.node import hex, nullid
+from mercurial.node import (
+ hex,
+ sha1nodeconstants,
+)
from . import (
basestore,
shallowutil,
@@ -51,9 +54,9 @@ class unionmetadatastore(basestore.baseu
missing.append((name, node))
continue
p1, p2, linknode, copyfrom = value
- if p1 != nullid and p1 not in known:
+ if p1 != sha1nodeconstants.nullid and p1 not in known:
queue.append((copyfrom or curname, p1))
- if p2 != nullid and p2 not in known:
+ if p2 != sha1nodeconstants.nullid and p2 not in known:
queue.append((curname, p2))
return missing
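
The remotefilelog store classes have no repository object at hand, so
where largefiles and mq reach for repo.nullid these files compare against
sha1nodeconstants.nullid from mercurial.node instead. A tiny sketch of
that variant of the migration; the helper is illustrative only:

    from mercurial.node import sha1nodeconstants

    def real_parents(p1, p2):
        # remotefilelog metadata is always SHA-1, so the SHA-1 null node
        # is the right sentinel even without a repo in scope.
        return [p for p in (p1, p2) if p != sha1nodeconstants.nullid]
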
diff --git a/hgext/remotefilelog/remotefilectx.py b/hgext/remotefilelog/remotefilectx.py
--- a/hgext/remotefilelog/remotefilectx.py
+++ b/hgext/remotefilelog/remotefilectx.py
@@ -9,7 +9,7 @@ from __future__ import absolute_import
import collections
import time
-from mercurial.node import bin, hex, nullid, nullrev
+from mercurial.node import bin, hex, nullrev
from mercurial import (
ancestor,
context,
@@ -35,7 +35,7 @@ class remotefilectx(context.filectx):
ancestormap=None,
):
if fileid == nullrev:
- fileid = nullid
+ fileid = repo.nullid
if fileid and len(fileid) == 40:
fileid = bin(fileid)
super(remotefilectx, self).__init__(
@@ -78,7 +78,7 @@ class remotefilectx(context.filectx):
@propertycache
def _linkrev(self):
- if self._filenode == nullid:
+ if self._filenode == self._repo.nullid:
return nullrev
ancestormap = self.ancestormap()
@@ -174,7 +174,7 @@ class remotefilectx(context.filectx):
p1, p2, linknode, copyfrom = ancestormap[self._filenode]
results = []
- if p1 != nullid:
+ if p1 != repo.nullid:
path = copyfrom or self._path
flog = repo.file(path)
p1ctx = remotefilectx(
@@ -183,7 +183,7 @@ class remotefilectx(context.filectx):
p1ctx._descendantrev = self.rev()
results.append(p1ctx)
- if p2 != nullid:
+ if p2 != repo.nullid:
path = self._path
flog = repo.file(path)
p2ctx = remotefilectx(
@@ -504,25 +504,25 @@ class remoteworkingfilectx(context.worki
if renamed:
p1 = renamed
else:
- p1 = (path, pcl[0]._manifest.get(path, nullid))
+ p1 = (path, pcl[0]._manifest.get(path, self._repo.nullid))
- p2 = (path, nullid)
+ p2 = (path, self._repo.nullid)
if len(pcl) > 1:
- p2 = (path, pcl[1]._manifest.get(path, nullid))
+ p2 = (path, pcl[1]._manifest.get(path, self._repo.nullid))
m = {}
- if p1[1] != nullid:
+ if p1[1] != self._repo.nullid:
p1ctx = self._repo.filectx(p1[0], fileid=p1[1])
m.update(p1ctx.filelog().ancestormap(p1[1]))
- if p2[1] != nullid:
+ if p2[1] != self._repo.nullid:
p2ctx = self._repo.filectx(p2[0], fileid=p2[1])
m.update(p2ctx.filelog().ancestormap(p2[1]))
copyfrom = b''
if renamed:
copyfrom = renamed[0]
- m[None] = (p1[1], p2[1], nullid, copyfrom)
+ m[None] = (p1[1], p2[1], self._repo.nullid, copyfrom)
self._ancestormap = m
return self._ancestormap
diff --git a/hgext/remotefilelog/remotefilelog.py b/hgext/remotefilelog/remotefilelog.py
--- a/hgext/remotefilelog/remotefilelog.py
+++ b/hgext/remotefilelog/remotefilelog.py
@@ -10,12 +10,7 @@ from __future__ import absolute_import
import collections
import os
-from mercurial.node import (
- bin,
- nullid,
- wdirfilenodeids,
- wdirid,
-)
+from mercurial.node import bin
from mercurial.i18n import _
from mercurial import (
ancestor,
@@ -100,7 +95,7 @@ class remotefilelog(object):
pancestors = {}
queue = []
- if realp1 != nullid:
+ if realp1 != self.repo.nullid:
p1flog = self
if copyfrom:
p1flog = remotefilelog(self.opener, copyfrom, self.repo)
@@ -108,7 +103,7 @@ class remotefilelog(object):
pancestors.update(p1flog.ancestormap(realp1))
queue.append(realp1)
visited.add(realp1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
pancestors.update(self.ancestormap(p2))
queue.append(p2)
visited.add(p2)
@@ -129,10 +124,10 @@ class remotefilelog(object):
pacopyfrom,
)
- if pa1 != nullid and pa1 not in visited:
+ if pa1 != self.repo.nullid and pa1 not in visited:
queue.append(pa1)
visited.add(pa1)
- if pa2 != nullid and pa2 not in visited:
+ if pa2 != self.repo.nullid and pa2 not in visited:
queue.append(pa2)
visited.add(pa2)
@@ -238,7 +233,7 @@ class remotefilelog(object):
returns True if text is different than what is stored.
"""
- if node == nullid:
+ if node == self.repo.nullid:
return True
nodetext = self.read(node)
@@ -275,13 +270,13 @@ class remotefilelog(object):
return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
def parents(self, node):
- if node == nullid:
- return nullid, nullid
+ if node == self.repo.nullid:
+ return self.repo.nullid, self.repo.nullid
ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
p1, p2, linknode, copyfrom = ancestormap[node]
if copyfrom:
- p1 = nullid
+ p1 = self.repo.nullid
return p1, p2
@@ -317,8 +312,8 @@ class remotefilelog(object):
if prevnode is None:
basenode = prevnode = p1
if basenode == node:
- basenode = nullid
- if basenode != nullid:
+ basenode = self.repo.nullid
+ if basenode != self.repo.nullid:
revision = None
delta = self.revdiff(basenode, node)
else:
@@ -336,6 +331,8 @@ class remotefilelog(object):
delta=delta,
# Sidedata is not supported yet
sidedata=None,
+ # Protocol flags are not used yet
+ protocol_flags=0,
)
def revdiff(self, node1, node2):
@@ -380,13 +377,16 @@ class remotefilelog(object):
this is generally only used for bundling and communicating with vanilla
hg clients.
"""
- if node == nullid:
+ if node == self.repo.nullid:
return b""
if len(node) != 20:
raise error.LookupError(
node, self.filename, _(b'invalid revision input')
)
- if node == wdirid or node in wdirfilenodeids:
+ if (
+ node == self.repo.nodeconstants.wdirid
+ or node in self.repo.nodeconstants.wdirfilenodeids
+ ):
raise error.WdirUnsupported
store = self.repo.contentstore
@@ -432,8 +432,8 @@ class remotefilelog(object):
return self.repo.metadatastore.getancestors(self.filename, node)
def ancestor(self, a, b):
- if a == nullid or b == nullid:
- return nullid
+ if a == self.repo.nullid or b == self.repo.nullid:
+ return self.repo.nullid
revmap, parentfunc = self._buildrevgraph(a, b)
nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
@@ -442,13 +442,13 @@ class remotefilelog(object):
if ancs:
# choose a consistent winner when there's a tie
return min(map(nodemap.__getitem__, ancs))
- return nullid
+ return self.repo.nullid
def commonancestorsheads(self, a, b):
"""calculate all the heads of the common ancestors of nodes a and b"""
- if a == nullid or b == nullid:
- return nullid
+ if a == self.repo.nullid or b == self.repo.nullid:
+ return self.repo.nullid
revmap, parentfunc = self._buildrevgraph(a, b)
nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
@@ -472,10 +472,10 @@ class remotefilelog(object):
p1, p2, linknode, copyfrom = pdata
# Don't follow renames (copyfrom).
# remotefilectx.ancestor does that.
- if p1 != nullid and not copyfrom:
+ if p1 != self.repo.nullid and not copyfrom:
parents.append(p1)
allparents.add(p1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
parents.append(p2)
allparents.add(p2)
diff --git a/hgext/remotefilelog/remotefilelogserver.py b/hgext/remotefilelog/remotefilelogserver.py
--- a/hgext/remotefilelog/remotefilelogserver.py
+++ b/hgext/remotefilelog/remotefilelogserver.py
@@ -13,7 +13,7 @@ import time
import zlib
from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
from mercurial.pycompat import open
from mercurial import (
changegroup,
@@ -242,7 +242,7 @@ def _loadfileblob(repo, cachepath, path,
filecachepath = os.path.join(cachepath, path, hex(node))
if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
filectx = repo.filectx(path, fileid=node)
- if filectx.node() == nullid:
+ if filectx.node() == repo.nullid:
repo.changelog = changelog.changelog(repo.svfs)
filectx = repo.filectx(path, fileid=node)
@@ -284,7 +284,7 @@ def getflogheads(repo, proto, path):
"""A server api for requesting a filelog's heads"""
flog = repo.file(path)
heads = flog.heads()
- return b'\n'.join((hex(head) for head in heads if head != nullid))
+ return b'\n'.join((hex(head) for head in heads if head != repo.nullid))
def getfile(repo, proto, file, node):
@@ -302,7 +302,7 @@ def getfile(repo, proto, file, node):
if not cachepath:
cachepath = os.path.join(repo.path, b"remotefilelogcache")
node = bin(node.strip())
- if node == nullid:
+ if node == repo.nullid:
return b'0\0'
return b'0\0' + _loadfileblob(repo, cachepath, file, node)
@@ -327,7 +327,7 @@ def getfiles(repo, proto):
break
node = bin(request[:40])
- if node == nullid:
+ if node == repo.nullid:
yield b'0\n'
continue
@@ -380,8 +380,8 @@ def createfileblob(filectx):
ancestortext = b""
for ancestorctx in ancestors:
parents = ancestorctx.parents()
- p1 = nullid
- p2 = nullid
+ p1 = repo.nullid
+ p2 = repo.nullid
if len(parents) > 0:
p1 = parents[0].filenode()
if len(parents) > 1:
diff --git a/hgext/remotefilelog/repack.py b/hgext/remotefilelog/repack.py
--- a/hgext/remotefilelog/repack.py
+++ b/hgext/remotefilelog/repack.py
@@ -4,10 +4,7 @@ import os
import time
from mercurial.i18n import _
-from mercurial.node import (
- nullid,
- short,
-)
+from mercurial.node import short
from mercurial import (
encoding,
error,
@@ -586,7 +583,7 @@ class repacker(object):
# Create one contiguous chain and reassign deltabases.
for i, node in enumerate(orphans):
if i == 0:
- deltabases[node] = (nullid, 0)
+ deltabases[node] = (self.repo.nullid, 0)
else:
parent = orphans[i - 1]
deltabases[node] = (parent, deltabases[parent][1] + 1)
@@ -676,8 +673,8 @@ class repacker(object):
# of immediate child
deltatuple = deltabases.get(node, None)
if deltatuple is None:
- deltabase, chainlen = nullid, 0
- deltabases[node] = (nullid, 0)
+ deltabase, chainlen = self.repo.nullid, 0
+ deltabases[node] = (self.repo.nullid, 0)
nobase.add(node)
else:
deltabase, chainlen = deltatuple
@@ -692,7 +689,7 @@ class repacker(object):
# file was copied from elsewhere. So don't attempt to do any
# deltas with the other file.
if copyfrom:
- p1 = nullid
+ p1 = self.repo.nullid
if chainlen < maxchainlen:
# Record this child as the delta base for its parents.
@@ -700,9 +697,9 @@ class repacker(object):
# many children, and this will only choose the last one.
# TODO: record all children and try all deltas to find
# best
- if p1 != nullid:
+ if p1 != self.repo.nullid:
deltabases[p1] = (node, chainlen + 1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
deltabases[p2] = (node, chainlen + 1)
# experimental config: repack.chainorphansbysize
@@ -719,7 +716,7 @@ class repacker(object):
# TODO: Optimize the deltachain fetching. Since we're
# iterating over the different version of the file, we may
# be fetching the same deltachain over and over again.
- if deltabase != nullid:
+ if deltabase != self.repo.nullid:
deltaentry = self.data.getdelta(filename, node)
delta, deltabasename, origdeltabase, meta = deltaentry
size = meta.get(constants.METAKEYSIZE)
@@ -791,9 +788,9 @@ class repacker(object):
# If copyfrom == filename, it means the copy history
                 # went to some other file, then came back to this one, so we
# should continue processing it.
- if p1 != nullid and copyfrom != filename:
+ if p1 != self.repo.nullid and copyfrom != filename:
dontprocess.add(p1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
dontprocess.add(p2)
continue
@@ -814,9 +811,9 @@ class repacker(object):
def parentfunc(node):
p1, p2, linknode, copyfrom = ancestors[node]
parents = []
- if p1 != nullid:
+ if p1 != self.repo.nullid:
parents.append(p1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
parents.append(p2)
return parents
diff --git a/hgext/remotefilelog/shallowbundle.py b/hgext/remotefilelog/shallowbundle.py
--- a/hgext/remotefilelog/shallowbundle.py
+++ b/hgext/remotefilelog/shallowbundle.py
@@ -7,7 +7,7 @@
from __future__ import absolute_import
from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
from mercurial import (
bundlerepo,
changegroup,
@@ -143,7 +143,7 @@ class shallowcg1packer(changegroup.cgpac
def nodechunk(self, revlog, node, prevnode, linknode):
prefix = b''
- if prevnode == nullid:
+ if prevnode == revlog.nullid:
delta = revlog.rawdata(node)
prefix = mdiff.trivialdiffheader(len(delta))
else:
@@ -225,7 +225,17 @@ def addchangegroupfiles(
chain = None
while True:
- # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
+ # returns: None or (
+ # node,
+ # p1,
+ # p2,
+ # cs,
+ # deltabase,
+ # delta,
+ # flags,
+ # sidedata,
+ # proto_flags
+ # )
revisiondata = source.deltachunk(chain)
if not revisiondata:
break
@@ -245,7 +255,7 @@ def addchangegroupfiles(
processed = set()
def available(f, node, depf, depnode):
- if depnode != nullid and (depf, depnode) not in processed:
+ if depnode != repo.nullid and (depf, depnode) not in processed:
if not (depf, depnode) in revisiondatas:
# It's not in the changegroup, assume it's already
# in the repo
@@ -263,11 +273,11 @@ def addchangegroupfiles(
prefetchfiles = []
for f, node in queue:
revisiondata = revisiondatas[(f, node)]
- # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
+ # revisiondata: (node, p1, p2, cs, deltabase, delta, flags, sdata, pfl)
dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]
for dependent in dependents:
- if dependent == nullid or (f, dependent) in revisiondatas:
+ if dependent == repo.nullid or (f, dependent) in revisiondatas:
continue
prefetchfiles.append((f, hex(dependent)))
@@ -287,8 +297,18 @@ def addchangegroupfiles(
fl = repo.file(f)
revisiondata = revisiondatas[(f, node)]
- # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
- node, p1, p2, linknode, deltabase, delta, flags, sidedata = revisiondata
+ # revisiondata: (node, p1, p2, cs, deltabase, delta, flags, sdata, pfl)
+ (
+ node,
+ p1,
+ p2,
+ linknode,
+ deltabase,
+ delta,
+ flags,
+ sidedata,
+ proto_flags,
+ ) = revisiondata
if not available(f, node, f, deltabase):
continue
@@ -306,7 +326,7 @@ def addchangegroupfiles(
continue
for p in [p1, p2]:
- if p != nullid:
+ if p != repo.nullid:
if not available(f, node, f, p):
continue
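
deltachunk() now yields a trailing protocol-flags field alongside the
sidedata one, so the unpacking sites in shallowbundle.py grow to nine
elements even though remotefilelog ignores both for now. A hedged sketch
of a consumer loop, assuming source is a changegroup unpacker as in the
hunk above:

    def consume_deltas(source):
        chain = None
        while True:
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                break
            # Nine fields as of this series; the last two (sidedata and
            # protocol flags) are accepted but left unused here.
            (node, p1, p2, cs, deltabase, delta, flags,
             sidedata, proto_flags) = revisiondata
            chain = node
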
diff --git a/hgext/remotefilelog/shallowrepo.py b/hgext/remotefilelog/shallowrepo.py
--- a/hgext/remotefilelog/shallowrepo.py
+++ b/hgext/remotefilelog/shallowrepo.py
@@ -9,7 +9,7 @@ from __future__ import absolute_import
import os
from mercurial.i18n import _
-from mercurial.node import hex, nullid, nullrev
+from mercurial.node import hex, nullrev
from mercurial import (
encoding,
error,
@@ -206,8 +206,8 @@ def wraprepo(repo):
m1 = ctx.p1().manifest()
files = []
for f in ctx.modified() + ctx.added():
- fparent1 = m1.get(f, nullid)
- if fparent1 != nullid:
+ fparent1 = m1.get(f, self.nullid)
+ if fparent1 != self.nullid:
files.append((f, hex(fparent1)))
self.fileservice.prefetch(files)
return super(shallowrepository, self).commitctx(
diff --git a/hgext/remotefilelog/shallowutil.py b/hgext/remotefilelog/shallowutil.py
--- a/hgext/remotefilelog/shallowutil.py
+++ b/hgext/remotefilelog/shallowutil.py
@@ -233,6 +233,10 @@ def bin2int(buf):
return x
+class BadRemotefilelogHeader(error.StorageError):
+ """Exception raised when parsing a remotefilelog blob header fails."""
+
+
def parsesizeflags(raw):
"""given a remotefilelog blob, return (headersize, rawtextsize, flags)
@@ -243,26 +247,30 @@ def parsesizeflags(raw):
size = None
try:
index = raw.index(b'\0')
- header = raw[:index]
- if header.startswith(b'v'):
- # v1 and above, header starts with 'v'
- if header.startswith(b'v1\n'):
- for s in header.split(b'\n'):
- if s.startswith(constants.METAKEYSIZE):
- size = int(s[len(constants.METAKEYSIZE) :])
- elif s.startswith(constants.METAKEYFLAG):
- flags = int(s[len(constants.METAKEYFLAG) :])
- else:
- raise RuntimeError(
- b'unsupported remotefilelog header: %s' % header
- )
+ except ValueError:
+ raise BadRemotefilelogHeader(
+ "unexpected remotefilelog header: illegal format"
+ )
+ header = raw[:index]
+ if header.startswith(b'v'):
+ # v1 and above, header starts with 'v'
+ if header.startswith(b'v1\n'):
+ for s in header.split(b'\n'):
+ if s.startswith(constants.METAKEYSIZE):
+ size = int(s[len(constants.METAKEYSIZE) :])
+ elif s.startswith(constants.METAKEYFLAG):
+ flags = int(s[len(constants.METAKEYFLAG) :])
else:
- # v0, str(int(size)) is the header
- size = int(header)
- except ValueError:
- raise RuntimeError("unexpected remotefilelog header: illegal format")
+ raise BadRemotefilelogHeader(
+ b'unsupported remotefilelog header: %s' % header
+ )
+ else:
+ # v0, str(int(size)) is the header
+ size = int(header)
if size is None:
- raise RuntimeError("unexpected remotefilelog header: no size found")
+ raise BadRemotefilelogHeader(
+ "unexpected remotefilelog header: no size found"
+ )
return index + 1, size, flags
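
With BadRemotefilelogHeader defined as a StorageError subclass,
parsesizeflags() no longer hides malformed headers behind a bare
RuntimeError, which is what allows the basestore.py hunk earlier to
narrow its except clause. A sketch of a caller, assuming the relative
import used inside the extension:

    from . import shallowutil

    def looks_like_blob(raw):
        # parsesizeflags() raises BadRemotefilelogHeader for a malformed
        # header; a corrupt size field can still surface ValueError.
        try:
            shallowutil.parsesizeflags(raw)
            return True
        except (ValueError, shallowutil.BadRemotefilelogHeader):
            return False
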
diff --git a/hgext/sparse.py b/hgext/sparse.py
--- a/hgext/sparse.py
+++ b/hgext/sparse.py
@@ -256,6 +256,8 @@ def _setupdirstate(ui):
# Prevent adding files that are outside the sparse checkout
editfuncs = [
b'normal',
+ b'set_tracked',
+ b'set_untracked',
b'add',
b'normallookup',
b'copy',
diff --git a/hgext/sqlitestore.py b/hgext/sqlitestore.py
--- a/hgext/sqlitestore.py
+++ b/hgext/sqlitestore.py
@@ -52,7 +52,6 @@ import zlib
from mercurial.i18n import _
from mercurial.node import (
- nullid,
nullrev,
sha1nodeconstants,
short,
@@ -290,6 +289,7 @@ class sqliterevisiondelta(object):
revision = attr.ib()
delta = attr.ib()
sidedata = attr.ib()
+ protocol_flags = attr.ib()
linknode = attr.ib(default=None)
@@ -366,12 +366,12 @@ class sqlitefilestore(object):
)
if p1rev == nullrev:
- p1node = nullid
+ p1node = sha1nodeconstants.nullid
else:
p1node = self._revtonode[p1rev]
if p2rev == nullrev:
- p2node = nullid
+ p2node = sha1nodeconstants.nullid
else:
p2node = self._revtonode[p2rev]
@@ -400,7 +400,7 @@ class sqlitefilestore(object):
return iter(pycompat.xrange(len(self._revisions)))
def hasnode(self, node):
- if node == nullid:
+ if node == sha1nodeconstants.nullid:
return False
return node in self._nodetorev
@@ -411,8 +411,8 @@ class sqlitefilestore(object):
)
def parents(self, node):
- if node == nullid:
- return nullid, nullid
+ if node == sha1nodeconstants.nullid:
+ return sha1nodeconstants.nullid, sha1nodeconstants.nullid
if node not in self._revisions:
raise error.LookupError(node, self._path, _(b'no node'))
@@ -431,7 +431,7 @@ class sqlitefilestore(object):
return entry.p1rev, entry.p2rev
def rev(self, node):
- if node == nullid:
+ if node == sha1nodeconstants.nullid:
return nullrev
if node not in self._nodetorev:
@@ -441,7 +441,7 @@ class sqlitefilestore(object):
def node(self, rev):
if rev == nullrev:
- return nullid
+ return sha1nodeconstants.nullid
if rev not in self._revtonode:
raise IndexError(rev)
@@ -485,7 +485,7 @@ class sqlitefilestore(object):
def heads(self, start=None, stop=None):
if start is None and stop is None:
if not len(self):
- return [nullid]
+ return [sha1nodeconstants.nullid]
startrev = self.rev(start) if start is not None else nullrev
stoprevs = {self.rev(n) for n in stop or []}
@@ -529,7 +529,7 @@ class sqlitefilestore(object):
return len(self.revision(node))
def revision(self, node, raw=False, _verifyhash=True):
- if node in (nullid, nullrev):
+ if node in (sha1nodeconstants.nullid, nullrev):
return b''
if isinstance(node, int):
@@ -596,7 +596,7 @@ class sqlitefilestore(object):
b'unhandled value for nodesorder: %s' % nodesorder
)
- nodes = [n for n in nodes if n != nullid]
+ nodes = [n for n in nodes if n != sha1nodeconstants.nullid]
if not nodes:
return
@@ -705,12 +705,12 @@ class sqlitefilestore(object):
raise SQLiteStoreError(b'unhandled revision flag')
if maybemissingparents:
- if p1 != nullid and not self.hasnode(p1):
- p1 = nullid
+ if p1 != sha1nodeconstants.nullid and not self.hasnode(p1):
+ p1 = sha1nodeconstants.nullid
storeflags |= FLAG_MISSING_P1
- if p2 != nullid and not self.hasnode(p2):
- p2 = nullid
+ if p2 != sha1nodeconstants.nullid and not self.hasnode(p2):
+ p2 = sha1nodeconstants.nullid
storeflags |= FLAG_MISSING_P2
baserev = self.rev(deltabase)
@@ -736,7 +736,10 @@ class sqlitefilestore(object):
# Possibly reset parents to make them proper.
entry = self._revisions[node]
- if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
+ if (
+ entry.flags & FLAG_MISSING_P1
+ and p1 != sha1nodeconstants.nullid
+ ):
entry.p1node = p1
entry.p1rev = self._nodetorev[p1]
entry.flags &= ~FLAG_MISSING_P1
@@ -746,7 +749,10 @@ class sqlitefilestore(object):
(self._nodetorev[p1], entry.flags, entry.rid),
)
- if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
+ if (
+ entry.flags & FLAG_MISSING_P2
+ and p2 != sha1nodeconstants.nullid
+ ):
entry.p2node = p2
entry.p2rev = self._nodetorev[p2]
entry.flags &= ~FLAG_MISSING_P2
@@ -761,7 +767,7 @@ class sqlitefilestore(object):
empty = False
continue
- if deltabase == nullid:
+ if deltabase == sha1nodeconstants.nullid:
text = mdiff.patch(b'', delta)
storedelta = None
else:
@@ -1012,7 +1018,7 @@ class sqlitefilestore(object):
assert revisiondata is not None
deltabase = p1
- if deltabase == nullid:
+ if deltabase == sha1nodeconstants.nullid:
delta = revisiondata
else:
delta = mdiff.textdiff(
@@ -1021,7 +1027,7 @@ class sqlitefilestore(object):
# File index stores a pointer to its delta and the parent delta.
# The parent delta is stored via a pointer to the fileindex PK.
- if deltabase == nullid:
+ if deltabase == sha1nodeconstants.nullid:
baseid = None
else:
baseid = self._revisions[deltabase].rid
@@ -1055,12 +1061,12 @@ class sqlitefilestore(object):
rev = len(self)
- if p1 == nullid:
+ if p1 == sha1nodeconstants.nullid:
p1rev = nullrev
else:
p1rev = self._nodetorev[p1]
- if p2 == nullid:
+ if p2 == sha1nodeconstants.nullid:
p2rev = nullrev
else:
p2rev = self._nodetorev[p2]
diff --git a/hgext/transplant.py b/hgext/transplant.py
--- a/hgext/transplant.py
+++ b/hgext/transplant.py
@@ -22,7 +22,6 @@ from mercurial.pycompat import open
from mercurial.node import (
bin,
hex,
- nullid,
short,
)
from mercurial import (
@@ -134,6 +133,7 @@ class transplants(object):
class transplanter(object):
def __init__(self, ui, repo, opts):
self.ui = ui
+ self.repo = repo
self.path = repo.vfs.join(b'transplant')
self.opener = vfsmod.vfs(self.path)
self.transplants = transplants(
@@ -221,7 +221,7 @@ class transplanter(object):
exchange.pull(repo, source.peer(), heads=[node])
skipmerge = False
- if parents[1] != nullid:
+ if parents[1] != repo.nullid:
if not opts.get(b'parent'):
self.ui.note(
_(b'skipping merge changeset %d:%s\n')
@@ -516,7 +516,7 @@ class transplanter(object):
def parselog(self, fp):
parents = []
message = []
- node = nullid
+ node = self.repo.nullid
inmsg = False
user = None
date = None
@@ -568,7 +568,7 @@ class transplanter(object):
def matchfn(node):
if self.applied(repo, node, root):
return False
- if source.changelog.parents(node)[1] != nullid:
+ if source.changelog.parents(node)[1] != repo.nullid:
return False
extra = source.changelog.read(node)[5]
cnode = extra.get(b'transplant_source')
@@ -804,7 +804,7 @@ def _dotransplant(ui, repo, *revs, **opt
tp = transplanter(ui, repo, opts)
p1 = repo.dirstate.p1()
- if len(repo) > 0 and p1 == nullid:
+ if len(repo) > 0 and p1 == repo.nullid:
raise error.Abort(_(b'no revision checked out'))
if opts.get(b'continue'):
if not tp.canresume():
diff --git a/hgext/uncommit.py b/hgext/uncommit.py
--- a/hgext/uncommit.py
+++ b/hgext/uncommit.py
@@ -20,7 +20,6 @@ added and removed in the working directo
from __future__ import absolute_import
from mercurial.i18n import _
-from mercurial.node import nullid
from mercurial import (
cmdutil,
@@ -113,7 +112,7 @@ def _commitfiltered(
new = context.memctx(
repo,
- parents=[base.node(), nullid],
+ parents=[base.node(), repo.nullid],
text=message,
files=files,
filectxfn=filectxfn,
@@ -154,11 +153,10 @@ def uncommit(ui, repo, *pats, **opts):
If no files are specified, the commit will be pruned, unless --keep is
given.
"""
+ cmdutil.check_note_size(opts)
+ cmdutil.resolve_commit_options(ui, opts)
opts = pycompat.byteskwargs(opts)
- cmdutil.checknotesize(ui, opts)
- cmdutil.resolvecommitoptions(ui, opts)
-
with repo.wlock(), repo.lock():
st = repo.status()
diff --git a/mercurial/bookmarks.py b/mercurial/bookmarks.py
--- a/mercurial/bookmarks.py
+++ b/mercurial/bookmarks.py
@@ -15,7 +15,6 @@ from .node import (
bin,
hex,
short,
- wdirid,
)
from .pycompat import getattr
from . import (
@@ -601,11 +600,12 @@ def _diverge(ui, b, path, localmarks, re
# if an @pathalias already exists, we overwrite (update) it
if path.startswith(b"file:"):
path = urlutil.url(path).path
- for p, u in ui.configitems(b"paths"):
- if u.startswith(b"file:"):
- u = urlutil.url(u).path
- if path == u:
- return b'%s@%s' % (b, p)
+ for name, p in urlutil.list_paths(ui):
+ loc = p.rawloc
+ if loc.startswith(b"file:"):
+ loc = urlutil.url(loc).path
+ if path == loc:
+ return b'%s@%s' % (b, name)
# assign a unique "@number" suffix newly
for x in range(1, 100):
@@ -642,7 +642,7 @@ def binaryencode(repo, bookmarks):
binarydata = []
for book, node in bookmarks:
if not node: # None or ''
- node = wdirid
+ node = repo.nodeconstants.wdirid
binarydata.append(_binaryentry.pack(node, len(book)))
binarydata.append(book)
return b''.join(binarydata)
@@ -674,7 +674,7 @@ def binarydecode(repo, stream):
if len(bookmark) < length:
if entry:
raise error.Abort(_(b'bad bookmark stream'))
- if node == wdirid:
+ if node == repo.nodeconstants.wdirid:
node = None
books.append((bookmark, node))
return books
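
The bookmark wire-format hunks keep the same binary layout but fetch the working-directory pseudo-node from `repo.nodeconstants` instead of the module. A rough round-trip sketch; the `'>20sH'` struct and its endianness are assumptions made for this example, only the wdirid-as-placeholder behaviour comes from the code above:

    import struct

    _binaryentry = struct.Struct('>20sH')   # assumed layout: node, then name length
    wdirid = b'\xff' * 20                   # stand-in for repo.nodeconstants.wdirid

    def encode_one(node, book):
        if not node:  # None or b'' travels as the wdir pseudo-node
            node = wdirid
        return _binaryentry.pack(node, len(book)) + book

    def decode_one(data):
        node, length = _binaryentry.unpack_from(data)
        book = data[_binaryentry.size:_binaryentry.size + length]
        if node == wdirid:  # translate the placeholder back to "no node"
            node = None
        return book, node

    assert decode_one(encode_one(None, b'@')) == (b'@', None)
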
diff --git a/mercurial/branchmap.py b/mercurial/branchmap.py
--- a/mercurial/branchmap.py
+++ b/mercurial/branchmap.py
@@ -12,7 +12,6 @@ import struct
from .node import (
bin,
hex,
- nullid,
nullrev,
)
from . import (
@@ -189,7 +188,7 @@ class branchcache(object):
self,
repo,
entries=(),
- tipnode=nullid,
+ tipnode=None,
tiprev=nullrev,
filteredhash=None,
closednodes=None,
@@ -200,7 +199,10 @@ class branchcache(object):
has a given node or not. If it's not provided, we assume that every node
we have exists in changelog"""
self._repo = repo
- self.tipnode = tipnode
+ if tipnode is None:
+ self.tipnode = repo.nullid
+ else:
+ self.tipnode = tipnode
self.tiprev = tiprev
self.filteredhash = filteredhash
# closednodes is a set of nodes that close their branch. If the branch
@@ -536,7 +538,7 @@ class branchcache(object):
if not self.validfor(repo):
# cache key are not valid anymore
- self.tipnode = nullid
+ self.tipnode = repo.nullid
self.tiprev = nullrev
for heads in self.iterheads():
tiprev = max(cl.rev(node) for node in heads)
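
In branchmap the constructor default moves from a module-level `nullid` to `None`, resolved against `repo.nullid` once the repo is at hand. The idiom, reduced to a toy example with stand-in classes:

    class FakeRepo(object):
        nullid = b'\0' * 20  # would be 32 bytes for a non-SHA-1 store

    class Cache(object):
        def __init__(self, repo, tipnode=None):
            self._repo = repo
            # resolve the default from the repo rather than a global constant,
            # so the same class works for any node length
            self.tipnode = repo.nullid if tipnode is None else tipnode

    assert Cache(FakeRepo()).tipnode == b'\0' * 20
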
diff --git a/mercurial/bundle2.py b/mercurial/bundle2.py
--- a/mercurial/bundle2.py
+++ b/mercurial/bundle2.py
@@ -158,7 +158,6 @@ import sys
from .i18n import _
from .node import (
hex,
- nullid,
short,
)
from . import (
@@ -181,6 +180,7 @@ from .utils import (
stringutil,
urlutil,
)
+from .interfaces import repository
urlerr = util.urlerr
urlreq = util.urlreq
@@ -1730,8 +1730,8 @@ def _addpartsfromopts(ui, repo, bundler,
part.addparam(
b'targetphase', b'%d' % phases.secret, mandatory=False
)
- if b'exp-sidedata-flag' in repo.requirements:
- part.addparam(b'exp-sidedata', b'1')
+ if repository.REPO_FEATURE_SIDE_DATA in repo.features:
+ part.addparam(b'exp-sidedata', b'1')
if opts.get(b'streamv2', False):
addpartbundlestream2(bundler, repo, stream=True)
@@ -2014,13 +2014,6 @@ def handlechangegroup(op, inpart):
)
scmutil.writereporequirements(op.repo)
- bundlesidedata = bool(b'exp-sidedata' in inpart.params)
- reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements)
- if reposidedata and not bundlesidedata:
- msg = b"repository is using sidedata but the bundle source do not"
- hint = b'this is currently unsupported'
- raise error.Abort(msg, hint=hint)
-
extrakwargs = {}
targetphase = inpart.params.get(b'targetphase')
if targetphase is not None:
@@ -2576,7 +2569,7 @@ def widen_bundle(
fullnodes=commonnodes,
)
cgdata = packer.generate(
- {nullid},
+ {repo.nullid},
list(commonnodes),
False,
b'narrow_widen',
@@ -2587,9 +2580,9 @@ def widen_bundle(
part.addparam(b'version', cgversion)
if scmutil.istreemanifest(repo):
part.addparam(b'treemanifest', b'1')
- if b'exp-sidedata-flag' in repo.requirements:
- part.addparam(b'exp-sidedata', b'1')
- wanted = format_remote_wanted_sidedata(repo)
- part.addparam(b'exp-wanted-sidedata', wanted)
+ if repository.REPO_FEATURE_SIDE_DATA in repo.features:
+ part.addparam(b'exp-sidedata', b'1')
+ wanted = format_remote_wanted_sidedata(repo)
+ part.addparam(b'exp-wanted-sidedata', wanted)
return bundler
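
Both bundle2 hunks swap a membership test on the `b'exp-sidedata-flag'` requirement string for a capability check, `repository.REPO_FEATURE_SIDE_DATA in repo.features`, so any storage backend able to serve side-data advertises it, not only repos carrying one specific requirement. Schematically, with stand-in objects and a made-up value for the feature constant:

    REPO_FEATURE_SIDE_DATA = b'side-data'  # stand-in for the interfaces constant

    class FakeRepo(object):
        requirements = {b'revlogv2'}         # no 'exp-sidedata-flag' here...
        features = {REPO_FEATURE_SIDE_DATA}  # ...but the store can do side-data

    def wants_sidedata_param(repo):
        return REPO_FEATURE_SIDE_DATA in repo.features

    assert wants_sidedata_param(FakeRepo())
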
diff --git a/mercurial/bundlecaches.py b/mercurial/bundlecaches.py
--- a/mercurial/bundlecaches.py
+++ b/mercurial/bundlecaches.py
@@ -167,6 +167,8 @@ def parsebundlespec(repo, spec, strict=T
# Generaldelta repos require v2.
if requirementsmod.GENERALDELTA_REQUIREMENT in repo.requirements:
version = b'v2'
+ elif requirementsmod.REVLOGV2_REQUIREMENT in repo.requirements:
+ version = b'v2'
# Modern compression engines require v2.
if compression not in _bundlespecv1compengines:
version = b'v2'
diff --git a/mercurial/bundlerepo.py b/mercurial/bundlerepo.py
--- a/mercurial/bundlerepo.py
+++ b/mercurial/bundlerepo.py
@@ -19,7 +19,6 @@ import shutil
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
)
@@ -40,6 +39,7 @@ from . import (
phases,
pycompat,
revlog,
+ revlogutils,
util,
vfs as vfsmod,
)
@@ -47,9 +47,13 @@ from .utils import (
urlutil,
)
+from .revlogutils import (
+ constants as revlog_constants,
+)
+
class bundlerevlog(revlog.revlog):
- def __init__(self, opener, indexfile, cgunpacker, linkmapper):
+ def __init__(self, opener, target, radix, cgunpacker, linkmapper):
# How it works:
# To retrieve a revision, we need to know the offset of the revision in
# the bundle (an unbundle object). We store this offset in the index
@@ -58,7 +62,7 @@ class bundlerevlog(revlog.revlog):
# To differentiate a rev in the bundle from a rev in the revlog, we
# check revision against repotiprev.
opener = vfsmod.readonlyvfs(opener)
- revlog.revlog.__init__(self, opener, indexfile)
+ revlog.revlog.__init__(self, opener, target=target, radix=radix)
self.bundle = cgunpacker
n = len(self)
self.repotiprev = n - 1
@@ -81,25 +85,25 @@ class bundlerevlog(revlog.revlog):
for p in (p1, p2):
if not self.index.has_node(p):
raise error.LookupError(
- p, self.indexfile, _(b"unknown parent")
+ p, self.display_id, _(b"unknown parent")
)
if not self.index.has_node(deltabase):
raise LookupError(
- deltabase, self.indexfile, _(b'unknown delta base')
+ deltabase, self.display_id, _(b'unknown delta base')
)
baserev = self.rev(deltabase)
- # start, size, full unc. size, base (unused), link, p1, p2, node
- e = (
- revlog.offset_type(start, flags),
- size,
- -1,
- baserev,
- linkrev,
- self.rev(p1),
- self.rev(p2),
- node,
+ # start, size, full unc. size, base (unused), link, p1, p2, node, sidedata_offset (unused), sidedata_size (unused)
+ e = revlogutils.entry(
+ flags=flags,
+ data_offset=start,
+ data_compressed_length=size,
+ data_delta_base=baserev,
+ link_rev=linkrev,
+ parent_rev_1=self.rev(p1),
+ parent_rev_2=self.rev(p2),
+ node_id=node,
)
self.index.append(e)
self.bundlerevs.add(n)
@@ -172,7 +176,12 @@ class bundlechangelog(bundlerevlog, chan
changelog.changelog.__init__(self, opener)
linkmapper = lambda x: x
bundlerevlog.__init__(
- self, opener, self.indexfile, cgunpacker, linkmapper
+ self,
+ opener,
+ (revlog_constants.KIND_CHANGELOG, None),
+ self.radix,
+ cgunpacker,
+ linkmapper,
)
@@ -188,7 +197,12 @@ class bundlemanifest(bundlerevlog, manif
):
manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
bundlerevlog.__init__(
- self, opener, self.indexfile, cgunpacker, linkmapper
+ self,
+ opener,
+ (revlog_constants.KIND_MANIFESTLOG, dir),
+ self._revlog.radix,
+ cgunpacker,
+ linkmapper,
)
if dirlogstarts is None:
dirlogstarts = {}
@@ -215,7 +229,12 @@ class bundlefilelog(filelog.filelog):
def __init__(self, opener, path, cgunpacker, linkmapper):
filelog.filelog.__init__(self, opener, path)
self._revlog = bundlerevlog(
- opener, self.indexfile, cgunpacker, linkmapper
+ opener,
+ # XXX should use the unencoded path
+ target=(revlog_constants.KIND_FILELOG, path),
+ radix=self._revlog.radix,
+ cgunpacker=cgunpacker,
+ linkmapper=linkmapper,
)
@@ -447,7 +466,9 @@ class bundlerepository(object):
return encoding.getcwd() # always outside the repo
# Check if parents exist in localrepo before setting
- def setparents(self, p1, p2=nullid):
+ def setparents(self, p1, p2=None):
+ if p2 is None:
+ p2 = self.nullid
p1rev = self.changelog.rev(p1)
p2rev = self.changelog.rev(p2)
msg = _(b"setting parent to node %s that only exists in the bundle\n")
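
bundlerevlog now builds its in-memory index entries through `revlogutils.entry(...)` keywords rather than a positional tuple. Roughly, the keywords map onto the old tuple as below; this is a simplified stand-in (the real helper also carries compression-mode fields), and `offset << 16 | flags` mirrors the old `offset_type()` packing:

    def entry(
        data_offset,
        data_compressed_length,
        data_delta_base,
        link_rev,
        parent_rev_1,
        parent_rev_2,
        node_id,
        flags=0,
        data_uncompressed_length=-1,
        sidedata_offset=0,
        sidedata_compressed_length=0,
    ):
        # the first field packs offset and flags together, as offset_type() did
        return (
            data_offset << 16 | flags,
            data_compressed_length,
            data_uncompressed_length,
            data_delta_base,
            link_rev,
            parent_rev_1,
            parent_rev_2,
            node_id,
            sidedata_offset,
            sidedata_compressed_length,
        )

    e = entry(
        flags=0,
        data_offset=0,
        data_compressed_length=42,
        data_delta_base=0,
        link_rev=0,
        parent_rev_1=-1,
        parent_rev_2=-1,
        node_id=b'\x11' * 20,
    )
    assert e[2] == -1  # uncompressed size unknown, exactly the bundle case above
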
diff --git a/mercurial/cext/charencode.c b/mercurial/cext/charencode.c
--- a/mercurial/cext/charencode.c
+++ b/mercurial/cext/charencode.c
@@ -223,7 +223,7 @@ PyObject *make_file_foldmap(PyObject *se
PyObject *file_foldmap = NULL;
enum normcase_spec spec;
PyObject *k, *v;
- dirstateTupleObject *tuple;
+ dirstateItemObject *tuple;
Py_ssize_t pos = 0;
const char *table;
@@ -263,7 +263,7 @@ PyObject *make_file_foldmap(PyObject *se
goto quit;
}
- tuple = (dirstateTupleObject *)v;
+ tuple = (dirstateItemObject *)v;
if (tuple->state != 'r') {
PyObject *normed;
if (table != NULL) {
diff --git a/mercurial/cext/dirs.c b/mercurial/cext/dirs.c
--- a/mercurial/cext/dirs.c
+++ b/mercurial/cext/dirs.c
@@ -177,7 +177,7 @@ static int dirs_fromdict(PyObject *dirs,
"expected a dirstate tuple");
return -1;
}
- if (((dirstateTupleObject *)value)->state == skipchar)
+ if (((dirstateItemObject *)value)->state == skipchar)
continue;
}
diff --git a/mercurial/cext/manifest.c b/mercurial/cext/manifest.c
--- a/mercurial/cext/manifest.c
+++ b/mercurial/cext/manifest.c
@@ -28,6 +28,7 @@ typedef struct {
typedef struct {
PyObject_HEAD
PyObject *pydata;
+ Py_ssize_t nodelen;
line *lines;
int numlines; /* number of line entries */
int livelines; /* number of non-deleted lines */
@@ -49,12 +50,11 @@ static Py_ssize_t pathlen(line *l)
}
/* get the node value of a single line */
-static PyObject *nodeof(line *l, char *flag)
+static PyObject *nodeof(Py_ssize_t nodelen, line *l, char *flag)
{
char *s = l->start;
Py_ssize_t llen = pathlen(l);
Py_ssize_t hlen = l->len - llen - 2;
- Py_ssize_t hlen_raw;
PyObject *hash;
if (llen + 1 + 40 + 1 > l->len) { /* path '\0' hash '\n' */
PyErr_SetString(PyExc_ValueError, "manifest line too short");
@@ -73,36 +73,29 @@ static PyObject *nodeof(line *l, char *f
break;
}
- switch (hlen) {
- case 40: /* sha1 */
- hlen_raw = 20;
- break;
- case 64: /* new hash */
- hlen_raw = 32;
- break;
- default:
+ if (hlen != 2 * nodelen) {
PyErr_SetString(PyExc_ValueError, "invalid node length in manifest");
return NULL;
}
- hash = unhexlify(s + llen + 1, hlen_raw * 2);
+ hash = unhexlify(s + llen + 1, nodelen * 2);
if (!hash) {
return NULL;
}
if (l->hash_suffix != '\0') {
char newhash[33];
- memcpy(newhash, PyBytes_AsString(hash), hlen_raw);
+ memcpy(newhash, PyBytes_AsString(hash), nodelen);
Py_DECREF(hash);
- newhash[hlen_raw] = l->hash_suffix;
- hash = PyBytes_FromStringAndSize(newhash, hlen_raw+1);
+ newhash[nodelen] = l->hash_suffix;
+ hash = PyBytes_FromStringAndSize(newhash, nodelen + 1);
}
return hash;
}
/* get the node hash and flags of a line as a tuple */
-static PyObject *hashflags(line *l)
+static PyObject *hashflags(Py_ssize_t nodelen, line *l)
{
char flag;
- PyObject *hash = nodeof(l, &flag);
+ PyObject *hash = nodeof(nodelen, l, &flag);
PyObject *flags;
PyObject *tup;
@@ -190,17 +183,23 @@ static void lazymanifest_init_early(lazy
static int lazymanifest_init(lazymanifest *self, PyObject *args)
{
char *data;
- Py_ssize_t len;
+ Py_ssize_t nodelen, len;
int err, ret;
PyObject *pydata;
lazymanifest_init_early(self);
- if (!PyArg_ParseTuple(args, "S", &pydata)) {
+ if (!PyArg_ParseTuple(args, "nS", &nodelen, &pydata)) {
return -1;
}
- err = PyBytes_AsStringAndSize(pydata, &data, &len);
+ if (nodelen != 20 && nodelen != 32) {
+ /* See fixed buffer in nodeof */
+ PyErr_Format(PyExc_ValueError, "Unsupported node length");
+ return -1;
+ }
+ self->nodelen = nodelen;
+ self->dirty = false;
- self->dirty = false;
+ err = PyBytes_AsStringAndSize(pydata, &data, &len);
if (err == -1)
return -1;
self->pydata = pydata;
@@ -291,17 +290,18 @@ static line *lmiter_nextline(lmIter *sel
static PyObject *lmiter_iterentriesnext(PyObject *o)
{
+ lmIter *self = (lmIter *)o;
Py_ssize_t pl;
line *l;
char flag;
PyObject *ret = NULL, *path = NULL, *hash = NULL, *flags = NULL;
- l = lmiter_nextline((lmIter *)o);
+ l = lmiter_nextline(self);
if (!l) {
goto done;
}
pl = pathlen(l);
path = PyBytes_FromStringAndSize(l->start, pl);
- hash = nodeof(l, &flag);
+ hash = nodeof(self->m->nodelen, l, &flag);
if (!path || !hash) {
goto done;
}
@@ -471,7 +471,7 @@ static PyObject *lazymanifest_getitem(la
PyErr_Format(PyExc_KeyError, "No such manifest entry.");
return NULL;
}
- return hashflags(hit);
+ return hashflags(self->nodelen, hit);
}
static int lazymanifest_delitem(lazymanifest *self, PyObject *key)
@@ -568,13 +568,13 @@ static int lazymanifest_setitem(
pyhash = PyTuple_GetItem(value, 0);
if (!PyBytes_Check(pyhash)) {
PyErr_Format(PyExc_TypeError,
- "node must be a 20 or 32 bytes string");
+ "node must be a %zi bytes string", self->nodelen);
return -1;
}
hlen = PyBytes_Size(pyhash);
- if (hlen != 20 && hlen != 32) {
+ if (hlen != self->nodelen) {
PyErr_Format(PyExc_TypeError,
- "node must be a 20 or 32 bytes string");
+ "node must be a %zi bytes string", self->nodelen);
return -1;
}
hash = PyBytes_AsString(pyhash);
@@ -739,6 +739,7 @@ static lazymanifest *lazymanifest_copy(l
goto nomem;
}
lazymanifest_init_early(copy);
+ copy->nodelen = self->nodelen;
copy->numlines = self->numlines;
copy->livelines = self->livelines;
copy->dirty = false;
@@ -777,6 +778,7 @@ static lazymanifest *lazymanifest_filter
goto nomem;
}
lazymanifest_init_early(copy);
+ copy->nodelen = self->nodelen;
copy->dirty = true;
copy->lines = malloc(self->maxlines * sizeof(line));
if (!copy->lines) {
@@ -872,7 +874,7 @@ static PyObject *lazymanifest_diff(lazym
if (!key)
goto nomem;
if (result < 0) {
- PyObject *l = hashflags(left);
+ PyObject *l = hashflags(self->nodelen, left);
if (!l) {
goto nomem;
}
@@ -885,7 +887,7 @@ static PyObject *lazymanifest_diff(lazym
Py_DECREF(outer);
sneedle++;
} else if (result > 0) {
- PyObject *r = hashflags(right);
+ PyObject *r = hashflags(self->nodelen, right);
if (!r) {
goto nomem;
}
@@ -902,12 +904,12 @@ static PyObject *lazymanifest_diff(lazym
if (left->len != right->len
|| memcmp(left->start, right->start, left->len)
|| left->hash_suffix != right->hash_suffix) {
- PyObject *l = hashflags(left);
+ PyObject *l = hashflags(self->nodelen, left);
PyObject *r;
if (!l) {
goto nomem;
}
- r = hashflags(right);
+ r = hashflags(self->nodelen, right);
if (!r) {
Py_DECREF(l);
goto nomem;
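
The lazymanifest changes replace the hard-coded 40-or-64-hex-digit check with `2 * nodelen`, where `nodelen` is now supplied by the caller. The equivalent validation in plain Python, simplified (no hash-suffix handling, naive flag detection):

    import binascii

    def parse_manifest_line(line, nodelen):
        # line layout: b"path\0<hex node>[flag]" (trailing newline already stripped)
        path, rest = line.split(b'\0', 1)
        flag = b''
        if rest[-1:] in (b'l', b'x', b't'):  # naive; the C code uses line lengths
            flag, rest = rest[-1:], rest[:-1]
        if len(rest) != 2 * nodelen:
            raise ValueError('invalid node length in manifest')
        return path, binascii.unhexlify(rest), flag

    path, node, flag = parse_manifest_line(b'foo\0' + b'11' * 20, 20)
    assert (path, len(node), flag) == (b'foo', 20, b'')
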
diff --git a/mercurial/cext/parsers.c b/mercurial/cext/parsers.c
--- a/mercurial/cext/parsers.c
+++ b/mercurial/cext/parsers.c
@@ -29,6 +29,10 @@
static const char *const versionerrortext = "Python minor version mismatch";
+static const int dirstate_v1_from_p2 = -2;
+static const int dirstate_v1_nonnormal = -1;
+static const int ambiguous_time = -1;
+
static PyObject *dict_new_presized(PyObject *self, PyObject *args)
{
Py_ssize_t expected_size;
@@ -40,11 +44,11 @@ static PyObject *dict_new_presized(PyObj
return _dict_new_presized(expected_size);
}
-static inline dirstateTupleObject *make_dirstate_tuple(char state, int mode,
- int size, int mtime)
+static inline dirstateItemObject *make_dirstate_item(char state, int mode,
+ int size, int mtime)
{
- dirstateTupleObject *t =
- PyObject_New(dirstateTupleObject, &dirstateTupleType);
+ dirstateItemObject *t =
+ PyObject_New(dirstateItemObject, &dirstateItemType);
if (!t) {
return NULL;
}
@@ -55,19 +59,19 @@ static inline dirstateTupleObject *make_
return t;
}
-static PyObject *dirstate_tuple_new(PyTypeObject *subtype, PyObject *args,
- PyObject *kwds)
+static PyObject *dirstate_item_new(PyTypeObject *subtype, PyObject *args,
+ PyObject *kwds)
{
/* We do all the initialization here and not a tp_init function because
- * dirstate_tuple is immutable. */
- dirstateTupleObject *t;
+ * dirstate_item is immutable. */
+ dirstateItemObject *t;
char state;
int size, mode, mtime;
if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
return NULL;
}
- t = (dirstateTupleObject *)subtype->tp_alloc(subtype, 1);
+ t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
if (!t) {
return NULL;
}
@@ -79,19 +83,19 @@ static PyObject *dirstate_tuple_new(PyTy
return (PyObject *)t;
}
-static void dirstate_tuple_dealloc(PyObject *o)
+static void dirstate_item_dealloc(PyObject *o)
{
PyObject_Del(o);
}
-static Py_ssize_t dirstate_tuple_length(PyObject *o)
+static Py_ssize_t dirstate_item_length(PyObject *o)
{
return 4;
}
-static PyObject *dirstate_tuple_item(PyObject *o, Py_ssize_t i)
+static PyObject *dirstate_item_item(PyObject *o, Py_ssize_t i)
{
- dirstateTupleObject *t = (dirstateTupleObject *)o;
+ dirstateItemObject *t = (dirstateItemObject *)o;
switch (i) {
case 0:
return PyBytes_FromStringAndSize(&t->state, 1);
@@ -107,56 +111,279 @@ static PyObject *dirstate_tuple_item(PyO
}
}
-static PySequenceMethods dirstate_tuple_sq = {
- dirstate_tuple_length, /* sq_length */
- 0, /* sq_concat */
- 0, /* sq_repeat */
- dirstate_tuple_item, /* sq_item */
- 0, /* sq_ass_item */
- 0, /* sq_contains */
- 0, /* sq_inplace_concat */
- 0 /* sq_inplace_repeat */
+static PySequenceMethods dirstate_item_sq = {
+ dirstate_item_length, /* sq_length */
+ 0, /* sq_concat */
+ 0, /* sq_repeat */
+ dirstate_item_item, /* sq_item */
+ 0, /* sq_ass_item */
+ 0, /* sq_contains */
+ 0, /* sq_inplace_concat */
+ 0 /* sq_inplace_repeat */
+};
+
+static PyObject *dirstate_item_v1_state(dirstateItemObject *self)
+{
+ return PyBytes_FromStringAndSize(&self->state, 1);
+};
+
+static PyObject *dirstate_item_v1_mode(dirstateItemObject *self)
+{
+ return PyInt_FromLong(self->mode);
+};
+
+static PyObject *dirstate_item_v1_size(dirstateItemObject *self)
+{
+ return PyInt_FromLong(self->size);
+};
+
+static PyObject *dirstate_item_v1_mtime(dirstateItemObject *self)
+{
+ return PyInt_FromLong(self->mtime);
+};
+
+static PyObject *dm_nonnormal(dirstateItemObject *self)
+{
+ if (self->state != 'n' || self->mtime == ambiguous_time) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+};
+static PyObject *dm_otherparent(dirstateItemObject *self)
+{
+ if (self->size == dirstate_v1_from_p2) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+};
+
+static PyObject *dirstate_item_need_delay(dirstateItemObject *self,
+ PyObject *value)
+{
+ long now;
+ if (!pylong_to_long(value, &now)) {
+ return NULL;
+ }
+ if (self->state == 'n' && self->mtime == now) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+};
+
+/* This will never change since it's bound to V1, unlike `make_dirstate_item`
+ */
+static inline dirstateItemObject *
+dirstate_item_from_v1_data(char state, int mode, int size, int mtime)
+{
+ dirstateItemObject *t =
+ PyObject_New(dirstateItemObject, &dirstateItemType);
+ if (!t) {
+ return NULL;
+ }
+ t->state = state;
+ t->mode = mode;
+ t->size = size;
+ t->mtime = mtime;
+ return t;
+}
+
+/* This will never change since it's bound to V1, unlike `dirstate_item_new` */
+static PyObject *dirstate_item_from_v1_meth(PyTypeObject *subtype,
+ PyObject *args)
+{
+ /* We do all the initialization here and not a tp_init function because
+ * dirstate_item is immutable. */
+ dirstateItemObject *t;
+ char state;
+ int size, mode, mtime;
+ if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
+ return NULL;
+ }
+
+ t = (dirstateItemObject *)subtype->tp_alloc(subtype, 1);
+ if (!t) {
+ return NULL;
+ }
+ t->state = state;
+ t->mode = mode;
+ t->size = size;
+ t->mtime = mtime;
+
+ return (PyObject *)t;
+};
+
+/* This means the next status call will have to actually check its content
+ to make sure it is correct. */
+static PyObject *dirstate_item_set_possibly_dirty(dirstateItemObject *self)
+{
+ self->mtime = ambiguous_time;
+ Py_RETURN_NONE;
+}
+
+static PyMethodDef dirstate_item_methods[] = {
+ {"v1_state", (PyCFunction)dirstate_item_v1_state, METH_NOARGS,
+ "return a \"state\" suitable for v1 serialization"},
+ {"v1_mode", (PyCFunction)dirstate_item_v1_mode, METH_NOARGS,
+ "return a \"mode\" suitable for v1 serialization"},
+ {"v1_size", (PyCFunction)dirstate_item_v1_size, METH_NOARGS,
+ "return a \"size\" suitable for v1 serialization"},
+ {"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS,
+ "return a \"mtime\" suitable for v1 serialization"},
+ {"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
+ "True if the stored mtime would be ambiguous with the current time"},
+    {"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
+     METH_VARARGS | METH_CLASS, "build a new DirstateItem object from V1 data"},
+ {"set_possibly_dirty", (PyCFunction)dirstate_item_set_possibly_dirty,
+ METH_NOARGS, "mark a file as \"possibly dirty\""},
+ {"dm_nonnormal", (PyCFunction)dm_nonnormal, METH_NOARGS,
+     "True if the entry is non-normal in the dirstatemap sense"},
+ {"dm_otherparent", (PyCFunction)dm_otherparent, METH_NOARGS,
+     "True if the entry is `otherparent` in the dirstatemap sense"},
+ {NULL} /* Sentinel */
};
-PyTypeObject dirstateTupleType = {
- PyVarObject_HEAD_INIT(NULL, 0) /* header */
- "dirstate_tuple", /* tp_name */
- sizeof(dirstateTupleObject), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)dirstate_tuple_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- &dirstate_tuple_sq, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- "dirstate tuple", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- dirstate_tuple_new, /* tp_new */
+static PyObject *dirstate_item_get_mode(dirstateItemObject *self)
+{
+ return PyInt_FromLong(self->mode);
+};
+
+static PyObject *dirstate_item_get_size(dirstateItemObject *self)
+{
+ return PyInt_FromLong(self->size);
+};
+
+static PyObject *dirstate_item_get_mtime(dirstateItemObject *self)
+{
+ return PyInt_FromLong(self->mtime);
+};
+
+static PyObject *dirstate_item_get_state(dirstateItemObject *self)
+{
+ return PyBytes_FromStringAndSize(&self->state, 1);
+};
+
+static PyObject *dirstate_item_get_tracked(dirstateItemObject *self)
+{
+ if (self->state == 'a' || self->state == 'm' || self->state == 'n') {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+};
+
+static PyObject *dirstate_item_get_added(dirstateItemObject *self)
+{
+ if (self->state == 'a') {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+};
+
+static PyObject *dirstate_item_get_merged(dirstateItemObject *self)
+{
+ if (self->state == 'm') {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+};
+
+static PyObject *dirstate_item_get_merged_removed(dirstateItemObject *self)
+{
+ if (self->state == 'r' && self->size == dirstate_v1_nonnormal) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+};
+
+static PyObject *dirstate_item_get_from_p2(dirstateItemObject *self)
+{
+ if (self->state == 'n' && self->size == dirstate_v1_from_p2) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+};
+
+static PyObject *dirstate_item_get_from_p2_removed(dirstateItemObject *self)
+{
+ if (self->state == 'r' && self->size == dirstate_v1_from_p2) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+};
+
+static PyObject *dirstate_item_get_removed(dirstateItemObject *self)
+{
+ if (self->state == 'r') {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+};
+
+static PyGetSetDef dirstate_item_getset[] = {
+ {"mode", (getter)dirstate_item_get_mode, NULL, "mode", NULL},
+ {"size", (getter)dirstate_item_get_size, NULL, "size", NULL},
+ {"mtime", (getter)dirstate_item_get_mtime, NULL, "mtime", NULL},
+ {"state", (getter)dirstate_item_get_state, NULL, "state", NULL},
+ {"tracked", (getter)dirstate_item_get_tracked, NULL, "tracked", NULL},
+ {"added", (getter)dirstate_item_get_added, NULL, "added", NULL},
+ {"merged_removed", (getter)dirstate_item_get_merged_removed, NULL,
+ "merged_removed", NULL},
+ {"merged", (getter)dirstate_item_get_merged, NULL, "merged", NULL},
+ {"from_p2_removed", (getter)dirstate_item_get_from_p2_removed, NULL,
+ "from_p2_removed", NULL},
+ {"from_p2", (getter)dirstate_item_get_from_p2, NULL, "from_p2", NULL},
+ {"removed", (getter)dirstate_item_get_removed, NULL, "removed", NULL},
+ {NULL} /* Sentinel */
+};
+
+PyTypeObject dirstateItemType = {
+ PyVarObject_HEAD_INIT(NULL, 0) /* header */
+ "dirstate_tuple", /* tp_name */
+ sizeof(dirstateItemObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor)dirstate_item_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ &dirstate_item_sq, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ "dirstate tuple", /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ dirstate_item_methods, /* tp_methods */
+ 0, /* tp_members */
+ dirstate_item_getset, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ dirstate_item_new, /* tp_new */
};
static PyObject *parse_dirstate(PyObject *self, PyObject *args)
@@ -212,8 +439,8 @@ static PyObject *parse_dirstate(PyObject
goto quit;
}
- entry =
- (PyObject *)make_dirstate_tuple(state, mode, size, mtime);
+ entry = (PyObject *)dirstate_item_from_v1_data(state, mode,
+ size, mtime);
cpos = memchr(cur, 0, flen);
if (cpos) {
fname = PyBytes_FromStringAndSize(cur, cpos - cur);
@@ -274,13 +501,13 @@ static PyObject *nonnormalotherparentent
pos = 0;
while (PyDict_Next(dmap, &pos, &fname, &v)) {
- dirstateTupleObject *t;
+ dirstateItemObject *t;
if (!dirstate_tuple_check(v)) {
PyErr_SetString(PyExc_TypeError,
"expected a dirstate tuple");
goto bail;
}
- t = (dirstateTupleObject *)v;
+ t = (dirstateItemObject *)v;
if (t->state == 'n' && t->size == -2) {
if (PySet_Add(otherpset, fname) == -1) {
@@ -375,7 +602,7 @@ static PyObject *pack_dirstate(PyObject
p += 20;
for (pos = 0; PyDict_Next(map, &pos, &k, &v);) {
- dirstateTupleObject *tuple;
+ dirstateItemObject *tuple;
char state;
int mode, size, mtime;
Py_ssize_t len, l;
@@ -387,7 +614,7 @@ static PyObject *pack_dirstate(PyObject
"expected a dirstate tuple");
goto bail;
}
- tuple = (dirstateTupleObject *)v;
+ tuple = (dirstateItemObject *)v;
state = tuple->state;
mode = tuple->mode;
@@ -397,7 +624,7 @@ static PyObject *pack_dirstate(PyObject
/* See pure/parsers.py:pack_dirstate for why we do
* this. */
mtime = -1;
- mtime_unset = (PyObject *)make_dirstate_tuple(
+ mtime_unset = (PyObject *)make_dirstate_item(
state, mode, size, mtime);
if (!mtime_unset) {
goto bail;
@@ -668,7 +895,7 @@ void dirs_module_init(PyObject *mod);
void manifest_module_init(PyObject *mod);
void revlog_module_init(PyObject *mod);
-static const int version = 17;
+static const int version = 20;
static void module_init(PyObject *mod)
{
@@ -690,17 +917,16 @@ static void module_init(PyObject *mod)
revlog_module_init(mod);
capsule = PyCapsule_New(
- make_dirstate_tuple,
- "mercurial.cext.parsers.make_dirstate_tuple_CAPI", NULL);
+ make_dirstate_item,
+ "mercurial.cext.parsers.make_dirstate_item_CAPI", NULL);
if (capsule != NULL)
- PyModule_AddObject(mod, "make_dirstate_tuple_CAPI", capsule);
+ PyModule_AddObject(mod, "make_dirstate_item_CAPI", capsule);
- if (PyType_Ready(&dirstateTupleType) < 0) {
+ if (PyType_Ready(&dirstateItemType) < 0) {
return;
}
- Py_INCREF(&dirstateTupleType);
- PyModule_AddObject(mod, "dirstatetuple",
- (PyObject *)&dirstateTupleType);
+ Py_INCREF(&dirstateItemType);
+ PyModule_AddObject(mod, "DirstateItem", (PyObject *)&dirstateItemType);
}
static int check_python_version(void)
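
The type formerly registered as `dirstatetuple` is now exposed as `DirstateItem`, still usable as a 4-tuple but with the `v1_*` methods and the properties added above. A hedged usage sketch (needs the compiled C extension; the mode/size/mtime values are arbitrary):

    from mercurial.cext import parsers

    item = parsers.DirstateItem(b'n', 0o644, 512, 0)
    state, mode, size, mtime = item           # the old 4-tuple protocol still works
    assert state == item.v1_state() == b'n'
    assert item.tracked and not item.removed
    item.set_possibly_dirty()                 # marks the mtime as ambiguous
    assert item.v1_mtime() == -1
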
diff --git a/mercurial/cext/parsers.pyi b/mercurial/cext/parsers.pyi
--- a/mercurial/cext/parsers.pyi
+++ b/mercurial/cext/parsers.pyi
@@ -12,7 +12,7 @@ from typing import (
version: int
versionerrortext: str
-class dirstatetuple:
+class DirstateItem:
__doc__: str
def __len__(self) -> int: ...
@@ -29,7 +29,7 @@ class dirs:
# From manifest.c
class lazymanifest:
- def __init__(self, data: bytes): ...
+ def __init__(self, nodelen: int, data: bytes): ...
def __iter__(self) -> Iterator[bytes]: ...
def __len__(self) -> int: ...
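
Matching the C change, the stub now declares the node length as the first constructor argument. For a SHA-1 repository that looks roughly like this (requires the compiled extension; the sample manifest data is made up):

    from mercurial.cext import parsers

    data = b'foo\0' + b'11' * 20 + b'\n'
    lm = parsers.lazymanifest(20, data)       # nodelen first, raw data second
    assert len(lm) == 1
    assert lm[b'foo'] == (b'\x11' * 20, b'')
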
diff --git a/mercurial/cext/revlog.c b/mercurial/cext/revlog.c
--- a/mercurial/cext/revlog.c
+++ b/mercurial/cext/revlog.c
@@ -99,7 +99,12 @@ struct indexObjectStruct {
int ntlookups; /* # lookups */
int ntmisses; /* # lookups that miss the cache */
int inlined;
- long hdrsize; /* size of index headers. Differs in v1 v.s. v2 format */
+ long entry_size; /* size of index headers. Differs in v1 v.s. v2 format
+ */
+ long rust_ext_compat; /* compatibility with being used in rust
+ extensions */
+	char format_version; /* version of the revlog index format, i.e. v1
+	                        vs. v2 */
};
static Py_ssize_t index_length(const indexObject *self)
@@ -115,18 +120,21 @@ static Py_ssize_t inline_scan(indexObjec
static int index_find_node(indexObject *self, const char *node);
#if LONG_MAX == 0x7fffffffL
-static const char *const v1_tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
-static const char *const v2_tuple_format = PY23("Kiiiiiis#Ki", "Kiiiiiiy#Ki");
+static const char *const tuple_format = PY23("Kiiiiiis#KiBB", "Kiiiiiiy#KiBB");
#else
-static const char *const v1_tuple_format = PY23("kiiiiiis#", "kiiiiiiy#");
-static const char *const v2_tuple_format = PY23("kiiiiiis#ki", "kiiiiiiy#ki");
+static const char *const tuple_format = PY23("kiiiiiis#kiBB", "kiiiiiiy#kiBB");
#endif
/* A RevlogNG v1 index entry is 64 bytes long. */
-static const long v1_hdrsize = 64;
+static const long v1_entry_size = 64;
/* A Revlogv2 index entry is 96 bytes long. */
-static const long v2_hdrsize = 96;
+static const long v2_entry_size = 96;
+
+static const long format_v1 = 1; /* Internal only, could be any number */
+static const long format_v2 = 2; /* Internal only, could be any number */
+
+static const char comp_mode_inline = 2;
static void raise_revlog_error(void)
{
@@ -164,7 +172,7 @@ cleanup:
static const char *index_deref(indexObject *self, Py_ssize_t pos)
{
if (pos >= self->length)
- return self->added + (pos - self->length) * self->hdrsize;
+ return self->added + (pos - self->length) * self->entry_size;
if (self->inlined && pos > 0) {
if (self->offsets == NULL) {
@@ -181,7 +189,7 @@ static const char *index_deref(indexObje
return self->offsets[pos];
}
- return (const char *)(self->buf.buf) + pos * self->hdrsize;
+ return (const char *)(self->buf.buf) + pos * self->entry_size;
}
/*
@@ -290,6 +298,7 @@ static PyObject *index_get(indexObject *
uint64_t offset_flags, sidedata_offset;
int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2,
sidedata_comp_len;
+ char data_comp_mode, sidedata_comp_mode;
const char *c_node_id;
const char *data;
Py_ssize_t length = index_length(self);
@@ -328,19 +337,70 @@ static PyObject *index_get(indexObject *
parent_2 = getbe32(data + 28);
c_node_id = data + 32;
- if (self->hdrsize == v1_hdrsize) {
- return Py_BuildValue(v1_tuple_format, offset_flags, comp_len,
- uncomp_len, base_rev, link_rev, parent_1,
- parent_2, c_node_id, self->nodelen);
+ if (self->format_version == format_v1) {
+ sidedata_offset = 0;
+ sidedata_comp_len = 0;
+ data_comp_mode = comp_mode_inline;
+ sidedata_comp_mode = comp_mode_inline;
} else {
sidedata_offset = getbe64(data + 64);
sidedata_comp_len = getbe32(data + 72);
-
- return Py_BuildValue(v2_tuple_format, offset_flags, comp_len,
- uncomp_len, base_rev, link_rev, parent_1,
- parent_2, c_node_id, self->nodelen,
- sidedata_offset, sidedata_comp_len);
+ data_comp_mode = data[76] & 3;
+ sidedata_comp_mode = ((data[76] >> 2) & 3);
+ }
+
+ return Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
+ base_rev, link_rev, parent_1, parent_2, c_node_id,
+ self->nodelen, sidedata_offset, sidedata_comp_len,
+ data_comp_mode, sidedata_comp_mode);
+}
+/*
+ * Pack header information in binary
+ */
+static PyObject *index_pack_header(indexObject *self, PyObject *args)
+{
+ int header;
+ char out[4];
+ if (!PyArg_ParseTuple(args, "I", &header)) {
+ return NULL;
+ }
+ if (self->format_version != format_v1) {
+ PyErr_Format(PyExc_RuntimeError,
+ "version header should go in the docket, not the "
+ "index: %lu",
+ header);
+ return NULL;
}
+ putbe32(header, out);
+ return PyBytes_FromStringAndSize(out, 4);
+}
+/*
+ * Return the raw binary string representing a revision
+ */
+static PyObject *index_entry_binary(indexObject *self, PyObject *value)
+{
+ long rev;
+ const char *data;
+ Py_ssize_t length = index_length(self);
+
+ if (!pylong_to_long(value, &rev)) {
+ return NULL;
+ }
+ if (rev < 0 || rev >= length) {
+ PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
+ rev);
+ return NULL;
+ };
+
+ data = index_deref(self, rev);
+ if (data == NULL)
+ return NULL;
+ if (rev == 0 && self->format_version == format_v1) {
+ /* the header is eating the start of the first entry */
+ return PyBytes_FromStringAndSize(data + 4,
+ self->entry_size - 4);
+ }
+ return PyBytes_FromStringAndSize(data, self->entry_size);
}
/*
@@ -393,46 +453,53 @@ static PyObject *index_append(indexObjec
{
uint64_t offset_flags, sidedata_offset;
int rev, comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
+ char data_comp_mode, sidedata_comp_mode;
Py_ssize_t c_node_id_len, sidedata_comp_len;
const char *c_node_id;
+ char comp_field;
char *data;
- if (self->hdrsize == v1_hdrsize) {
- if (!PyArg_ParseTuple(obj, v1_tuple_format, &offset_flags,
- &comp_len, &uncomp_len, &base_rev,
- &link_rev, &parent_1, &parent_2,
- &c_node_id, &c_node_id_len)) {
- PyErr_SetString(PyExc_TypeError, "8-tuple required");
- return NULL;
- }
- } else {
- if (!PyArg_ParseTuple(obj, v2_tuple_format, &offset_flags,
- &comp_len, &uncomp_len, &base_rev,
- &link_rev, &parent_1, &parent_2,
- &c_node_id, &c_node_id_len,
- &sidedata_offset, &sidedata_comp_len)) {
- PyErr_SetString(PyExc_TypeError, "10-tuple required");
- return NULL;
- }
+ if (!PyArg_ParseTuple(obj, tuple_format, &offset_flags, &comp_len,
+ &uncomp_len, &base_rev, &link_rev, &parent_1,
+ &parent_2, &c_node_id, &c_node_id_len,
+ &sidedata_offset, &sidedata_comp_len,
+ &data_comp_mode, &sidedata_comp_mode)) {
+		PyErr_SetString(PyExc_TypeError, "12-tuple required");
+ return NULL;
}
if (c_node_id_len != self->nodelen) {
PyErr_SetString(PyExc_TypeError, "invalid node");
return NULL;
}
+ if (self->format_version == format_v1) {
+
+ if (data_comp_mode != comp_mode_inline) {
+ PyErr_Format(PyExc_ValueError,
+ "invalid data compression mode: %i",
+ data_comp_mode);
+ return NULL;
+ }
+ if (sidedata_comp_mode != comp_mode_inline) {
+ PyErr_Format(PyExc_ValueError,
+ "invalid sidedata compression mode: %i",
+ sidedata_comp_mode);
+ return NULL;
+ }
+ }
if (self->new_length == self->added_length) {
size_t new_added_length =
self->added_length ? self->added_length * 2 : 4096;
- void *new_added = PyMem_Realloc(self->added, new_added_length *
- self->hdrsize);
+ void *new_added = PyMem_Realloc(
+ self->added, new_added_length * self->entry_size);
if (!new_added)
return PyErr_NoMemory();
self->added = new_added;
self->added_length = new_added_length;
}
rev = self->length + self->new_length;
- data = self->added + self->hdrsize * self->new_length++;
+ data = self->added + self->entry_size * self->new_length++;
putbe32(offset_flags >> 32, data);
putbe32(offset_flags & 0xffffffffU, data + 4);
putbe32(comp_len, data + 8);
@@ -444,11 +511,14 @@ static PyObject *index_append(indexObjec
memcpy(data + 32, c_node_id, c_node_id_len);
/* Padding since SHA-1 is only 20 bytes for now */
memset(data + 32 + c_node_id_len, 0, 32 - c_node_id_len);
- if (self->hdrsize != v1_hdrsize) {
+ if (self->format_version == format_v2) {
putbe64(sidedata_offset, data + 64);
putbe32(sidedata_comp_len, data + 72);
+ comp_field = data_comp_mode & 3;
+ comp_field = comp_field | (sidedata_comp_mode & 3) << 2;
+ data[76] = comp_field;
/* Padding for 96 bytes alignment */
- memset(data + 76, 0, self->hdrsize - 76);
+ memset(data + 77, 0, self->entry_size - 77);
}
if (self->ntinitialized)
@@ -463,17 +533,18 @@ static PyObject *index_append(indexObjec
inside the transaction that creates the given revision. */
static PyObject *index_replace_sidedata_info(indexObject *self, PyObject *args)
{
- uint64_t sidedata_offset;
+ uint64_t offset_flags, sidedata_offset;
int rev;
+ char comp_mode;
Py_ssize_t sidedata_comp_len;
char *data;
#if LONG_MAX == 0x7fffffffL
- const char *const sidedata_format = PY23("nKi", "nKi");
+ const char *const sidedata_format = PY23("nKiKB", "nKiKB");
#else
- const char *const sidedata_format = PY23("nki", "nki");
+ const char *const sidedata_format = PY23("nkikB", "nkikB");
#endif
- if (self->hdrsize == v1_hdrsize || self->inlined) {
+ if (self->entry_size == v1_entry_size || self->inlined) {
/*
There is a bug in the transaction handling when going from an
inline revlog to a separate index and data file. Turn it off until
@@ -485,7 +556,7 @@ static PyObject *index_replace_sidedata_
}
if (!PyArg_ParseTuple(args, sidedata_format, &rev, &sidedata_offset,
- &sidedata_comp_len))
+ &sidedata_comp_len, &offset_flags, &comp_mode))
return NULL;
if (rev < 0 || rev >= index_length(self)) {
@@ -501,9 +572,11 @@ static PyObject *index_replace_sidedata_
/* Find the newly added node, offset from the "already on-disk" length
*/
- data = self->added + self->hdrsize * (rev - self->length);
+ data = self->added + self->entry_size * (rev - self->length);
+ putbe64(offset_flags, data);
putbe64(sidedata_offset, data + 64);
putbe32(sidedata_comp_len, data + 72);
+ data[76] = (data[76] & ~(3 << 2)) | ((comp_mode & 3) << 2);
Py_RETURN_NONE;
}
@@ -2652,17 +2725,17 @@ static Py_ssize_t inline_scan(indexObjec
const char *data = (const char *)self->buf.buf;
Py_ssize_t pos = 0;
Py_ssize_t end = self->buf.len;
- long incr = self->hdrsize;
+ long incr = self->entry_size;
Py_ssize_t len = 0;
- while (pos + self->hdrsize <= end && pos >= 0) {
+ while (pos + self->entry_size <= end && pos >= 0) {
uint32_t comp_len, sidedata_comp_len = 0;
/* 3rd element of header is length of compressed inline data */
comp_len = getbe32(data + pos + 8);
- if (self->hdrsize == v2_hdrsize) {
+ if (self->entry_size == v2_entry_size) {
sidedata_comp_len = getbe32(data + pos + 72);
}
- incr = self->hdrsize + comp_len + sidedata_comp_len;
+ incr = self->entry_size + comp_len + sidedata_comp_len;
if (offsets)
offsets[len] = data + pos;
len++;
@@ -2699,6 +2772,7 @@ static int index_init(indexObject *self,
self->offsets = NULL;
self->nodelen = 20;
self->nullentry = NULL;
+ self->rust_ext_compat = 1;
revlogv2 = NULL;
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|O", kwlist,
@@ -2715,20 +2789,16 @@ static int index_init(indexObject *self,
}
if (revlogv2 && PyObject_IsTrue(revlogv2)) {
- self->hdrsize = v2_hdrsize;
+ self->format_version = format_v2;
+ self->entry_size = v2_entry_size;
} else {
- self->hdrsize = v1_hdrsize;
+ self->format_version = format_v1;
+ self->entry_size = v1_entry_size;
}
- if (self->hdrsize == v1_hdrsize) {
- self->nullentry =
- Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0, -1,
- -1, -1, -1, nullid, self->nodelen);
- } else {
- self->nullentry =
- Py_BuildValue(PY23("iiiiiiis#ii", "iiiiiiiy#ii"), 0, 0, 0,
- -1, -1, -1, -1, nullid, self->nodelen, 0, 0);
- }
+ self->nullentry = Py_BuildValue(
+ PY23("iiiiiiis#iiBB", "iiiiiiiy#iiBB"), 0, 0, 0, -1, -1, -1, -1,
+ nullid, self->nodelen, 0, 0, comp_mode_inline, comp_mode_inline);
if (!self->nullentry)
return -1;
@@ -2751,11 +2821,11 @@ static int index_init(indexObject *self,
goto bail;
self->length = len;
} else {
- if (size % self->hdrsize) {
+ if (size % self->entry_size) {
PyErr_SetString(PyExc_ValueError, "corrupt index file");
goto bail;
}
- self->length = size / self->hdrsize;
+ self->length = size / self->entry_size;
}
return 0;
@@ -2860,6 +2930,10 @@ static PyMethodDef index_methods[] = {
{"shortest", (PyCFunction)index_shortest, METH_VARARGS,
"find length of shortest hex nodeid of a binary ID"},
{"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
+ {"entry_binary", (PyCFunction)index_entry_binary, METH_O,
+ "return an entry in binary form"},
+ {"pack_header", (PyCFunction)index_pack_header, METH_VARARGS,
+ "pack the revlog header information into binary"},
{NULL} /* Sentinel */
};
@@ -2869,7 +2943,9 @@ static PyGetSetDef index_getset[] = {
};
static PyMemberDef index_members[] = {
- {"entry_size", T_LONG, offsetof(indexObject, hdrsize), 0,
+ {"entry_size", T_LONG, offsetof(indexObject, entry_size), 0,
+ "size of an index entry"},
+ {"rust_ext_compat", T_LONG, offsetof(indexObject, rust_ext_compat), 0,
"size of an index entry"},
{NULL} /* Sentinel */
};
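
After these revlog.c changes every index entry comes back as a single 12-element tuple; a v1 index simply reports zeroed side-data fields and the inline compression mode (2) for both slots. The shape of one such entry, with field names from the diff and made-up values:

    COMP_MODE_INLINE = 2    # value of comp_mode_inline above

    entry = (
        0 << 16 | 0,        # offset_flags: data offset packed with revision flags
        11,                 # compressed data length
        11,                 # uncompressed data length
        0,                  # delta base revision
        0,                  # link revision
        -1,                 # first parent revision
        -1,                 # second parent revision
        b'\x11' * 20,       # node id
        0,                  # sidedata offset (always 0 in a v1 index)
        0,                  # sidedata compressed length (always 0 in a v1 index)
        COMP_MODE_INLINE,   # data compression mode
        COMP_MODE_INLINE,   # sidedata compression mode
    )
    assert len(entry) == 12
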
diff --git a/mercurial/cext/util.h b/mercurial/cext/util.h
--- a/mercurial/cext/util.h
+++ b/mercurial/cext/util.h
@@ -28,11 +28,11 @@ typedef struct {
int mode;
int size;
int mtime;
-} dirstateTupleObject;
+} dirstateItemObject;
/* clang-format on */
-extern PyTypeObject dirstateTupleType;
-#define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateTupleType)
+extern PyTypeObject dirstateItemType;
+#define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateItemType)
#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
diff --git a/mercurial/changegroup.py b/mercurial/changegroup.py
--- a/mercurial/changegroup.py
+++ b/mercurial/changegroup.py
@@ -7,7 +7,6 @@
from __future__ import absolute_import
-import collections
import os
import struct
import weakref
@@ -15,7 +14,6 @@ import weakref
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
short,
)
@@ -34,10 +32,13 @@ from . import (
from .interfaces import repository
from .revlogutils import sidedata as sidedatamod
+from .revlogutils import constants as revlog_constants
+from .utils import storageutil
_CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
+_CHANGEGROUPV4_DELTA_HEADER = struct.Struct(b">B20s20s20s20s20sH")
LFS_REQUIREMENT = b'lfs'
@@ -194,19 +195,20 @@ class cg1unpacker(object):
else:
deltabase = prevnode
flags = 0
- return node, p1, p2, deltabase, cs, flags
+ protocol_flags = 0
+ return node, p1, p2, deltabase, cs, flags, protocol_flags
def deltachunk(self, prevnode):
+ # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata, proto_flags)
l = self._chunklength()
if not l:
return {}
headerdata = readexactly(self._stream, self.deltaheadersize)
header = self.deltaheader.unpack(headerdata)
delta = readexactly(self._stream, l - self.deltaheadersize)
- node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
- # cg4 forward-compat
- sidedata = {}
- return (node, p1, p2, cs, deltabase, delta, flags, sidedata)
+ header = self._deltaheader(header, prevnode)
+ node, p1, p2, deltabase, cs, flags, protocol_flags = header
+ return node, p1, p2, cs, deltabase, delta, flags, {}, protocol_flags
def getchunks(self):
"""returns all the chunks contains in the bundle
@@ -293,8 +295,16 @@ class cg1unpacker(object):
# Only useful if we're adding sidedata categories. If both peers have
# the same categories, then we simply don't do anything.
- if self.version == b'04' and srctype == b'pull':
- sidedata_helpers = get_sidedata_helpers(
+ adding_sidedata = (
+ (
+ requirements.REVLOGV2_REQUIREMENT in repo.requirements
+ or requirements.CHANGELOGV2_REQUIREMENT in repo.requirements
+ )
+ and self.version == b'04'
+ and srctype == b'pull'
+ )
+ if adding_sidedata:
+ sidedata_helpers = sidedatamod.get_sidedata_helpers(
repo,
sidedata_categories or set(),
pull=True,
@@ -386,15 +396,16 @@ class cg1unpacker(object):
_(b'manifests'), unit=_(b'chunks'), total=changesets
)
on_manifest_rev = None
- if sidedata_helpers and b'manifest' in sidedata_helpers[1]:
+ if sidedata_helpers:
+ if revlog_constants.KIND_MANIFESTLOG in sidedata_helpers[1]:
- def on_manifest_rev(manifest, rev):
- range = touched_manifests.get(manifest)
- if not range:
- touched_manifests[manifest] = (rev, rev)
- else:
- assert rev == range[1] + 1
- touched_manifests[manifest] = (range[0], rev)
+ def on_manifest_rev(manifest, rev):
+ range = touched_manifests.get(manifest)
+ if not range:
+ touched_manifests[manifest] = (rev, rev)
+ else:
+ assert rev == range[1] + 1
+ touched_manifests[manifest] = (range[0], rev)
self._unpackmanifests(
repo,
@@ -417,15 +428,16 @@ class cg1unpacker(object):
needfiles.setdefault(f, set()).add(n)
on_filelog_rev = None
- if sidedata_helpers and b'filelog' in sidedata_helpers[1]:
+ if sidedata_helpers:
+ if revlog_constants.KIND_FILELOG in sidedata_helpers[1]:
- def on_filelog_rev(filelog, rev):
- range = touched_filelogs.get(filelog)
- if not range:
- touched_filelogs[filelog] = (rev, rev)
- else:
- assert rev == range[1] + 1
- touched_filelogs[filelog] = (range[0], rev)
+ def on_filelog_rev(filelog, rev):
+ range = touched_filelogs.get(filelog)
+ if not range:
+ touched_filelogs[filelog] = (rev, rev)
+ else:
+ assert rev == range[1] + 1
+ touched_filelogs[filelog] = (range[0], rev)
# process the files
repo.ui.status(_(b"adding file changes\n"))
@@ -440,12 +452,14 @@ class cg1unpacker(object):
)
if sidedata_helpers:
- if b'changelog' in sidedata_helpers[1]:
- cl.rewrite_sidedata(sidedata_helpers, clstart, clend - 1)
+ if revlog_constants.KIND_CHANGELOG in sidedata_helpers[1]:
+ cl.rewrite_sidedata(
+ trp, sidedata_helpers, clstart, clend - 1
+ )
for mf, (startrev, endrev) in touched_manifests.items():
- mf.rewrite_sidedata(sidedata_helpers, startrev, endrev)
+ mf.rewrite_sidedata(trp, sidedata_helpers, startrev, endrev)
for fl, (startrev, endrev) in touched_filelogs.items():
- fl.rewrite_sidedata(sidedata_helpers, startrev, endrev)
+ fl.rewrite_sidedata(trp, sidedata_helpers, startrev, endrev)
# making sure the value exists
tr.changes.setdefault(b'changegroup-count-changesets', 0)
@@ -570,8 +584,8 @@ class cg1unpacker(object):
"""
chain = None
for chunkdata in iter(lambda: self.deltachunk(chain), {}):
- # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata)
- yield chunkdata
+ # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags, sidedata, proto_flags)
+ yield chunkdata[:8]
chain = chunkdata[0]
@@ -590,7 +604,8 @@ class cg2unpacker(cg1unpacker):
def _deltaheader(self, headertuple, prevnode):
node, p1, p2, deltabase, cs = headertuple
flags = 0
- return node, p1, p2, deltabase, cs, flags
+ protocol_flags = 0
+ return node, p1, p2, deltabase, cs, flags, protocol_flags
class cg3unpacker(cg2unpacker):
@@ -608,7 +623,8 @@ class cg3unpacker(cg2unpacker):
def _deltaheader(self, headertuple, prevnode):
node, p1, p2, deltabase, cs, flags = headertuple
- return node, p1, p2, deltabase, cs, flags
+ protocol_flags = 0
+ return node, p1, p2, deltabase, cs, flags, protocol_flags
def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
super(cg3unpacker, self)._unpackmanifests(
@@ -631,21 +647,48 @@ class cg4unpacker(cg3unpacker):
cg4 streams add support for exchanging sidedata.
"""
+ deltaheader = _CHANGEGROUPV4_DELTA_HEADER
+ deltaheadersize = deltaheader.size
version = b'04'
+ def _deltaheader(self, headertuple, prevnode):
+ protocol_flags, node, p1, p2, deltabase, cs, flags = headertuple
+ return node, p1, p2, deltabase, cs, flags, protocol_flags
+
def deltachunk(self, prevnode):
res = super(cg4unpacker, self).deltachunk(prevnode)
if not res:
return res
- (node, p1, p2, cs, deltabase, delta, flags, _sidedata) = res
+ (
+ node,
+ p1,
+ p2,
+ cs,
+ deltabase,
+ delta,
+ flags,
+ sidedata,
+ protocol_flags,
+ ) = res
+ assert not sidedata
- sidedata_raw = getchunk(self._stream)
sidedata = {}
- if len(sidedata_raw) > 0:
+ if protocol_flags & storageutil.CG_FLAG_SIDEDATA:
+ sidedata_raw = getchunk(self._stream)
sidedata = sidedatamod.deserialize_sidedata(sidedata_raw)
- return node, p1, p2, cs, deltabase, delta, flags, sidedata
+ return (
+ node,
+ p1,
+ p2,
+ cs,
+ deltabase,
+ delta,
+ flags,
+ sidedata,
+ protocol_flags,
+ )
class headerlessfixup(object):
@@ -673,7 +716,7 @@ def _revisiondeltatochunks(repo, delta,
if delta.delta is not None:
prefix, data = b'', delta.delta
- elif delta.basenode == nullid:
+ elif delta.basenode == repo.nullid:
data = delta.revision
prefix = mdiff.trivialdiffheader(len(data))
else:
@@ -688,10 +731,10 @@ def _revisiondeltatochunks(repo, delta,
yield prefix
yield data
- sidedata = delta.sidedata
- if sidedata is not None:
+ if delta.protocol_flags & storageutil.CG_FLAG_SIDEDATA:
# Need a separate chunk for sidedata to be able to differentiate
# "raw delta" length and sidedata length
+ sidedata = delta.sidedata
yield chunkheader(len(sidedata))
yield sidedata
@@ -787,9 +830,15 @@ def _resolvenarrowrevisioninfo(
return i
# We failed to resolve a parent for this node, so
# we crash the changegroup construction.
+ if util.safehasattr(store, 'target'):
+ target = store.display_id
+ else:
+        # some stores are not revlogs themselves but wrap one
+ target = store._revlog.display_id
+
raise error.Abort(
b"unable to resolve parent while packing '%s' %r"
- b' for changeset %r' % (store.indexfile, rev, clrev)
+ b' for changeset %r' % (target, rev, clrev)
)
return nullrev
@@ -828,7 +877,8 @@ def deltagroup(
If topic is not None, progress detail will be generated using this
topic name (e.g. changesets, manifests, etc).
- See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
+    See `revlogutils.sidedata.get_sidedata_helpers` for the doc on
+ `sidedata_helpers`.
"""
if not nodes:
return
@@ -1056,7 +1106,9 @@ class cgpacker(object):
# TODO a better approach would be for the strip bundle to
# correctly advertise its sidedata categories directly.
remote_sidedata = repo._wanted_sidedata
- sidedata_helpers = get_sidedata_helpers(repo, remote_sidedata)
+ sidedata_helpers = sidedatamod.get_sidedata_helpers(
+ repo, remote_sidedata
+ )
clstate, deltas = self._generatechangelog(
cl,
@@ -1194,7 +1246,8 @@ class cgpacker(object):
if generate is False, the state will be fully populated and no chunk
stream will be yielded
- See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
+        See `revlogutils.sidedata.get_sidedata_helpers` for the doc on
+ `sidedata_helpers`.
"""
clrevorder = {}
manifests = {}
@@ -1299,7 +1352,8 @@ class cgpacker(object):
`source` is unused here, but is used by extensions like remotefilelog to
change what is sent based in pulls vs pushes, etc.
- See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
+        See `revlogutils.sidedata.get_sidedata_helpers` for the doc on
+ `sidedata_helpers`.
"""
repo = self._repo
mfl = repo.manifestlog
@@ -1633,11 +1687,18 @@ def _makecg4packer(
fullnodes=None,
remote_sidedata=None,
):
- # Same header func as cg3. Sidedata is in a separate chunk from the delta to
- # differenciate "raw delta" and sidedata.
- builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
- d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
- )
+ # Sidedata is in a separate chunk from the delta to differentiate
+ # "raw delta" and sidedata.
+ def builddeltaheader(d):
+ return _CHANGEGROUPV4_DELTA_HEADER.pack(
+ d.protocol_flags,
+ d.node,
+ d.p1node,
+ d.p2node,
+ d.basenode,
+ d.linknode,
+ d.flags,
+ )
return cgpacker(
repo,
@@ -1682,11 +1743,15 @@ def allsupportedversions(repo):
#
# (or even to push subset of history)
needv03 = True
- has_revlogv2 = requirements.REVLOGV2_REQUIREMENT in repo.requirements
- if not has_revlogv2:
- versions.discard(b'04')
if not needv03:
versions.discard(b'03')
+ want_v4 = (
+ repo.ui.configbool(b'experimental', b'changegroup4')
+ or requirements.REVLOGV2_REQUIREMENT in repo.requirements
+ or requirements.CHANGELOGV2_REQUIREMENT in repo.requirements
+ )
+ if not want_v4:
+ versions.discard(b'04')
return versions
@@ -1913,25 +1978,3 @@ def _addchangegroupfiles(
)
return revisions, files
-
-
-def get_sidedata_helpers(repo, remote_sd_categories, pull=False):
- # Computers for computing sidedata on-the-fly
- sd_computers = collections.defaultdict(list)
- # Computers for categories to remove from sidedata
- sd_removers = collections.defaultdict(list)
-
- to_generate = remote_sd_categories - repo._wanted_sidedata
- to_remove = repo._wanted_sidedata - remote_sd_categories
- if pull:
- to_generate, to_remove = to_remove, to_generate
-
- for revlog_kind, computers in repo._sidedata_computers.items():
- for category, computer in computers.items():
- if category in to_generate:
- sd_computers[revlog_kind].append(computer)
- if category in to_remove:
- sd_removers[revlog_kind].append(computer)
-
- sidedata_helpers = (repo, sd_computers, sd_removers)
- return sidedata_helpers
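
cg4 extends the cg3 delta header with a leading protocol-flags byte, and a side-data chunk now follows the delta only when the corresponding flag is set. A pack/unpack sketch using the struct defined above; the `0x01` value for `CG_FLAG_SIDEDATA` is an assumption for the example (the real constant lives in `mercurial.utils.storageutil`):

    import struct

    _CHANGEGROUPV4_DELTA_HEADER = struct.Struct(b'>B20s20s20s20s20sH')
    CG_FLAG_SIDEDATA = 0x01  # assumed value of storageutil.CG_FLAG_SIDEDATA

    node = b'\x11' * 20
    p1 = p2 = base = b'\x00' * 20
    link = b'\x22' * 20

    header = _CHANGEGROUPV4_DELTA_HEADER.pack(
        CG_FLAG_SIDEDATA, node, p1, p2, base, link, 0
    )
    protocol_flags, node2, _p1, _p2, _base, _link, flags = (
        _CHANGEGROUPV4_DELTA_HEADER.unpack(header)
    )
    # only when the flag is set does the reader expect a sidedata chunk next
    assert protocol_flags & CG_FLAG_SIDEDATA and node2 == node
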
diff --git a/mercurial/changelog.py b/mercurial/changelog.py
--- a/mercurial/changelog.py
+++ b/mercurial/changelog.py
@@ -11,7 +11,6 @@ from .i18n import _
from .node import (
bin,
hex,
- nullid,
)
from .thirdparty import attr
@@ -26,7 +25,10 @@ from .utils import (
dateutil,
stringutil,
)
-from .revlogutils import flagutil
+from .revlogutils import (
+ constants as revlog_constants,
+ flagutil,
+)
_defaultextra = {b'branch': b'default'}
@@ -221,7 +223,7 @@ class changelogrevision(object):
def __new__(cls, cl, text, sidedata, cpsd):
if not text:
- return _changelogrevision(extra=_defaultextra, manifest=nullid)
+ return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)
self = super(changelogrevision, cls).__new__(cls)
# We could return here and implement the following as an __init__.
@@ -393,27 +395,22 @@ class changelog(revlog.revlog):
``concurrencychecker`` will be passed to the revlog init function, see
the documentation there.
"""
- if trypending and opener.exists(b'00changelog.i.a'):
- indexfile = b'00changelog.i.a'
- else:
- indexfile = b'00changelog.i'
-
- datafile = b'00changelog.d'
revlog.revlog.__init__(
self,
opener,
- indexfile,
- datafile=datafile,
+ target=(revlog_constants.KIND_CHANGELOG, None),
+ radix=b'00changelog',
checkambig=True,
mmaplargeindex=True,
persistentnodemap=opener.options.get(b'persistent-nodemap', False),
concurrencychecker=concurrencychecker,
+ trypending=trypending,
)
- if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
+ if self._initempty and (self._format_version == revlog.REVLOGV1):
# changelogs don't benefit from generaldelta.
- self.version &= ~revlog.FLAG_GENERALDELTA
+ self._format_flags &= ~revlog.FLAG_GENERALDELTA
self._generaldelta = False
# Delta chains for changelogs tend to be very small because entries
@@ -428,7 +425,6 @@ class changelog(revlog.revlog):
self._filteredrevs = frozenset()
self._filteredrevs_hashcache = {}
self._copiesstorage = opener.options.get(b'copies-storage')
- self.revlog_kind = b'changelog'
@property
def filteredrevs(self):
@@ -441,20 +437,25 @@ class changelog(revlog.revlog):
self._filteredrevs = val
self._filteredrevs_hashcache = {}
+ def _write_docket(self, tr):
+ if not self._delayed:
+ super(changelog, self)._write_docket(tr)
+
def delayupdate(self, tr):
"""delay visibility of index updates to other readers"""
-
- if not self._delayed:
+ if self._docket is None and not self._delayed:
if len(self) == 0:
self._divert = True
- if self._realopener.exists(self.indexfile + b'.a'):
- self._realopener.unlink(self.indexfile + b'.a')
- self.opener = _divertopener(self._realopener, self.indexfile)
+ if self._realopener.exists(self._indexfile + b'.a'):
+ self._realopener.unlink(self._indexfile + b'.a')
+ self.opener = _divertopener(self._realopener, self._indexfile)
else:
self._delaybuf = []
self.opener = _delayopener(
- self._realopener, self.indexfile, self._delaybuf
+ self._realopener, self._indexfile, self._delaybuf
)
+ self._segmentfile.opener = self.opener
+ self._segmentfile_sidedata.opener = self.opener
self._delayed = True
tr.addpending(b'cl-%i' % id(self), self._writepending)
tr.addfinalize(b'cl-%i' % id(self), self._finalize)
@@ -463,15 +464,19 @@ class changelog(revlog.revlog):
"""finalize index updates"""
self._delayed = False
self.opener = self._realopener
+ self._segmentfile.opener = self.opener
+ self._segmentfile_sidedata.opener = self.opener
# move redirected index data back into place
- if self._divert:
+ if self._docket is not None:
+ self._write_docket(tr)
+ elif self._divert:
assert not self._delaybuf
- tmpname = self.indexfile + b".a"
+ tmpname = self._indexfile + b".a"
nfile = self.opener.open(tmpname)
nfile.close()
- self.opener.rename(tmpname, self.indexfile, checkambig=True)
+ self.opener.rename(tmpname, self._indexfile, checkambig=True)
elif self._delaybuf:
- fp = self.opener(self.indexfile, b'a', checkambig=True)
+ fp = self.opener(self._indexfile, b'a', checkambig=True)
fp.write(b"".join(self._delaybuf))
fp.close()
self._delaybuf = None
@@ -482,10 +487,12 @@ class changelog(revlog.revlog):
def _writepending(self, tr):
"""create a file containing the unfinalized state for
pretxnchangegroup"""
+ if self._docket:
+ return self._docket.write(tr, pending=True)
if self._delaybuf:
# make a temporary copy of the index
- fp1 = self._realopener(self.indexfile)
- pendingfilename = self.indexfile + b".a"
+ fp1 = self._realopener(self._indexfile)
+ pendingfilename = self._indexfile + b".a"
# register as a temp file to ensure cleanup on failure
tr.registertmp(pendingfilename)
# write existing data
@@ -497,16 +504,18 @@ class changelog(revlog.revlog):
# switch modes so finalize can simply rename
self._delaybuf = None
self._divert = True
- self.opener = _divertopener(self._realopener, self.indexfile)
+ self.opener = _divertopener(self._realopener, self._indexfile)
+ self._segmentfile.opener = self.opener
+ self._segmentfile_sidedata.opener = self.opener
if self._divert:
return True
return False
- def _enforceinlinesize(self, tr, fp=None):
+ def _enforceinlinesize(self, tr):
if not self._delayed:
- revlog.revlog._enforceinlinesize(self, tr, fp)
+ revlog.revlog._enforceinlinesize(self, tr)
def read(self, nodeorrev):
"""Obtain data from a parsed changelog revision.
@@ -524,15 +533,16 @@ class changelog(revlog.revlog):
``changelogrevision`` instead, as it is faster for partial object
access.
"""
- d, s = self._revisiondata(nodeorrev)
- c = changelogrevision(
- self, d, s, self._copiesstorage == b'changeset-sidedata'
- )
+ d = self._revisiondata(nodeorrev)
+ sidedata = self.sidedata(nodeorrev)
+ copy_sd = self._copiesstorage == b'changeset-sidedata'
+ c = changelogrevision(self, d, sidedata, copy_sd)
return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
def changelogrevision(self, nodeorrev):
"""Obtain a ``changelogrevision`` for a node or revision."""
- text, sidedata = self._revisiondata(nodeorrev)
+ text = self._revisiondata(nodeorrev)
+ sidedata = self.sidedata(nodeorrev)
return changelogrevision(
self, text, sidedata, self._copiesstorage == b'changeset-sidedata'
)
diff --git a/mercurial/chgserver.py b/mercurial/chgserver.py
--- a/mercurial/chgserver.py
+++ b/mercurial/chgserver.py
@@ -320,7 +320,7 @@ class channeledsystem(object):
self.channel = channel
def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None):
- args = [type, cmd, os.path.abspath(cwd or b'.')]
+ args = [type, cmd, util.abspath(cwd or b'.')]
args.extend(b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ))
data = b'\0'.join(args)
self.out.write(struct.pack(b'>cI', self.channel, len(data)))
@@ -515,11 +515,9 @@ class chgcmdserver(commandserver.server)
if inst.hint:
self.ui.error(_(b"(%s)\n") % inst.hint)
errorraised = True
- except error.Abort as inst:
- if isinstance(inst, error.InputError):
- detailed_exit_code = 10
- elif isinstance(inst, error.ConfigError):
- detailed_exit_code = 30
+ except error.Error as inst:
+ if inst.detailed_exit_code is not None:
+ detailed_exit_code = inst.detailed_exit_code
self.ui.error(inst.format())
errorraised = True
diff --git a/mercurial/cmdutil.py b/mercurial/cmdutil.py
--- a/mercurial/cmdutil.py
+++ b/mercurial/cmdutil.py
@@ -15,7 +15,6 @@ import re
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
short,
)
@@ -62,6 +61,10 @@ from .utils import (
stringutil,
)
+from .revlogutils import (
+ constants as revlog_constants,
+)
+
if pycompat.TYPE_CHECKING:
from typing import (
Any,
@@ -298,37 +301,37 @@ def check_incompatible_arguments(opts, f
check_at_most_one_arg(opts, first, other)
-def resolvecommitoptions(ui, opts):
+def resolve_commit_options(ui, opts):
"""modify commit options dict to handle related options
The return value indicates that ``rewrite.update-timestamp`` is the reason
the ``date`` option is set.
"""
- check_at_most_one_arg(opts, b'date', b'currentdate')
- check_at_most_one_arg(opts, b'user', b'currentuser')
+ check_at_most_one_arg(opts, 'date', 'currentdate')
+ check_at_most_one_arg(opts, 'user', 'currentuser')
datemaydiffer = False # date-only change should be ignored?
- if opts.get(b'currentdate'):
- opts[b'date'] = b'%d %d' % dateutil.makedate()
+ if opts.get('currentdate'):
+ opts['date'] = b'%d %d' % dateutil.makedate()
elif (
- not opts.get(b'date')
+ not opts.get('date')
and ui.configbool(b'rewrite', b'update-timestamp')
- and opts.get(b'currentdate') is None
+ and opts.get('currentdate') is None
):
- opts[b'date'] = b'%d %d' % dateutil.makedate()
+ opts['date'] = b'%d %d' % dateutil.makedate()
datemaydiffer = True
- if opts.get(b'currentuser'):
- opts[b'user'] = ui.username()
+ if opts.get('currentuser'):
+ opts['user'] = ui.username()
return datemaydiffer
-def checknotesize(ui, opts):
+def check_note_size(opts):
"""make sure note is of valid format"""
- note = opts.get(b'note')
+ note = opts.get('note')
if not note:
return
@@ -343,19 +346,18 @@ def ishunk(x):
return isinstance(x, hunkclasses)
-def newandmodified(chunks, originalchunks):
+def isheader(x):
+ headerclasses = (crecordmod.uiheader, patch.header)
+ return isinstance(x, headerclasses)
+
+
+def newandmodified(chunks):
newlyaddedandmodifiedfiles = set()
alsorestore = set()
for chunk in chunks:
- if (
- ishunk(chunk)
- and chunk.header.isnewfile()
- and chunk not in originalchunks
- ):
- newlyaddedandmodifiedfiles.add(chunk.header.filename())
- alsorestore.update(
- set(chunk.header.files()) - {chunk.header.filename()}
- )
+ if isheader(chunk) and chunk.isnewfile():
+ newlyaddedandmodifiedfiles.add(chunk.filename())
+ alsorestore.update(set(chunk.files()) - {chunk.filename()})
return newlyaddedandmodifiedfiles, alsorestore
@@ -514,12 +516,12 @@ def dorecord(
diffopts.git = True
diffopts.showfunc = True
originaldiff = patch.diff(repo, changes=status, opts=diffopts)
- originalchunks = patch.parsepatch(originaldiff)
+ original_headers = patch.parsepatch(originaldiff)
match = scmutil.match(repo[None], pats)
# 1. filter patch, since we are intending to apply subset of it
try:
- chunks, newopts = filterfn(ui, originalchunks, match)
+ chunks, newopts = filterfn(ui, original_headers, match)
except error.PatchError as err:
raise error.InputError(_(b'error parsing patch: %s') % err)
opts.update(newopts)
@@ -529,15 +531,11 @@ def dorecord(
# version without the edit in the workdir. We also will need to restore
# files that were the sources of renames so that the patch application
# works.
- newlyaddedandmodifiedfiles, alsorestore = newandmodified(
- chunks, originalchunks
- )
+ newlyaddedandmodifiedfiles, alsorestore = newandmodified(chunks)
contenders = set()
for h in chunks:
- try:
+ if isheader(h):
contenders.update(set(h.files()))
- except AttributeError:
- pass
changed = status.modified + status.added + status.removed
newfiles = [f for f in changed if f in contenders]
@@ -632,7 +630,19 @@ def dorecord(
# without normallookup, restoring timestamp
# may cause partially committed files
# to be treated as unmodified
- dirstate.normallookup(realname)
+
+            # XXX-PENDINGCHANGE: We should clarify the context in
+            # which this function is called to make sure it is
+            # already called within a `pendingchange`. However, we
+            # are taking a shortcut here in order to be able to
+            # quickly deprecate the older API.
+ with dirstate.parentchange():
+ dirstate.update_file(
+ realname,
+ p1_tracked=True,
+ wc_tracked=True,
+ possibly_dirty=True,
+ )
# copystat=True here and above are a hack to trick any
# editors that have f open that we haven't modified them.
@@ -998,11 +1008,6 @@ def changebranch(ui, repo, revs, label,
_(b"a branch of the same name already exists")
)
- if repo.revs(b'obsolete() and %ld', revs):
- raise error.InputError(
- _(b"cannot change branch of a obsolete changeset")
- )
-
# make sure only topological heads
if repo.revs(b'heads(%ld) - head()', revs):
raise error.InputError(
@@ -1097,7 +1102,7 @@ def bailifchanged(repo, merge=True, hint
'hint' is the usual hint given to Abort exception.
"""
- if merge and repo.dirstate.p2() != nullid:
+ if merge and repo.dirstate.p2() != repo.nullid:
raise error.StateError(_(b'outstanding uncommitted merge'), hint=hint)
st = repo.status()
if st.modified or st.added or st.removed or st.deleted:
@@ -1434,8 +1439,12 @@ def openstorage(repo, cmd, file_, opts,
raise error.CommandError(cmd, _(b'invalid arguments'))
if not os.path.isfile(file_):
raise error.InputError(_(b"revlog '%s' not found") % file_)
+
+ target = (revlog_constants.KIND_OTHER, b'free-form:%s' % file_)
r = revlog.revlog(
- vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
+ vfsmod.vfs(encoding.getcwd(), audit=False),
+ target=target,
+ radix=file_[:-2],
)
return r
@@ -1849,7 +1858,10 @@ def copy(ui, repo, pats, opts, rename=Fa
continue
copylist.append((tfn(pat, dest, srcs), srcs))
if not copylist:
- raise error.InputError(_(b'no files to copy'))
+ hint = None
+ if rename:
+ hint = _(b'maybe you meant to use --after --at-rev=.')
+ raise error.InputError(_(b'no files to copy'), hint=hint)
errors = 0
for targetpath, srcs in copylist:
@@ -2104,7 +2116,7 @@ def _exportsingle(repo, ctx, fm, match,
if parents:
prev = parents[0]
else:
- prev = nullid
+ prev = repo.nullid
fm.context(ctx=ctx)
fm.plain(b'# HG changeset patch\n')
@@ -2810,7 +2822,8 @@ def amend(ui, repo, old, extra, pats, op
extra.update(wctx.extra())
# date-only change should be ignored?
- datemaydiffer = resolvecommitoptions(ui, opts)
+ datemaydiffer = resolve_commit_options(ui, opts)
+ opts = pycompat.byteskwargs(opts)
date = old.date()
if opts.get(b'date'):
@@ -2966,29 +2979,32 @@ def amend(ui, repo, old, extra, pats, op
newid = repo.commitctx(new)
ms.reset()
- # Reroute the working copy parent to the new changeset
- repo.setparents(newid, nullid)
-
- # Fixing the dirstate because localrepo.commitctx does not update
- # it. This is rather convenient because we did not need to update
- # the dirstate for all the files in the new commit which commitctx
- # could have done if it updated the dirstate. Now, we can
- # selectively update the dirstate only for the amended files.
- dirstate = repo.dirstate
-
- # Update the state of the files which were added and modified in the
- # amend to "normal" in the dirstate. We need to use "normallookup" since
- # the files may have changed since the command started; using "normal"
- # would mark them as clean but with uncommitted contents.
- normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
- for f in normalfiles:
- dirstate.normallookup(f)
-
- # Update the state of files which were removed in the amend
- # to "removed" in the dirstate.
- removedfiles = set(wctx.removed()) & filestoamend
- for f in removedfiles:
- dirstate.drop(f)
+ with repo.dirstate.parentchange():
+ # Reroute the working copy parent to the new changeset
+ repo.setparents(newid, repo.nullid)
+
+ # Fixing the dirstate because localrepo.commitctx does not update
+ # it. This is rather convenient because we did not need to update
+ # the dirstate for all the files in the new commit which commitctx
+ # could have done if it updated the dirstate. Now, we can
+ # selectively update the dirstate only for the amended files.
+ dirstate = repo.dirstate
+
+ # Update the state of the files which were added and modified in the
+ # amend to "normal" in the dirstate. We need to use "normallookup" since
+ # the files may have changed since the command started; using "normal"
+ # would mark them as clean but with uncommitted contents.
+ normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
+ for f in normalfiles:
+ dirstate.update_file(
+ f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
+ )
+
+ # Update the state of files which were removed in the amend
+ # to "removed" in the dirstate.
+ removedfiles = set(wctx.removed()) & filestoamend
+ for f in removedfiles:
+ dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
mapping = {old.node(): (newid,)}
obsmetadata = None
@@ -3322,7 +3338,7 @@ def revert(ui, repo, ctx, *pats, **opts)
# in case of merge, files that are actually added can be reported as
# modified, we need to post process the result
- if p2 != nullid:
+ if p2 != repo.nullid:
mergeadd = set(dsmodified)
for path in dsmodified:
if path in mf:
@@ -3548,7 +3564,7 @@ def _performrevert(
repo.wvfs.unlinkpath(f, rmdir=rmdir)
except OSError:
pass
- repo.dirstate.remove(f)
+ repo.dirstate.set_untracked(f)
def prntstatusmsg(action, f):
exact = names[f]
@@ -3563,12 +3579,12 @@ def _performrevert(
)
if choice == 0:
prntstatusmsg(b'forget', f)
- repo.dirstate.drop(f)
+ repo.dirstate.set_untracked(f)
else:
excluded_files.append(f)
else:
prntstatusmsg(b'forget', f)
- repo.dirstate.drop(f)
+ repo.dirstate.set_untracked(f)
for f in actions[b'remove'][0]:
audit_path(f)
if interactive:
@@ -3586,17 +3602,17 @@ def _performrevert(
for f in actions[b'drop'][0]:
audit_path(f)
prntstatusmsg(b'drop', f)
- repo.dirstate.remove(f)
+ repo.dirstate.set_untracked(f)
normal = None
if node == parent:
# We're reverting to our parent. If possible, we'd like status
# to report the file as clean. We have to use normallookup for
# merges to avoid losing information about merged/dirty files.
- if p2 != nullid:
- normal = repo.dirstate.normallookup
+ if p2 != repo.nullid:
+ normal = repo.dirstate.set_tracked
else:
- normal = repo.dirstate.normal
+ normal = repo.dirstate.set_clean
newlyaddedandmodifiedfiles = set()
if interactive:
@@ -3624,12 +3640,12 @@ def _performrevert(
diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
else:
diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
- originalchunks = patch.parsepatch(diff)
+ original_headers = patch.parsepatch(diff)
try:
chunks, opts = recordfilter(
- repo.ui, originalchunks, match, operation=operation
+ repo.ui, original_headers, match, operation=operation
)
if operation == b'discard':
chunks = patch.reversehunks(chunks)
@@ -3642,9 +3658,7 @@ def _performrevert(
# "remove added file (Yn)?", so we don't need to worry about the
# alsorestore value. Ideally we'd be able to partially revert
# copied/renamed files.
- newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
- chunks, originalchunks
- )
+ newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(chunks)
if tobackup is None:
tobackup = set()
# Apply changes
@@ -3687,11 +3701,11 @@ def _performrevert(
if f not in newlyaddedandmodifiedfiles:
prntstatusmsg(b'add', f)
checkout(f)
- repo.dirstate.add(f)
-
- normal = repo.dirstate.normallookup
- if node == parent and p2 == nullid:
- normal = repo.dirstate.normal
+ repo.dirstate.set_tracked(f)
+
+ normal = repo.dirstate.set_tracked
+ if node == parent and p2 == repo.nullid:
+ normal = repo.dirstate.set_clean
for f in actions[b'undelete'][0]:
if interactive:
choice = repo.ui.promptchoice(
diff --git a/mercurial/commands.py b/mercurial/commands.py
--- a/mercurial/commands.py
+++ b/mercurial/commands.py
@@ -15,10 +15,8 @@ import sys
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
short,
- wdirhex,
wdirrev,
)
from .pycompat import open
@@ -486,7 +484,7 @@ def annotate(ui, repo, *pats, **opts):
return b'%d ' % rev
def formathex(h):
- if h == wdirhex:
+ if h == repo.nodeconstants.wdirhex:
return b'%s+' % shorthex(hex(ctx.p1().node()))
else:
return b'%s ' % shorthex(h)
@@ -809,9 +807,9 @@ def _dobackout(ui, repo, node=None, rev=
)
p1, p2 = repo.changelog.parents(node)
- if p1 == nullid:
+ if p1 == repo.nullid:
raise error.InputError(_(b'cannot backout a change with no parents'))
- if p2 != nullid:
+ if p2 != repo.nullid:
if not opts.get(b'parent'):
raise error.InputError(_(b'cannot backout a merge changeset'))
p = repo.lookup(opts[b'parent'])
@@ -1085,7 +1083,7 @@ def bisect(
)
else:
node, p2 = repo.dirstate.parents()
- if p2 != nullid:
+ if p2 != repo.nullid:
raise error.StateError(_(b'current bisect revision is a merge'))
if rev:
if not nodes:
@@ -2079,9 +2077,8 @@ def _docommit(ui, repo, *pats, **opts):
# commit(), 1 if nothing changed or None on success.
return 1 if ret == 0 else ret
- opts = pycompat.byteskwargs(opts)
- if opts.get(b'subrepos'):
- cmdutil.check_incompatible_arguments(opts, b'subrepos', [b'amend'])
+ if opts.get('subrepos'):
+ cmdutil.check_incompatible_arguments(opts, 'subrepos', ['amend'])
# Let --subrepos on the command line override config setting.
ui.setconfig(b'ui', b'commitsubrepos', True, b'commit')
@@ -2092,7 +2089,7 @@ def _docommit(ui, repo, *pats, **opts):
tip = repo.changelog.tip()
extra = {}
- if opts.get(b'close_branch') or opts.get(b'force_close_branch'):
+ if opts.get('close_branch') or opts.get('force_close_branch'):
extra[b'close'] = b'1'
if repo[b'.'].closesbranch():
@@ -2106,21 +2103,21 @@ def _docommit(ui, repo, *pats, **opts):
elif (
branch == repo[b'.'].branch()
and repo[b'.'].node() not in bheads
- and not opts.get(b'force_close_branch')
+ and not opts.get('force_close_branch')
):
hint = _(
b'use --force-close-branch to close branch from a non-head'
b' changeset'
)
raise error.InputError(_(b'can only close branch heads'), hint=hint)
- elif opts.get(b'amend'):
+ elif opts.get('amend'):
if (
repo[b'.'].p1().branch() != branch
and repo[b'.'].p2().branch() != branch
):
raise error.InputError(_(b'can only close branch heads'))
- if opts.get(b'amend'):
+ if opts.get('amend'):
if ui.configbool(b'ui', b'commitsubrepos'):
raise error.InputError(
_(b'cannot amend with ui.commitsubrepos enabled')
@@ -2139,6 +2136,7 @@ def _docommit(ui, repo, *pats, **opts):
cmdutil.checkunfinished(repo)
node = cmdutil.amend(ui, repo, old, extra, pats, opts)
+ opts = pycompat.byteskwargs(opts)
if node == old.node():
ui.status(_(b"nothing changed\n"))
return 1
@@ -2167,6 +2165,7 @@ def _docommit(ui, repo, *pats, **opts):
extra=extra,
)
+ opts = pycompat.byteskwargs(opts)
node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
if not node:
@@ -2202,8 +2201,24 @@ def _docommit(ui, repo, *pats, **opts):
b'config|showconfig|debugconfig',
[
(b'u', b'untrusted', None, _(b'show untrusted configuration options')),
+    # This is experimental because we need
+    # * reasonable behavior around aliases,
+    # * to decide if we display the [debug], [experimental] and [devel]
+    #   sections by default,
+    # * some way to display "generic" config entries (the ones matching a
+    #   regexp),
+    # * proper display of the different value types,
+    # * a better way to handle values (and variable types),
+    # * maybe some type information?
+ (
+ b'',
+ b'exp-all-known',
+ None,
+            _(b'show all known config options (EXPERIMENTAL)'),
+ ),
(b'e', b'edit', None, _(b'edit user config')),
(b'l', b'local', None, _(b'edit repository config')),
+ (b'', b'source', None, _(b'show source of configuration value')),
(
b'',
b'shared',
@@ -2234,7 +2249,7 @@ def config(ui, repo, *values, **opts):
--global, edit the system-wide config file. With --local, edit the
repository-level config file.
- With --debug, the source (filename and line number) is printed
+ With --source, the source (filename and line number) is printed
for each config item.
See :hg:`help config` for more information about config files.
@@ -2337,7 +2352,10 @@ def config(ui, repo, *values, **opts):
selentries = set(selentries)
matched = False
- for section, name, value in ui.walkconfig(untrusted=untrusted):
+ all_known = opts[b'exp_all_known']
+ show_source = ui.debugflag or opts.get(b'source')
+ entries = ui.walkconfig(untrusted=untrusted, all_known=all_known)
+ for section, name, value in entries:
source = ui.configsource(section, name, untrusted)
value = pycompat.bytestr(value)
defaultvalue = ui.configdefault(section, name)
@@ -2348,7 +2366,7 @@ def config(ui, repo, *values, **opts):
if values and not (section in selsections or entryname in selentries):
continue
fm.startitem()
- fm.condwrite(ui.debugflag, b'source', b'%s: ', source)
+ fm.condwrite(show_source, b'source', b'%s: ', source)
if uniquesel:
fm.data(name=entryname)
fm.write(b'value', b'%s\n', value)
@@ -3071,8 +3089,7 @@ def graft(ui, repo, *revs, **opts):
def _dograft(ui, repo, *revs, **opts):
- opts = pycompat.byteskwargs(opts)
- if revs and opts.get(b'rev'):
+ if revs and opts.get('rev'):
ui.warn(
_(
b'warning: inconsistent use of --rev might give unexpected '
@@ -3081,61 +3098,59 @@ def _dograft(ui, repo, *revs, **opts):
)
revs = list(revs)
- revs.extend(opts.get(b'rev'))
+ revs.extend(opts.get('rev'))
# a dict of data to be stored in state file
statedata = {}
# list of new nodes created by ongoing graft
statedata[b'newnodes'] = []
- cmdutil.resolvecommitoptions(ui, opts)
-
- editor = cmdutil.getcommiteditor(
- editform=b'graft', **pycompat.strkwargs(opts)
- )
-
- cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
+ cmdutil.resolve_commit_options(ui, opts)
+
+ editor = cmdutil.getcommiteditor(editform=b'graft', **opts)
+
+ cmdutil.check_at_most_one_arg(opts, 'abort', 'stop', 'continue')
cont = False
- if opts.get(b'no_commit'):
+ if opts.get('no_commit'):
cmdutil.check_incompatible_arguments(
opts,
- b'no_commit',
- [b'edit', b'currentuser', b'currentdate', b'log'],
+ 'no_commit',
+ ['edit', 'currentuser', 'currentdate', 'log'],
)
graftstate = statemod.cmdstate(repo, b'graftstate')
- if opts.get(b'stop'):
+ if opts.get('stop'):
cmdutil.check_incompatible_arguments(
opts,
- b'stop',
+ 'stop',
[
- b'edit',
- b'log',
- b'user',
- b'date',
- b'currentdate',
- b'currentuser',
- b'rev',
+ 'edit',
+ 'log',
+ 'user',
+ 'date',
+ 'currentdate',
+ 'currentuser',
+ 'rev',
],
)
return _stopgraft(ui, repo, graftstate)
- elif opts.get(b'abort'):
+ elif opts.get('abort'):
cmdutil.check_incompatible_arguments(
opts,
- b'abort',
+ 'abort',
[
- b'edit',
- b'log',
- b'user',
- b'date',
- b'currentdate',
- b'currentuser',
- b'rev',
+ 'edit',
+ 'log',
+ 'user',
+ 'date',
+ 'currentdate',
+ 'currentuser',
+ 'rev',
],
)
return cmdutil.abortgraft(ui, repo, graftstate)
- elif opts.get(b'continue'):
+ elif opts.get('continue'):
cont = True
if revs:
raise error.InputError(_(b"can't specify --continue and revisions"))
@@ -3143,15 +3158,15 @@ def _dograft(ui, repo, *revs, **opts):
if graftstate.exists():
statedata = cmdutil.readgraftstate(repo, graftstate)
if statedata.get(b'date'):
- opts[b'date'] = statedata[b'date']
+ opts['date'] = statedata[b'date']
if statedata.get(b'user'):
- opts[b'user'] = statedata[b'user']
+ opts['user'] = statedata[b'user']
if statedata.get(b'log'):
- opts[b'log'] = True
+ opts['log'] = True
if statedata.get(b'no_commit'):
- opts[b'no_commit'] = statedata.get(b'no_commit')
+ opts['no_commit'] = statedata.get(b'no_commit')
if statedata.get(b'base'):
- opts[b'base'] = statedata.get(b'base')
+ opts['base'] = statedata.get(b'base')
nodes = statedata[b'nodes']
revs = [repo[node].rev() for node in nodes]
else:
@@ -3165,8 +3180,8 @@ def _dograft(ui, repo, *revs, **opts):
skipped = set()
basectx = None
- if opts.get(b'base'):
- basectx = scmutil.revsingle(repo, opts[b'base'], None)
+ if opts.get('base'):
+ basectx = scmutil.revsingle(repo, opts['base'], None)
if basectx is None:
# check for merges
for rev in repo.revs(b'%ld and merge()', revs):
@@ -3184,7 +3199,7 @@ def _dograft(ui, repo, *revs, **opts):
# way to the graftstate. With --force, any revisions we would have otherwise
# skipped would not have been filtered out, and if they hadn't been applied
# already, they'd have been in the graftstate.
- if not (cont or opts.get(b'force')) and basectx is None:
+ if not (cont or opts.get('force')) and basectx is None:
# check for ancestors of dest branch
ancestors = repo.revs(b'%ld & (::.)', revs)
for rev in ancestors:
@@ -3257,10 +3272,10 @@ def _dograft(ui, repo, *revs, **opts):
if not revs:
return -1
- if opts.get(b'no_commit'):
+ if opts.get('no_commit'):
statedata[b'no_commit'] = True
- if opts.get(b'base'):
- statedata[b'base'] = opts[b'base']
+ if opts.get('base'):
+ statedata[b'base'] = opts['base']
for pos, ctx in enumerate(repo.set(b"%ld", revs)):
desc = b'%d:%s "%s"' % (
ctx.rev(),
@@ -3271,7 +3286,7 @@ def _dograft(ui, repo, *revs, **opts):
if names:
desc += b' (%s)' % b' '.join(names)
ui.status(_(b'grafting %s\n') % desc)
- if opts.get(b'dry_run'):
+ if opts.get('dry_run'):
continue
source = ctx.extra().get(b'source')
@@ -3282,22 +3297,22 @@ def _dograft(ui, repo, *revs, **opts):
else:
extra[b'source'] = ctx.hex()
user = ctx.user()
- if opts.get(b'user'):
- user = opts[b'user']
+ if opts.get('user'):
+ user = opts['user']
statedata[b'user'] = user
date = ctx.date()
- if opts.get(b'date'):
- date = opts[b'date']
+ if opts.get('date'):
+ date = opts['date']
statedata[b'date'] = date
message = ctx.description()
- if opts.get(b'log'):
+ if opts.get('log'):
message += b'\n(grafted from %s)' % ctx.hex()
statedata[b'log'] = True
# we don't merge the first commit when continuing
if not cont:
# perform the graft merge with p1(rev) as 'ancestor'
- overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
+ overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')}
base = ctx.p1() if basectx is None else basectx
with ui.configoverride(overrides, b'graft'):
stats = mergemod.graft(repo, ctx, base, [b'local', b'graft'])
@@ -3315,7 +3330,7 @@ def _dograft(ui, repo, *revs, **opts):
cont = False
# commit if --no-commit is false
- if not opts.get(b'no_commit'):
+ if not opts.get('no_commit'):
node = repo.commit(
text=message, user=user, date=date, extra=extra, editor=editor
)
@@ -3330,7 +3345,7 @@ def _dograft(ui, repo, *revs, **opts):
nn.append(node)
# remove state when we complete successfully
- if not opts.get(b'dry_run'):
+ if not opts.get('dry_run'):
graftstate.delete()
return 0
@@ -4847,7 +4862,7 @@ def merge(ui, repo, node=None, **opts):
opts = pycompat.byteskwargs(opts)
abort = opts.get(b'abort')
- if abort and repo.dirstate.p2() == nullid:
+ if abort and repo.dirstate.p2() == repo.nullid:
cmdutil.wrongtooltocontinue(repo, _(b'merge'))
cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview'])
if abort:
@@ -5072,7 +5087,7 @@ def parents(ui, repo, file_=None, **opts
displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
for n in p:
- if n != nullid:
+ if n != repo.nullid:
displayer.show(repo[n])
displayer.close()
@@ -5128,15 +5143,9 @@ def paths(ui, repo, search=None, **opts)
"""
opts = pycompat.byteskwargs(opts)
+
+ pathitems = urlutil.list_paths(ui, search)
ui.pager(b'paths')
- if search:
- pathitems = [
- (name, path)
- for name, path in pycompat.iteritems(ui.paths)
- if name == search
- ]
- else:
- pathitems = sorted(pycompat.iteritems(ui.paths))
fm = ui.formatter(b'paths', opts)
if fm.isplain():
@@ -5157,6 +5166,11 @@ def paths(ui, repo, search=None, **opts)
assert subopt not in (b'name', b'url')
if showsubopts:
fm.plain(b'%s:%s = ' % (name, subopt))
+ if isinstance(value, bool):
+ if value:
+ value = b'yes'
+ else:
+ value = b'no'
fm.condwrite(showsubopts, subopt, b'%s\n', value)
fm.end()
@@ -6105,7 +6119,7 @@ def resolve(ui, repo, *pats, **opts):
with repo.wlock():
ms = mergestatemod.mergestate.read(repo)
- if not (ms.active() or repo.dirstate.p2() != nullid):
+ if not (ms.active() or repo.dirstate.p2() != repo.nullid):
raise error.StateError(
_(b'resolve command not applicable when not merging')
)
@@ -6223,8 +6237,21 @@ def resolve(ui, repo, *pats, **opts):
raise
ms.commit()
- branchmerge = repo.dirstate.p2() != nullid
- mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None)
+ branchmerge = repo.dirstate.p2() != repo.nullid
+    # resolve is not doing a parent change here, however, `recordupdates`
+    # will call some dirstate APIs that are intended for parent-change calls.
+    # Ideally we would not need this and could implement a lighter version
+    # of the recordupdates logic that would not have to deal with the part
+    # related to parent changes. However, this would require that:
+    # - we are sure we passed around enough information at update/merge
+    #   time to no longer need it at `hg resolve` time
+    # - we are sure we store that information well enough to be able to reuse it
+    # - we have the necessary logic to reuse it right.
+    #
+    # All this should eventually happen, but in the meantime, we use this
+    # context manager slightly out of the context it should be used in.
+ with repo.dirstate.parentchange():
+ mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None)
if not didwork and pats:
hint = None
@@ -6315,7 +6342,7 @@ def revert(ui, repo, *pats, **opts):
opts[b"rev"] = cmdutil.finddate(ui, repo, opts[b"date"])
parent, p2 = repo.dirstate.parents()
- if not opts.get(b'rev') and p2 != nullid:
+ if not opts.get(b'rev') and p2 != repo.nullid:
# revert after merge is a trap for new users (issue2915)
raise error.InputError(
_(b'uncommitted merge with no revision specified'),
@@ -6335,7 +6362,7 @@ def revert(ui, repo, *pats, **opts):
or opts.get(b'interactive')
):
msg = _(b"no files or directories specified")
- if p2 != nullid:
+ if p2 != repo.nullid:
hint = _(
b"uncommitted merge, use --all to discard all changes,"
b" or 'hg update -C .' to abort the merge"
@@ -7227,9 +7254,8 @@ def summary(ui, repo, **opts):
if revs:
revs = [other.lookup(rev) for rev in revs]
ui.debug(b'comparing with %s\n' % urlutil.hidepassword(source))
- repo.ui.pushbuffer()
- commoninc = discovery.findcommonincoming(repo, other, heads=revs)
- repo.ui.popbuffer()
+ with repo.ui.silent():
+ commoninc = discovery.findcommonincoming(repo, other, heads=revs)
return source, sbranch, other, commoninc, commoninc[1]
if needsincoming:
@@ -7273,11 +7299,10 @@ def summary(ui, repo, **opts):
common = commoninc
if revs:
revs = [repo.lookup(rev) for rev in revs]
- repo.ui.pushbuffer()
- outgoing = discovery.findcommonoutgoing(
- repo, dother, onlyheads=revs, commoninc=common
- )
- repo.ui.popbuffer()
+ with repo.ui.silent():
+ outgoing = discovery.findcommonoutgoing(
+ repo, dother, onlyheads=revs, commoninc=common
+ )
return dest, dbranch, dother, outgoing
if needsoutgoing:
@@ -7396,7 +7421,7 @@ def tag(ui, repo, name1, *names, **opts)
for n in names:
if repo.tagtype(n) == b'global':
alltags = tagsmod.findglobaltags(ui, repo)
- if alltags[n][0] == nullid:
+ if alltags[n][0] == repo.nullid:
raise error.InputError(
_(b"tag '%s' is already removed") % n
)
@@ -7423,7 +7448,7 @@ def tag(ui, repo, name1, *names, **opts)
)
if not opts.get(b'local'):
p1, p2 = repo.dirstate.parents()
- if p2 != nullid:
+ if p2 != repo.nullid:
raise error.StateError(_(b'uncommitted merge'))
bheads = repo.branchheads()
if not opts.get(b'force') and bheads and p1 not in bheads:
diff --git a/mercurial/commandserver.py b/mercurial/commandserver.py
--- a/mercurial/commandserver.py
+++ b/mercurial/commandserver.py
@@ -429,7 +429,7 @@ def setuplogging(ui, repo=None, fp=None)
elif logpath == b'-':
logger = loggingutil.fileobjectlogger(ui.ferr, tracked)
else:
- logpath = os.path.abspath(util.expandpath(logpath))
+ logpath = util.abspath(util.expandpath(logpath))
# developer config: cmdserver.max-log-files
maxfiles = ui.configint(b'cmdserver', b'max-log-files')
# developer config: cmdserver.max-log-size
diff --git a/mercurial/commit.py b/mercurial/commit.py
--- a/mercurial/commit.py
+++ b/mercurial/commit.py
@@ -10,7 +10,6 @@ import errno
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
)
@@ -277,10 +276,10 @@ def _filecommit(
"""
fname = fctx.path()
- fparent1 = manifest1.get(fname, nullid)
- fparent2 = manifest2.get(fname, nullid)
+ fparent1 = manifest1.get(fname, repo.nullid)
+ fparent2 = manifest2.get(fname, repo.nullid)
touched = None
- if fparent1 == fparent2 == nullid:
+ if fparent1 == fparent2 == repo.nullid:
touched = 'added'
if isinstance(fctx, context.filectx):
@@ -291,9 +290,11 @@ def _filecommit(
if node in [fparent1, fparent2]:
repo.ui.debug(b'reusing %s filelog entry\n' % fname)
if (
- fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
+ fparent1 != repo.nullid
+ and manifest1.flags(fname) != fctx.flags()
) or (
- fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
+ fparent2 != repo.nullid
+ and manifest2.flags(fname) != fctx.flags()
):
touched = 'modified'
return node, touched
@@ -327,7 +328,9 @@ def _filecommit(
newfparent = fparent2
if manifest2: # branch merge
- if fparent2 == nullid or cnode is None: # copied on remote side
+ if (
+ fparent2 == repo.nullid or cnode is None
+ ): # copied on remote side
if cfname in manifest2:
cnode = manifest2[cfname]
newfparent = fparent1
@@ -346,7 +349,7 @@ def _filecommit(
if includecopymeta:
meta[b"copy"] = cfname
meta[b"copyrev"] = hex(cnode)
- fparent1, fparent2 = nullid, newfparent
+ fparent1, fparent2 = repo.nullid, newfparent
else:
repo.ui.warn(
_(
@@ -356,20 +359,20 @@ def _filecommit(
% (fname, cfname)
)
- elif fparent1 == nullid:
- fparent1, fparent2 = fparent2, nullid
- elif fparent2 != nullid:
+ elif fparent1 == repo.nullid:
+ fparent1, fparent2 = fparent2, repo.nullid
+ elif fparent2 != repo.nullid:
if ms.active() and ms.extras(fname).get(b'filenode-source') == b'other':
- fparent1, fparent2 = fparent2, nullid
+ fparent1, fparent2 = fparent2, repo.nullid
elif ms.active() and ms.extras(fname).get(b'merged') != b'yes':
- fparent1, fparent2 = fparent1, nullid
+ fparent1, fparent2 = fparent1, repo.nullid
# is one parent an ancestor of the other?
else:
fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
if fparent1 in fparentancestors:
- fparent1, fparent2 = fparent2, nullid
+ fparent1, fparent2 = fparent2, repo.nullid
elif fparent2 in fparentancestors:
- fparent2 = nullid
+ fparent2 = repo.nullid
force_new_node = False
# The file might have been deleted by merge code and user explicitly choose
@@ -384,9 +387,14 @@ def _filecommit(
force_new_node = True
# is the file changed?
text = fctx.data()
- if fparent2 != nullid or meta or flog.cmp(fparent1, text) or force_new_node:
+ if (
+ fparent2 != repo.nullid
+ or meta
+ or flog.cmp(fparent1, text)
+ or force_new_node
+ ):
if touched is None: # do not overwrite added
- if fparent2 == nullid:
+ if fparent2 == repo.nullid:
touched = 'modified'
else:
touched = 'merged'
diff --git a/mercurial/config.py b/mercurial/config.py
--- a/mercurial/config.py
+++ b/mercurial/config.py
@@ -258,93 +258,3 @@ class config(object):
self.parse(
path, fp.read(), sections=sections, remap=remap, include=include
)
-
-
-def parselist(value):
- """parse a configuration value as a list of comma/space separated strings
-
- >>> parselist(b'this,is "a small" ,test')
- ['this', 'is', 'a small', 'test']
- """
-
- def _parse_plain(parts, s, offset):
- whitespace = False
- while offset < len(s) and (
- s[offset : offset + 1].isspace() or s[offset : offset + 1] == b','
- ):
- whitespace = True
- offset += 1
- if offset >= len(s):
- return None, parts, offset
- if whitespace:
- parts.append(b'')
- if s[offset : offset + 1] == b'"' and not parts[-1]:
- return _parse_quote, parts, offset + 1
- elif s[offset : offset + 1] == b'"' and parts[-1][-1:] == b'\\':
- parts[-1] = parts[-1][:-1] + s[offset : offset + 1]
- return _parse_plain, parts, offset + 1
- parts[-1] += s[offset : offset + 1]
- return _parse_plain, parts, offset + 1
-
- def _parse_quote(parts, s, offset):
- if offset < len(s) and s[offset : offset + 1] == b'"': # ""
- parts.append(b'')
- offset += 1
- while offset < len(s) and (
- s[offset : offset + 1].isspace()
- or s[offset : offset + 1] == b','
- ):
- offset += 1
- return _parse_plain, parts, offset
-
- while offset < len(s) and s[offset : offset + 1] != b'"':
- if (
- s[offset : offset + 1] == b'\\'
- and offset + 1 < len(s)
- and s[offset + 1 : offset + 2] == b'"'
- ):
- offset += 1
- parts[-1] += b'"'
- else:
- parts[-1] += s[offset : offset + 1]
- offset += 1
-
- if offset >= len(s):
- real_parts = _configlist(parts[-1])
- if not real_parts:
- parts[-1] = b'"'
- else:
- real_parts[0] = b'"' + real_parts[0]
- parts = parts[:-1]
- parts.extend(real_parts)
- return None, parts, offset
-
- offset += 1
- while offset < len(s) and s[offset : offset + 1] in [b' ', b',']:
- offset += 1
-
- if offset < len(s):
- if offset + 1 == len(s) and s[offset : offset + 1] == b'"':
- parts[-1] += b'"'
- offset += 1
- else:
- parts.append(b'')
- else:
- return None, parts, offset
-
- return _parse_plain, parts, offset
-
- def _configlist(s):
- s = s.rstrip(b' ,')
- if not s:
- return []
- parser, parts, offset = _parse_plain, [b''], 0
- while parser:
- parser, parts, offset = parser(parts, s, offset)
- return parts
-
- if value is not None and isinstance(value, bytes):
- result = _configlist(value.lstrip(b' ,\n'))
- else:
- result = value
- return result or []
diff --git a/mercurial/configitems.py b/mercurial/configitems.py
--- a/mercurial/configitems.py
+++ b/mercurial/configitems.py
@@ -904,6 +904,11 @@ coreconfigitem(
)
coreconfigitem(
b'experimental',
+ b'changegroup4',
+ default=False,
+)
+coreconfigitem(
+ b'experimental',
b'cleanup-as-archived',
default=False,
)
@@ -954,6 +959,11 @@ coreconfigitem(
)
coreconfigitem(
b'experimental',
+ b'dirstate-tree.in-memory',
+ default=False,
+)
+coreconfigitem(
+ b'experimental',
b'editortmpinhg',
default=False,
)
@@ -1138,6 +1148,27 @@ coreconfigitem(
b'revisions.prefixhexnode',
default=False,
)
+# "out of experimental" todo list.
+#
+# * include management of a persistent nodemap in the main docket
+# * enforce a "no-truncate" policy for mmap safety
+# - for censoring operation
+# - for stripping operation
+# - for rollback operation
+# * proper streaming (race free) of the docket file
+# * track garbage data to eventually allow rewriting -existing- sidedata.
+# * Exchange-wise, we will also need to do something more efficient than
+# keeping references to the affected revlogs, especially memory-wise when
+# rewriting sidedata.
+# * introduce a proper solution to reduce the number of filelog related files.
+# * use caching for reading sidedata (similar to what we do for data).
+# * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
+# * Improvements to consider
+# - avoid compression header in chunk using the default compression?
+# - forbid "inline" compression mode entirely?
+#   - split the data offset and flag field (the 2 bytes saved are mostly trouble)
+# - keep track of uncompressed -chunk- size (to preallocate memory better)
+# - keep track of chain base or size (probably not that useful anymore)
coreconfigitem(
b'experimental',
b'revlogv2',
@@ -1272,6 +1303,14 @@ coreconfigitem(
experimental=True,
)
coreconfigitem(
+ # Enable this dirstate format *when creating a new repository*.
+ # Which format to use for existing repos is controlled by .hg/requires
+ b'format',
+ b'exp-dirstate-v2',
+ default=False,
+ experimental=True,
+)
+coreconfigitem(
b'format',
b'dotencode',
default=True,
@@ -1310,6 +1349,20 @@ coreconfigitem(
default=lambda: [b'zstd', b'zlib'],
alias=[(b'experimental', b'format.compression')],
)
+# Experimental TODOs:
+#
+# * Same as for revlogv2 (but for the reduction of the number of files)
+# * Improvements to investigate
+# - storing .hgtags fnode
+# - storing `rank` of changesets
+# - storing branch related identifier
+
+coreconfigitem(
+ b'format',
+ b'exp-use-changelog-v2',
+ default=None,
+ experimental=True,
+)
coreconfigitem(
b'format',
b'usefncache',
@@ -1342,20 +1395,6 @@ coreconfigitem(
b'use-persistent-nodemap',
default=_persistent_nodemap_default,
)
-# TODO needs to grow a docket file to at least store the last offset of the data
-# file when rewriting sidedata.
-# Will also need a way of dealing with garbage data if we allow rewriting
-# *existing* sidedata.
-# Exchange-wise, we will also need to do something more efficient than keeping
-# references to the affected revlogs, especially memory-wise when rewriting
-# sidedata.
-# Also... compress the sidedata? (this should be coming very soon)
-coreconfigitem(
- b'format',
- b'exp-revlogv2.2',
- default=False,
- experimental=True,
-)
coreconfigitem(
b'format',
b'exp-use-copies-side-data-changeset',
@@ -1364,12 +1403,6 @@ coreconfigitem(
)
coreconfigitem(
b'format',
- b'exp-use-side-data',
- default=False,
- experimental=True,
-)
-coreconfigitem(
- b'format',
b'use-share-safe',
default=False,
)
diff --git a/mercurial/context.py b/mercurial/context.py
--- a/mercurial/context.py
+++ b/mercurial/context.py
@@ -14,14 +14,9 @@ import stat
from .i18n import _
from .node import (
- addednodeid,
hex,
- modifiednodeid,
- nullid,
nullrev,
short,
- wdirfilenodeids,
- wdirhex,
)
from .pycompat import (
getattr,
@@ -140,7 +135,7 @@ class basectx(object):
removed.append(fn)
elif flag1 != flag2:
modified.append(fn)
- elif node2 not in wdirfilenodeids:
+ elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
# When comparing files between two commits, we save time by
# not comparing the file contents when the nodeids differ.
# Note that this means we incorrectly report a reverted change
@@ -737,7 +732,7 @@ class changectx(basectx):
n2 = c2._parents[0]._node
cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
if not cahs:
- anc = nullid
+ anc = self._repo.nodeconstants.nullid
elif len(cahs) == 1:
anc = cahs[0]
else:
@@ -1132,7 +1127,11 @@ class basefilectx(object):
_path = self._path
fl = self._filelog
parents = self._filelog.parents(self._filenode)
- pl = [(_path, node, fl) for node in parents if node != nullid]
+ pl = [
+ (_path, node, fl)
+ for node in parents
+ if node != self._repo.nodeconstants.nullid
+ ]
r = fl.renamed(self._filenode)
if r:
@@ -1393,6 +1392,9 @@ class committablectx(basectx):
def __bytes__(self):
return bytes(self._parents[0]) + b"+"
+ def hex(self):
+ self._repo.nodeconstants.wdirhex
+
__str__ = encoding.strmethod(__bytes__)
def __nonzero__(self):
@@ -1556,12 +1558,12 @@ class workingctx(committablectx):
return self._repo.dirstate[key] not in b"?r"
def hex(self):
- return wdirhex
+ return self._repo.nodeconstants.wdirhex
@propertycache
def _parents(self):
p = self._repo.dirstate.parents()
- if p[1] == nullid:
+ if p[1] == self._repo.nodeconstants.nullid:
p = p[:-1]
# use unfiltered repo to delay/avoid loading obsmarkers
unfi = self._repo.unfiltered()
@@ -1572,7 +1574,9 @@ class workingctx(committablectx):
for n in p
]
- def setparents(self, p1node, p2node=nullid):
+ def setparents(self, p1node, p2node=None):
+ if p2node is None:
+ p2node = self._repo.nodeconstants.nullid
dirstate = self._repo.dirstate
with dirstate.parentchange():
copies = dirstate.setparents(p1node, p2node)
@@ -1584,7 +1588,7 @@ class workingctx(committablectx):
for f in copies:
if f not in pctx and copies[f] in pctx:
dirstate.copy(copies[f], f)
- if p2node == nullid:
+ if p2node == self._repo.nodeconstants.nullid:
for f, s in sorted(dirstate.copies().items()):
if f not in pctx and s not in pctx:
dirstate.copy(None, f)
@@ -1697,12 +1701,8 @@ class workingctx(committablectx):
% uipath(f)
)
rejected.append(f)
- elif ds[f] in b'amn':
+ elif not ds.set_tracked(f):
ui.warn(_(b"%s already tracked!\n") % uipath(f))
- elif ds[f] == b'r':
- ds.normallookup(f)
- else:
- ds.add(f)
return rejected
def forget(self, files, prefix=b""):
@@ -1711,13 +1711,9 @@ class workingctx(committablectx):
uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
rejected = []
for f in files:
- if f not in ds:
+ if not ds.set_untracked(f):
self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
rejected.append(f)
- elif ds[f] != b'a':
- ds.remove(f)
- else:
- ds.drop(f)
return rejected
def copy(self, source, dest):
@@ -1738,10 +1734,7 @@ class workingctx(committablectx):
else:
with self._repo.wlock():
ds = self._repo.dirstate
- if ds[dest] in b'?':
- ds.add(dest)
- elif ds[dest] in b'r':
- ds.normallookup(dest)
+ ds.set_tracked(dest)
ds.copy(source, dest)
def match(
@@ -1836,7 +1829,7 @@ class workingctx(committablectx):
def _poststatusfixup(self, status, fixup):
"""update dirstate for files that are actually clean"""
poststatus = self._repo.postdsstatus()
- if fixup or poststatus:
+ if fixup or poststatus or self._repo.dirstate._dirty:
try:
oldid = self._repo.dirstate.identity()
@@ -1845,9 +1838,15 @@ class workingctx(committablectx):
# wlock can invalidate the dirstate, so cache normal _after_
# taking the lock
with self._repo.wlock(False):
- if self._repo.dirstate.identity() == oldid:
+ dirstate = self._repo.dirstate
+ if dirstate.identity() == oldid:
if fixup:
- normal = self._repo.dirstate.normal
+ if dirstate.pendingparentchange():
+ normal = lambda f: dirstate.update_file(
+ f, p1_tracked=True, wc_tracked=True
+ )
+ else:
+ normal = dirstate.set_clean
for f in fixup:
normal(f)
# write changes out explicitly, because nesting
@@ -1944,8 +1943,8 @@ class workingctx(committablectx):
ff = self._flagfunc
for i, l in (
- (addednodeid, status.added),
- (modifiednodeid, status.modified),
+ (self._repo.nodeconstants.addednodeid, status.added),
+ (self._repo.nodeconstants.modifiednodeid, status.modified),
):
for f in l:
man[f] = i
@@ -2023,19 +2022,23 @@ class workingctx(committablectx):
def markcommitted(self, node):
with self._repo.dirstate.parentchange():
for f in self.modified() + self.added():
- self._repo.dirstate.normal(f)
+ self._repo.dirstate.update_file(
+ f, p1_tracked=True, wc_tracked=True
+ )
for f in self.removed():
- self._repo.dirstate.drop(f)
+ self._repo.dirstate.update_file(
+ f, p1_tracked=False, wc_tracked=False
+ )
self._repo.dirstate.setparents(node)
self._repo._quick_access_changeid_invalidate()
+ sparse.aftercommit(self._repo, node)
+
# write changes out explicitly, because nesting wlock at
# runtime may prevent 'wlock.release()' in 'repo.commit()'
# from immediately doing so for subsequent changing files
self._repo.dirstate.write(self._repo.currenttransaction())
- sparse.aftercommit(self._repo, node)
-
def mergestate(self, clean=False):
if clean:
return mergestatemod.mergestate.clean(self._repo)
@@ -2070,13 +2073,18 @@ class committablefilectx(basefilectx):
path = self.copysource()
if not path:
return None
- return path, self._changectx._parents[0]._manifest.get(path, nullid)
+ return (
+ path,
+ self._changectx._parents[0]._manifest.get(
+ path, self._repo.nodeconstants.nullid
+ ),
+ )
def parents(self):
'''return parent filectxs, following copies if necessary'''
def filenode(ctx, path):
- return ctx._manifest.get(path, nullid)
+ return ctx._manifest.get(path, self._repo.nodeconstants.nullid)
path = self._path
fl = self._filelog
@@ -2094,7 +2102,7 @@ class committablefilectx(basefilectx):
return [
self._parentfilectx(p, fileid=n, filelog=l)
for p, n, l in pl
- if n != nullid
+ if n != self._repo.nodeconstants.nullid
]
def children(self):
@@ -2222,7 +2230,9 @@ class overlayworkingctx(committablectx):
# ``overlayworkingctx`` (e.g. with --collapse).
util.clearcachedproperty(self, b'_manifest')
- def setparents(self, p1node, p2node=nullid):
+ def setparents(self, p1node, p2node=None):
+ if p2node is None:
+ p2node = self._repo.nodeconstants.nullid
assert p1node == self._wrappedctx.node()
self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
@@ -2248,10 +2258,10 @@ class overlayworkingctx(committablectx):
flag = self._flagfunc
for path in self.added():
- man[path] = addednodeid
+ man[path] = self._repo.nodeconstants.addednodeid
man.setflag(path, flag(path))
for path in self.modified():
- man[path] = modifiednodeid
+ man[path] = self._repo.nodeconstants.modifiednodeid
man.setflag(path, flag(path))
for path in self.removed():
del man[path]
@@ -2827,7 +2837,7 @@ class memctx(committablectx):
)
self._rev = None
self._node = None
- parents = [(p or nullid) for p in parents]
+ parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
p1, p2 = parents
self._parents = [self._repo[p] for p in (p1, p2)]
files = sorted(set(files))
@@ -2866,10 +2876,10 @@ class memctx(committablectx):
man = pctx.manifest().copy()
for f in self._status.modified:
- man[f] = modifiednodeid
+ man[f] = self._repo.nodeconstants.modifiednodeid
for f in self._status.added:
- man[f] = addednodeid
+ man[f] = self._repo.nodeconstants.addednodeid
for f in self._status.removed:
if f in man:
@@ -3006,12 +3016,12 @@ class metadataonlyctx(committablectx):
# sanity check to ensure that the reused manifest parents are
# manifests of our commit parents
mp1, mp2 = self.manifestctx().parents
- if p1 != nullid and p1.manifestnode() != mp1:
+ if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
raise RuntimeError(
r"can't reuse the manifest: its p1 "
r"doesn't match the new ctx p1"
)
- if p2 != nullid and p2.manifestnode() != mp2:
+ if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
raise RuntimeError(
r"can't reuse the manifest: "
r"its p2 doesn't match the new ctx p2"
diff --git a/mercurial/copies.py b/mercurial/copies.py
--- a/mercurial/copies.py
+++ b/mercurial/copies.py
@@ -12,10 +12,7 @@ import collections
import os
from .i18n import _
-from .node import (
- nullid,
- nullrev,
-)
+from .node import nullrev
from . import (
match as matchmod,
@@ -321,15 +318,16 @@ def _changesetforwardcopies(a, b, match)
if p in children_count:
children_count[p] += 1
revinfo = _revinfo_getter(repo, match)
- return _combine_changeset_copies(
- revs,
- children_count,
- b.rev(),
- revinfo,
- match,
- isancestor,
- multi_thread,
- )
+ with repo.changelog.reading():
+ return _combine_changeset_copies(
+ revs,
+ children_count,
+ b.rev(),
+ revinfo,
+ match,
+ isancestor,
+ multi_thread,
+ )
else:
# When not using side-data, we will process the edges "from" the parent.
# so we need a full mapping of the parent -> children relation.
@@ -579,7 +577,7 @@ def _revinfo_getter_extra(repo):
parents = fctx._filelog.parents(fctx._filenode)
nb_parents = 0
for n in parents:
- if n != nullid:
+ if n != repo.nullid:
nb_parents += 1
return nb_parents >= 2
diff --git a/mercurial/debugcommands.py b/mercurial/debugcommands.py
--- a/mercurial/debugcommands.py
+++ b/mercurial/debugcommands.py
@@ -7,6 +7,7 @@
from __future__ import absolute_import
+import binascii
import codecs
import collections
import contextlib
@@ -30,7 +31,6 @@ from .i18n import _
from .node import (
bin,
hex,
- nullid,
nullrev,
short,
)
@@ -92,6 +92,7 @@ from . import (
wireprotoserver,
wireprotov2peer,
)
+from .interfaces import repository
from .utils import (
cborutil,
compression,
@@ -794,7 +795,7 @@ def debugdeltachain(ui, repo, file_=None
index = r.index
start = r.start
length = r.length
- generaldelta = r.version & revlog.FLAG_GENERALDELTA
+ generaldelta = r._generaldelta
withsparseread = getattr(r, '_withsparseread', False)
def revinfo(rev):
@@ -941,6 +942,12 @@ def debugdeltachain(ui, repo, file_=None
),
(b'', b'dates', True, _(b'display the saved mtime')),
(b'', b'datesort', None, _(b'sort by saved mtime')),
+ (
+ b'',
+ b'all',
+ False,
+ _(b'display dirstate-v2 tree nodes that would not exist in v1'),
+ ),
],
_(b'[OPTION]...'),
)
@@ -953,29 +960,56 @@ def debugstate(ui, repo, **opts):
datesort = opts.get('datesort')
if datesort:
- keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
+ keyfunc = lambda x: (
+ x[1].v1_mtime(),
+ x[0],
+ ) # sort by mtime, then by filename
else:
keyfunc = None # sort by filename
- for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
- if ent[3] == -1:
+ if opts['all']:
+ entries = list(repo.dirstate._map.debug_iter())
+ else:
+ entries = list(pycompat.iteritems(repo.dirstate))
+ entries.sort(key=keyfunc)
+ for file_, ent in entries:
+ if ent.v1_mtime() == -1:
timestr = b'unset '
elif nodates:
timestr = b'set '
else:
timestr = time.strftime(
- "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
+ "%Y-%m-%d %H:%M:%S ", time.localtime(ent.v1_mtime())
)
timestr = encoding.strtolocal(timestr)
- if ent[1] & 0o20000:
+ if ent.mode & 0o20000:
mode = b'lnk'
else:
- mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
- ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
+ mode = b'%3o' % (ent.v1_mode() & 0o777 & ~util.umask)
+ ui.write(
+ b"%c %s %10d %s%s\n"
+ % (ent.v1_state(), mode, ent.v1_size(), timestr, file_)
+ )
for f in repo.dirstate.copies():
ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
@command(
+ b'debugdirstateignorepatternshash',
+ [],
+ _(b''),
+)
+def debugdirstateignorepatternshash(ui, repo, **opts):
+ """show the hash of ignore patterns stored in dirstate if v2,
+    or nothing for dirstate-v1
+ """
+ if repo.dirstate._use_dirstate_v2:
+ docket = repo.dirstate._map.docket
+ hash_len = 20 # 160 bits for SHA-1
+ hash_bytes = docket.tree_metadata[-hash_len:]
+ ui.write(binascii.hexlify(hash_bytes) + b'\n')
+
+
+@command(
b'debugdiscovery',
[
(b'', b'old', None, _(b'use old-style discovery')),
@@ -1667,7 +1701,7 @@ def debugindexdot(ui, repo, file_=None,
node = r.node(i)
pp = r.parents(node)
ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
- if pp[1] != nullid:
+ if pp[1] != repo.nullid:
ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
ui.write(b"}\n")
@@ -1675,7 +1709,7 @@ def debugindexdot(ui, repo, file_=None,
@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
"""show stats related to the changelog index"""
- repo.changelog.shortest(nullid, 1)
+ repo.changelog.shortest(repo.nullid, 1)
index = repo.changelog.index
if not util.safehasattr(index, b'stats'):
raise error.Abort(_(b'debugindexstats only works with native code'))
@@ -2425,7 +2459,7 @@ def debugobsolete(ui, repo, precursor=No
# arbitrary node identifiers, possibly not present in the
# local repository.
n = bin(s)
- if len(n) != len(nullid):
+ if len(n) != repo.nodeconstants.nodelen:
raise TypeError()
return n
except TypeError:
@@ -2603,7 +2637,7 @@ def debugpathcomplete(ui, repo, *specs,
files, dirs = set(), set()
adddir, addfile = dirs.add, files.add
for f, st in pycompat.iteritems(dirstate):
- if f.startswith(spec) and st[0] in acceptable:
+ if f.startswith(spec) and st.state in acceptable:
if fixpaths:
f = f.replace(b'/', pycompat.ossep)
if fullpaths:
@@ -2749,9 +2783,9 @@ def debugpickmergetool(ui, repo, *pats,
changedelete = opts[b'changedelete']
for path in ctx.walk(m):
fctx = ctx[path]
- try:
- if not ui.debugflag:
- ui.pushbuffer(error=True)
+ with ui.silent(
+ error=True
+ ) if not ui.debugflag else util.nullcontextmanager():
tool, toolpath = filemerge._picktool(
repo,
ui,
@@ -2760,9 +2794,6 @@ def debugpickmergetool(ui, repo, *pats,
b'l' in fctx.flags(),
changedelete,
)
- finally:
- if not ui.debugflag:
- ui.popbuffer()
ui.write(b'%s = %s\n' % (path, tool))
@@ -2973,8 +3004,8 @@ def debugrevlog(ui, repo, file_=None, **
)
return 0
- v = r.version
- format = v & 0xFFFF
+ format = r._format_version
+ v = r._format_flags
flags = []
gdelta = False
if v & revlog.FLAG_INLINE_DATA:
@@ -3328,7 +3359,7 @@ def debugrevlogindex(ui, repo, file_=Non
try:
pp = r.parents(node)
except Exception:
- pp = [nullid, nullid]
+ pp = [repo.nullid, repo.nullid]
if ui.verbose:
ui.write(
b"% 6d % 9d % 7d % 7d %s %s %s\n"
@@ -3742,7 +3773,9 @@ def debugbackupbundle(ui, repo, *pats, *
for n in chlist:
if limit is not None and count >= limit:
break
- parents = [True for p in other.changelog.parents(n) if p != nullid]
+ parents = [
+ True for p in other.changelog.parents(n) if p != repo.nullid
+ ]
if opts.get(b"no_merges") and len(parents) == 2:
continue
count += 1
@@ -3787,16 +3820,13 @@ def debugbackupbundle(ui, repo, *pats, *
if revs:
revs = [other.lookup(rev) for rev in revs]
- quiet = ui.quiet
- try:
- ui.quiet = True
- other, chlist, cleanupfn = bundlerepo.getremotechanges(
- ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
- )
- except error.LookupError:
- continue
- finally:
- ui.quiet = quiet
+ with ui.silent():
+ try:
+ other, chlist, cleanupfn = bundlerepo.getremotechanges(
+ ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
+ )
+ except error.LookupError:
+ continue
try:
if not chlist:
@@ -4046,7 +4076,7 @@ def debuguiprompt(ui, prompt=b''):
def debugupdatecaches(ui, repo, *pats, **opts):
"""warm all known caches in the repository"""
with repo.wlock(), repo.lock():
- repo.updatecaches(full=True)
+ repo.updatecaches(caches=repository.CACHES_ALL)
@command(
@@ -4573,17 +4603,16 @@ def debugwireproto(ui, repo, path=None,
ui.write(_(b'creating http peer for wire protocol version 2\n'))
# We go through makepeer() because we need an API descriptor for
# the peer instance to be useful.
- with ui.configoverride(
+ maybe_silent = (
+ ui.silent()
+ if opts[b'nologhandshake']
+ else util.nullcontextmanager()
+ )
+ with maybe_silent, ui.configoverride(
{(b'experimental', b'httppeer.advertise-v2'): True}
):
- if opts[b'nologhandshake']:
- ui.pushbuffer()
-
peer = httppeer.makepeer(ui, path, opener=opener)
- if opts[b'nologhandshake']:
- ui.popbuffer()
-
if not isinstance(peer, httppeer.httpv2peer):
raise error.Abort(
_(
diff --git a/mercurial/dirstate.py b/mercurial/dirstate.py
--- a/mercurial/dirstate.py
+++ b/mercurial/dirstate.py
@@ -14,12 +14,12 @@ import os
import stat
from .i18n import _
-from .node import nullid
from .pycompat import delattr
from hgdemandimport import tracing
from . import (
+ dirstatemap,
encoding,
error,
match as matchmod,
@@ -28,7 +28,6 @@ from . import (
pycompat,
scmutil,
sparse,
- txnutil,
util,
)
@@ -40,11 +39,13 @@ from .interfaces import (
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')
+SUPPORTS_DIRSTATE_V2 = rustmod is not None
+
propertycache = util.propertycache
filecache = scmutil.filecache
-_rangemask = 0x7FFFFFFF
+_rangemask = dirstatemap.rangemask
-dirstatetuple = parsers.dirstatetuple
+DirstateItem = parsers.DirstateItem
class repocache(filecache):
@@ -71,10 +72,39 @@ def _getfsnow(vfs):
vfs.unlink(tmpname)
+def requires_parents_change(func):
+ def wrap(self, *args, **kwargs):
+ if not self.pendingparentchange():
+ msg = 'calling `%s` outside of a parentchange context'
+ msg %= func.__name__
+ raise error.ProgrammingError(msg)
+ return func(self, *args, **kwargs)
+
+ return wrap
+
+
+def requires_no_parents_change(func):
+ def wrap(self, *args, **kwargs):
+ if self.pendingparentchange():
+ msg = 'calling `%s` inside of a parentchange context'
+ msg %= func.__name__
+ raise error.ProgrammingError(msg)
+ return func(self, *args, **kwargs)
+
+ return wrap
+
+
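+ # Usage sketch (illustrative comment, grounded in the decorators above):
+ # methods decorated with @requires_parents_change (such as `update_file`
+ # below) are only valid inside a `with dirstate.parentchange():` block,
+ # while @requires_no_parents_change methods (such as `set_tracked`) must
+ # be called outside of one; calling them in the wrong context raises a
+ # ProgrammingError.
+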
@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
def __init__(
- self, opener, ui, root, validate, sparsematchfn, nodeconstants
+ self,
+ opener,
+ ui,
+ root,
+ validate,
+ sparsematchfn,
+ nodeconstants,
+ use_dirstate_v2,
):
"""Create a new dirstate object.
@@ -82,6 +112,7 @@ class dirstate(object):
dirstate file; root is the root of the directory tracked by
the dirstate.
"""
+ self._use_dirstate_v2 = use_dirstate_v2
self._nodeconstants = nodeconstants
self._opener = opener
self._validate = validate
@@ -100,7 +131,7 @@ class dirstate(object):
self._plchangecallbacks = {}
self._origpl = None
self._updatedfiles = set()
- self._mapcls = dirstatemap
+ self._mapcls = dirstatemap.dirstatemap
# Access and cache cwd early, so we don't access it for the first time
# after a working-copy update caused it to not exist (accessing it then
# raises an exception).
@@ -140,7 +171,11 @@ class dirstate(object):
def _map(self):
"""Return the dirstate contents (see documentation for dirstatemap)."""
self._map = self._mapcls(
- self._ui, self._opener, self._root, self._nodeconstants
+ self._ui,
+ self._opener,
+ self._root,
+ self._nodeconstants,
+ self._use_dirstate_v2,
)
return self._map
@@ -288,8 +323,15 @@ class dirstate(object):
r marked for removal
a marked for addition
? not tracked
+
+ XXX The "state" is a bit obscure to be in the "public" API. we should
+ consider migrating all user of this to going through the dirstate entry
+ instead.
"""
- return self._map.get(key, (b"?",))[0]
+ entry = self._map.get(key)
+ if entry is not None:
+ return entry.state
+ return b'?'
def __contains__(self, key):
return key in self._map
@@ -302,6 +344,9 @@ class dirstate(object):
iteritems = items
+ def directories(self):
+ return self._map.directories()
+
def parents(self):
return [self._validate(p) for p in self._pl]
@@ -311,18 +356,25 @@ class dirstate(object):
def p2(self):
return self._validate(self._pl[1])
+ @property
+ def in_merge(self):
+ """True if a merge is in progress"""
+ return self._pl[1] != self._nodeconstants.nullid
+
def branch(self):
return encoding.tolocal(self._branch)
- def setparents(self, p1, p2=nullid):
+ def setparents(self, p1, p2=None):
"""Set dirstate parents to p1 and p2.
- When moving from two parents to one, 'm' merged entries a
+ When moving from two parents to one, "merged" entries are
adjusted to normal and previous copy records discarded and
returned by the call.
See localrepo.setparents()
"""
+ if p2 is None:
+ p2 = self._nodeconstants.nullid
if self._parentwriters == 0:
raise ValueError(
b"cannot set dirstate parent outside of "
@@ -335,27 +387,29 @@ class dirstate(object):
self._origpl = self._pl
self._map.setparents(p1, p2)
copies = {}
- if oldp2 != nullid and p2 == nullid:
- candidatefiles = self._map.nonnormalset.union(
- self._map.otherparentset
- )
+ if (
+ oldp2 != self._nodeconstants.nullid
+ and p2 == self._nodeconstants.nullid
+ ):
+ candidatefiles = self._map.non_normal_or_other_parent_paths()
+
for f in candidatefiles:
s = self._map.get(f)
if s is None:
continue
- # Discard 'm' markers when moving away from a merge state
- if s[0] == b'm':
+ # Discard "merged" markers when moving away from a merge state
+ if s.merged:
source = self._map.copymap.get(f)
if source:
copies[f] = source
- self.normallookup(f)
+ self._normallookup(f)
# Also fix up otherparent markers
- elif s[0] == b'n' and s[2] == -2:
+ elif s.from_p2:
source = self._map.copymap.get(f)
if source:
copies[f] = source
- self.add(f)
+ self._add(f)
return copies
def setbranch(self, branch):
@@ -408,27 +462,246 @@ class dirstate(object):
def copies(self):
return self._map.copymap
- def _addpath(self, f, state, mode, size, mtime):
- oldstate = self[f]
- if state == b'a' or oldstate == b'r':
+ @requires_no_parents_change
+ def set_tracked(self, filename):
+ """a "public" method for generic code to mark a file as tracked
+
+ This function is to be called outside of "update/merge" case. For
+ example by a command like `hg add X`.
+
+ return True if the file was previously untracked, False otherwise.
+ """
+ entry = self._map.get(filename)
+ if entry is None:
+ self._add(filename)
+ return True
+ elif not entry.tracked:
+ self._normallookup(filename)
+ return True
+ # XXX This is probably overkill for most cases, but we need this to
+ # fully replace the `normallookup` call with the `set_tracked` one.
+ # Consider smoothing this in the future.
+ self.set_possibly_dirty(filename)
+ return False
+
+ @requires_no_parents_change
+ def set_untracked(self, filename):
+ """a "public" method for generic code to mark a file as untracked
+
+ This function is to be called outside of "update/merge" case. For
+ example by a command like `hg remove X`.
+
+ return True if the file was previously tracked, False otherwise.
+ """
+ entry = self._map.get(filename)
+ if entry is None:
+ return False
+ elif entry.added:
+ self._drop(filename)
+ return True
+ else:
+ self._remove(filename)
+ return True
+
+ @requires_no_parents_change
+ def set_clean(self, filename, parentfiledata=None):
+ """record that the current state of the file on disk is known to be clean"""
+ self._dirty = True
+ self._updatedfiles.add(filename)
+ self._normal(filename, parentfiledata=parentfiledata)
+
+ @requires_no_parents_change
+ def set_possibly_dirty(self, filename):
+ """record that the current state of the file on disk is unknown"""
+ self._dirty = True
+ self._updatedfiles.add(filename)
+ self._map.set_possibly_dirty(filename)
+
+ @requires_parents_change
+ def update_file_p1(
+ self,
+ filename,
+ p1_tracked,
+ ):
+ """Set a file as tracked in the parent (or not)
+
+ This is to be called when adjusting the dirstate to a new parent after
+ a history rewriting operation.
+
+ It should not be called during a merge (p2 != nullid) and should only
+ be used within a `with dirstate.parentchange():` context.
+ """
+ if self.in_merge:
+ msg = b'update_file_p1 should not be called when merging'
+ raise error.ProgrammingError(msg)
+ entry = self._map.get(filename)
+ if entry is None:
+ wc_tracked = False
+ else:
+ wc_tracked = entry.tracked
+ possibly_dirty = False
+ if p1_tracked and wc_tracked:
+ # the underlying reference might have changed, we will have to
+ # check it.
+ possibly_dirty = True
+ elif not (p1_tracked or wc_tracked):
+ # the file is no longer relevant to anyone
+ self._drop(filename)
+ elif (not p1_tracked) and wc_tracked:
+ if entry is not None and entry.added:
+ return # avoid dropping copy information (maybe?)
+ elif p1_tracked and not wc_tracked:
+ pass
+ else:
+ assert False, 'unreachable'
+
+ # this means we are making a call for a file whose data we do not
+ # really care about (e.g. added or removed); however this should be a
+ # minor overhead compared to the overall update process calling this.
+ parentfiledata = None
+ if wc_tracked:
+ parentfiledata = self._get_filedata(filename)
+
+ self._updatedfiles.add(filename)
+ self._map.reset_state(
+ filename,
+ wc_tracked,
+ p1_tracked,
+ possibly_dirty=possibly_dirty,
+ parentfiledata=parentfiledata,
+ )
+ if (
+ parentfiledata is not None
+ and parentfiledata[2] > self._lastnormaltime
+ ):
+ # Remember the most recent modification timeslot for status(),
+ # to make sure we won't miss future size-preserving file content
+ # modifications that happen within the same timeslot.
+ self._lastnormaltime = parentfiledata[2]
+
+ @requires_parents_change
+ def update_file(
+ self,
+ filename,
+ wc_tracked,
+ p1_tracked,
+ p2_tracked=False,
+ merged=False,
+ clean_p1=False,
+ clean_p2=False,
+ possibly_dirty=False,
+ parentfiledata=None,
+ ):
+ """update the information about a file in the dirstate
+
+ This is to be called when the dirstate's parent changes to keep track
+ of the file's situation with regard to the working copy and its parent.
+
+ This function must be called within a `dirstate.parentchange` context.
+
+ note: the API is at an early stage and we might need to adjust it
+ depending on what information ends up being relevant and useful to
+ other processing.
+ """
+ if merged and (clean_p1 or clean_p2):
+ msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
+ raise error.ProgrammingError(msg)
+
+ # note: I do not think we need to double-check name clashes here since
+ # we are in an update/merge case that should already have taken care of
+ # this. The tests agree.
+
+ self._dirty = True
+ self._updatedfiles.add(filename)
+
+ need_parent_file_data = (
+ not (possibly_dirty or clean_p2 or merged)
+ and wc_tracked
+ and p1_tracked
+ )
+
+ # this means we are making a call for a file whose data we do not
+ # really care about (e.g. added or removed); however this should be a
+ # minor overhead compared to the overall update process calling this.
+ if need_parent_file_data:
+ if parentfiledata is None:
+ parentfiledata = self._get_filedata(filename)
+ mtime = parentfiledata[2]
+
+ if mtime > self._lastnormaltime:
+ # Remember the most recent modification timeslot for
+ # status(), to make sure we won't miss future
+ # size-preserving file content modifications that happen
+ # within the same timeslot.
+ self._lastnormaltime = mtime
+
+ self._map.reset_state(
+ filename,
+ wc_tracked,
+ p1_tracked,
+ p2_tracked=p2_tracked,
+ merged=merged,
+ clean_p1=clean_p1,
+ clean_p2=clean_p2,
+ possibly_dirty=possibly_dirty,
+ parentfiledata=parentfiledata,
+ )
+ if (
+ parentfiledata is not None
+ and parentfiledata[2] > self._lastnormaltime
+ ):
+ # Remember the most recent modification timeslot for status(),
+ # to make sure we won't miss future size-preserving file content
+ # modifications that happen within the same timeslot.
+ self._lastnormaltime = parentfiledata[2]
+
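+ # A minimal usage sketch for `update_file` (hypothetical call site, the
+ # path name is only for illustration): update/merge code is expected to
+ # do something along these lines:
+ #
+ #     with repo.dirstate.parentchange():
+ #         repo.dirstate.update_file(
+ #             b'path/to/file', wc_tracked=True, p1_tracked=True
+ #         )
+ #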
+ def _addpath(
+ self,
+ f,
+ mode=0,
+ size=None,
+ mtime=None,
+ added=False,
+ merged=False,
+ from_p2=False,
+ possibly_dirty=False,
+ ):
+ entry = self._map.get(f)
+ if added or entry is not None and entry.removed:
scmutil.checkfilename(f)
if self._map.hastrackeddir(f):
- raise error.Abort(
- _(b'directory %r already in dirstate') % pycompat.bytestr(f)
- )
+ msg = _(b'directory %r already in dirstate')
+ msg %= pycompat.bytestr(f)
+ raise error.Abort(msg)
# shadows
for d in pathutil.finddirs(f):
if self._map.hastrackeddir(d):
break
entry = self._map.get(d)
- if entry is not None and entry[0] != b'r':
- raise error.Abort(
- _(b'file %r in dirstate clashes with %r')
- % (pycompat.bytestr(d), pycompat.bytestr(f))
- )
+ if entry is not None and not entry.removed:
+ msg = _(b'file %r in dirstate clashes with %r')
+ msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
+ raise error.Abort(msg)
self._dirty = True
self._updatedfiles.add(f)
- self._map.addfile(f, oldstate, state, mode, size, mtime)
+ self._map.addfile(
+ f,
+ mode=mode,
+ size=size,
+ mtime=mtime,
+ added=added,
+ merged=merged,
+ from_p2=from_p2,
+ possibly_dirty=possibly_dirty,
+ )
+
+ def _get_filedata(self, filename):
+ """returns"""
+ s = os.lstat(self._join(filename))
+ mode = s.st_mode
+ size = s.st_size
+ mtime = s[stat.ST_MTIME]
+ return (mode, size, mtime)
def normal(self, f, parentfiledata=None):
"""Mark a file normal and clean.
@@ -440,14 +713,28 @@ class dirstate(object):
determined the file was clean, to limit the risk of the
file having been changed by an external process between the
moment where the file was determined to be clean and now."""
+ if self.pendingparentchange():
+ util.nouideprecwarn(
+ b"do not use `normal` inside of update/merge context."
+ b" Use `update_file` or `update_file_p1`",
+ b'6.0',
+ stacklevel=2,
+ )
+ else:
+ util.nouideprecwarn(
+ b"do not use `normal` outside of update/merge context."
+ b" Use `set_tracked`",
+ b'6.0',
+ stacklevel=2,
+ )
+ self._normal(f, parentfiledata=parentfiledata)
+
+ def _normal(self, f, parentfiledata=None):
if parentfiledata:
(mode, size, mtime) = parentfiledata
else:
- s = os.lstat(self._join(f))
- mode = s.st_mode
- size = s.st_size
- mtime = s[stat.ST_MTIME]
- self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
+ (mode, size, mtime) = self._get_filedata(f)
+ self._addpath(f, mode=mode, size=size, mtime=mtime)
self._map.copymap.pop(f, None)
if f in self._map.nonnormalset:
self._map.nonnormalset.remove(f)
@@ -459,77 +746,171 @@ class dirstate(object):
def normallookup(self, f):
'''Mark a file normal, but possibly dirty.'''
- if self._pl[1] != nullid:
+ if self.pendingparentchange():
+ util.nouideprecwarn(
+ b"do not use `normallookup` inside of update/merge context."
+ b" Use `update_file` or `update_file_p1`",
+ b'6.0',
+ stacklevel=2,
+ )
+ else:
+ util.nouideprecwarn(
+ b"do not use `normallookup` outside of update/merge context."
+ b" Use `set_possibly_dirty` or `set_tracked`",
+ b'6.0',
+ stacklevel=2,
+ )
+ self._normallookup(f)
+
+ def _normallookup(self, f):
+ '''Mark a file normal, but possibly dirty.'''
+ if self.in_merge:
# if there is a merge going on and the file was either
- # in state 'm' (-1) or coming from other parent (-2) before
+ # "merged" or coming from other parent (-2) before
# being removed, restore that state.
entry = self._map.get(f)
if entry is not None:
- if entry[0] == b'r' and entry[2] in (-1, -2):
+ # XXX this should probably be dealt with at a lower level
+ # (see `merged_removed` and `from_p2_removed`)
+ if entry.merged_removed or entry.from_p2_removed:
source = self._map.copymap.get(f)
- if entry[2] == -1:
- self.merge(f)
- elif entry[2] == -2:
- self.otherparent(f)
- if source:
+ if entry.merged_removed:
+ self._merge(f)
+ elif entry.from_p2_removed:
+ self._otherparent(f)
+ if source is not None:
self.copy(source, f)
return
- if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
+ elif entry.merged or entry.from_p2:
return
- self._addpath(f, b'n', 0, -1, -1)
+ self._addpath(f, possibly_dirty=True)
self._map.copymap.pop(f, None)
def otherparent(self, f):
'''Mark as coming from the other parent, always dirty.'''
- if self._pl[1] == nullid:
- raise error.Abort(
- _(b"setting %r to other parent only allowed in merges") % f
+ if self.pendingparentchange():
+ util.nouideprecwarn(
+ b"do not use `otherparent` inside of update/merge context."
+ b" Use `update_file` or `update_file_p1`",
+ b'6.0',
+ stacklevel=2,
)
- if f in self and self[f] == b'n':
+ else:
+ util.nouideprecwarn(
+ b"do not use `otherparent` outside of update/merge context."
+ b"It should have been set by the update/merge code",
+ b'6.0',
+ stacklevel=2,
+ )
+ self._otherparent(f)
+
+ def _otherparent(self, f):
+ if not self.in_merge:
+ msg = _(b"setting %r to other parent only allowed in merges") % f
+ raise error.Abort(msg)
+ entry = self._map.get(f)
+ if entry is not None and entry.tracked:
# merge-like
- self._addpath(f, b'm', 0, -2, -1)
+ self._addpath(f, merged=True)
else:
# add-like
- self._addpath(f, b'n', 0, -2, -1)
+ self._addpath(f, from_p2=True)
self._map.copymap.pop(f, None)
def add(self, f):
'''Mark a file added.'''
- self._addpath(f, b'a', 0, -1, -1)
- self._map.copymap.pop(f, None)
+ if self.pendingparentchange():
+ util.nouideprecwarn(
+ b"do not use `add` inside of update/merge context."
+ b" Use `update_file`",
+ b'6.0',
+ stacklevel=2,
+ )
+ else:
+ util.nouideprecwarn(
+ b"do not use `remove` outside of update/merge context."
+ b" Use `set_tracked`",
+ b'6.0',
+ stacklevel=2,
+ )
+ self._add(f)
+
+ def _add(self, filename):
+ """internal function to mark a file as added"""
+ self._addpath(filename, added=True)
+ self._map.copymap.pop(filename, None)
def remove(self, f):
- '''Mark a file removed.'''
+ '''Mark a file removed'''
+ if self.pendingparentchange():
+ util.nouideprecwarn(
+ b"do not use `remove` insde of update/merge context."
+ b" Use `update_file` or `update_file_p1`",
+ b'6.0',
+ stacklevel=2,
+ )
+ else:
+ util.nouideprecwarn(
+ b"do not use `remove` outside of update/merge context."
+ b" Use `set_untracked`",
+ b'6.0',
+ stacklevel=2,
+ )
+ self._remove(f)
+
+ def _remove(self, filename):
+ """internal function to mark a file removed"""
self._dirty = True
- oldstate = self[f]
- size = 0
- if self._pl[1] != nullid:
- entry = self._map.get(f)
- if entry is not None:
- # backup the previous state
- if entry[0] == b'm': # merge
- size = -1
- elif entry[0] == b'n' and entry[2] == -2: # other parent
- size = -2
- self._map.otherparentset.add(f)
- self._updatedfiles.add(f)
- self._map.removefile(f, oldstate, size)
- if size == 0:
- self._map.copymap.pop(f, None)
+ self._updatedfiles.add(filename)
+ self._map.removefile(filename, in_merge=self.in_merge)
def merge(self, f):
'''Mark a file merged.'''
- if self._pl[1] == nullid:
- return self.normallookup(f)
- return self.otherparent(f)
+ if self.pendingparentchange():
+ util.nouideprecwarn(
+ b"do not use `merge` inside of update/merge context."
+ b" Use `update_file`",
+ b'6.0',
+ stacklevel=2,
+ )
+ else:
+ util.nouideprecwarn(
+ b"do not use `merge` outside of update/merge context."
+ b"It should have been set by the update/merge code",
+ b'6.0',
+ stacklevel=2,
+ )
+ self._merge(f)
+
+ def _merge(self, f):
+ if not self.in_merge:
+ return self._normallookup(f)
+ return self._otherparent(f)
def drop(self, f):
'''Drop a file from the dirstate'''
- oldstate = self[f]
- if self._map.dropfile(f, oldstate):
+ if self.pendingparentchange():
+ util.nouideprecwarn(
+ b"do not use `drop` inside of update/merge context."
+ b" Use `update_file`",
+ b'6.0',
+ stacklevel=2,
+ )
+ else:
+ util.nouideprecwarn(
+ b"do not use `drop` outside of update/merge context."
+ b" Use `set_untracked`",
+ b'6.0',
+ stacklevel=2,
+ )
+ self._drop(f)
+
+ def _drop(self, filename):
+ """internal function to drop a file from the dirstate"""
+ if self._map.dropfile(filename):
self._dirty = True
- self._updatedfiles.add(f)
- self._map.copymap.pop(f, None)
+ self._updatedfiles.add(filename)
+ self._map.copymap.pop(filename, None)
def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
if exists is None:
@@ -638,12 +1019,12 @@ class dirstate(object):
if self._origpl is None:
self._origpl = self._pl
- self._map.setparents(parent, nullid)
+ self._map.setparents(parent, self._nodeconstants.nullid)
for f in to_lookup:
- self.normallookup(f)
+ self._normallookup(f)
for f in to_drop:
- self.drop(f)
+ self._drop(f)
self._dirty = True
@@ -679,13 +1060,13 @@ class dirstate(object):
tr.addfilegenerator(
b'dirstate',
(self._filename,),
- self._writedirstate,
+ lambda f: self._writedirstate(tr, f),
location=b'plain',
)
return
st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
- self._writedirstate(st)
+ self._writedirstate(tr, st)
def addparentchangecallback(self, category, callback):
"""add a callback to be called when the wd parents are changed
@@ -698,7 +1079,7 @@ class dirstate(object):
"""
self._plchangecallbacks[category] = callback
- def _writedirstate(self, st):
+ def _writedirstate(self, tr, st):
# notify callbacks about parents change
if self._origpl is not None and self._origpl != self._pl:
for c, callback in sorted(
@@ -716,7 +1097,7 @@ class dirstate(object):
if delaywrite > 0:
# do we have any files to delay for?
for f, e in pycompat.iteritems(self._map):
- if e[0] == b'n' and e[3] == now:
+ if e.need_delay(now):
import time # to avoid useless import
# rather than sleep n seconds, sleep until the next
@@ -728,7 +1109,7 @@ class dirstate(object):
now = end # trust our estimate that the end is near now
break
- self._map.write(st, now)
+ self._map.write(tr, st, now)
self._lastnormaltime = 0
self._dirty = False
@@ -1120,6 +1501,7 @@ class dirstate(object):
warnings,
bad,
traversed,
+ dirty,
) = rustmod.status(
self._map._rustmap,
matcher,
@@ -1133,6 +1515,8 @@ class dirstate(object):
bool(matcher.traversedir),
)
+ self._dirty |= dirty
+
if matcher.traversedir:
for dir in traversed:
matcher.traversedir(dir)
@@ -1267,21 +1651,26 @@ class dirstate(object):
# general. That is much slower than simply accessing and storing the
# tuple members one by one.
t = dget(fn)
- state = t[0]
- mode = t[1]
- size = t[2]
- time = t[3]
+ mode = t.mode
+ size = t.size
+ time = t.mtime
- if not st and state in b"nma":
+ if not st and t.tracked:
dadd(fn)
- elif state == b'n':
+ elif t.merged:
+ madd(fn)
+ elif t.added:
+ aadd(fn)
+ elif t.removed:
+ radd(fn)
+ elif t.tracked:
if (
size >= 0
and (
(size != st.st_size and size != st.st_size & _rangemask)
or ((mode ^ st.st_mode) & 0o100 and checkexec)
)
- or size == -2 # other parent
+ or t.from_p2
or fn in copymap
):
if stat.S_ISLNK(st.st_mode) and size != st.st_size:
@@ -1303,12 +1692,6 @@ class dirstate(object):
ladd(fn)
elif listclean:
cadd(fn)
- elif state == b'm':
- madd(fn)
- elif state == b'a':
- aadd(fn)
- elif state == b'r':
- radd(fn)
status = scmutil.status(
modified, added, removed, deleted, unknown, ignored, clean
)
@@ -1351,7 +1734,8 @@ class dirstate(object):
# output file will be used to create backup of dirstate at this point.
if self._dirty or not self._opener.exists(filename):
self._writedirstate(
- self._opener(filename, b"w", atomictemp=True, checkambig=True)
+ tr,
+ self._opener(filename, b"w", atomictemp=True, checkambig=True),
)
if tr:
@@ -1361,7 +1745,7 @@ class dirstate(object):
tr.addfilegenerator(
b'dirstate',
(self._filename,),
- self._writedirstate,
+ lambda f: self._writedirstate(tr, f),
location=b'plain',
)
@@ -1394,546 +1778,3 @@ class dirstate(object):
def clearbackup(self, tr, backupname):
'''Clear backup file'''
self._opener.unlink(backupname)
-
-
-class dirstatemap(object):
- """Map encapsulating the dirstate's contents.
-
- The dirstate contains the following state:
-
- - `identity` is the identity of the dirstate file, which can be used to
- detect when changes have occurred to the dirstate file.
-
- - `parents` is a pair containing the parents of the working copy. The
- parents are updated by calling `setparents`.
-
- - the state map maps filenames to tuples of (state, mode, size, mtime),
- where state is a single character representing 'normal', 'added',
- 'removed', or 'merged'. It is read by treating the dirstate as a
- dict. File state is updated by calling the `addfile`, `removefile` and
- `dropfile` methods.
-
- - `copymap` maps destination filenames to their source filename.
-
- The dirstate also provides the following views onto the state:
-
- - `nonnormalset` is a set of the filenames that have state other
- than 'normal', or are normal but have an mtime of -1 ('normallookup').
-
- - `otherparentset` is a set of the filenames that are marked as coming
- from the second parent when the dirstate is currently being merged.
-
- - `filefoldmap` is a dict mapping normalized filenames to the denormalized
- form that they appear as in the dirstate.
-
- - `dirfoldmap` is a dict mapping normalized directory names to the
- denormalized form that they appear as in the dirstate.
- """
-
- def __init__(self, ui, opener, root, nodeconstants):
- self._ui = ui
- self._opener = opener
- self._root = root
- self._filename = b'dirstate'
- self._nodelen = 20
- self._nodeconstants = nodeconstants
-
- self._parents = None
- self._dirtyparents = False
-
- # for consistent view between _pl() and _read() invocations
- self._pendingmode = None
-
- @propertycache
- def _map(self):
- self._map = {}
- self.read()
- return self._map
-
- @propertycache
- def copymap(self):
- self.copymap = {}
- self._map
- return self.copymap
-
- def clear(self):
- self._map.clear()
- self.copymap.clear()
- self.setparents(nullid, nullid)
- util.clearcachedproperty(self, b"_dirs")
- util.clearcachedproperty(self, b"_alldirs")
- util.clearcachedproperty(self, b"filefoldmap")
- util.clearcachedproperty(self, b"dirfoldmap")
- util.clearcachedproperty(self, b"nonnormalset")
- util.clearcachedproperty(self, b"otherparentset")
-
- def items(self):
- return pycompat.iteritems(self._map)
-
- # forward for python2,3 compat
- iteritems = items
-
- def __len__(self):
- return len(self._map)
-
- def __iter__(self):
- return iter(self._map)
-
- def get(self, key, default=None):
- return self._map.get(key, default)
-
- def __contains__(self, key):
- return key in self._map
-
- def __getitem__(self, key):
- return self._map[key]
-
- def keys(self):
- return self._map.keys()
-
- def preload(self):
- """Loads the underlying data, if it's not already loaded"""
- self._map
-
- def addfile(self, f, oldstate, state, mode, size, mtime):
- """Add a tracked file to the dirstate."""
- if oldstate in b"?r" and "_dirs" in self.__dict__:
- self._dirs.addpath(f)
- if oldstate == b"?" and "_alldirs" in self.__dict__:
- self._alldirs.addpath(f)
- self._map[f] = dirstatetuple(state, mode, size, mtime)
- if state != b'n' or mtime == -1:
- self.nonnormalset.add(f)
- if size == -2:
- self.otherparentset.add(f)
-
- def removefile(self, f, oldstate, size):
- """
- Mark a file as removed in the dirstate.
-
- The `size` parameter is used to store sentinel values that indicate
- the file's previous state. In the future, we should refactor this
- to be more explicit about what that state is.
- """
- if oldstate not in b"?r" and "_dirs" in self.__dict__:
- self._dirs.delpath(f)
- if oldstate == b"?" and "_alldirs" in self.__dict__:
- self._alldirs.addpath(f)
- if "filefoldmap" in self.__dict__:
- normed = util.normcase(f)
- self.filefoldmap.pop(normed, None)
- self._map[f] = dirstatetuple(b'r', 0, size, 0)
- self.nonnormalset.add(f)
-
- def dropfile(self, f, oldstate):
- """
- Remove a file from the dirstate. Returns True if the file was
- previously recorded.
- """
- exists = self._map.pop(f, None) is not None
- if exists:
- if oldstate != b"r" and "_dirs" in self.__dict__:
- self._dirs.delpath(f)
- if "_alldirs" in self.__dict__:
- self._alldirs.delpath(f)
- if "filefoldmap" in self.__dict__:
- normed = util.normcase(f)
- self.filefoldmap.pop(normed, None)
- self.nonnormalset.discard(f)
- return exists
-
- def clearambiguoustimes(self, files, now):
- for f in files:
- e = self.get(f)
- if e is not None and e[0] == b'n' and e[3] == now:
- self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
- self.nonnormalset.add(f)
-
- def nonnormalentries(self):
- '''Compute the nonnormal dirstate entries from the dmap'''
- try:
- return parsers.nonnormalotherparententries(self._map)
- except AttributeError:
- nonnorm = set()
- otherparent = set()
- for fname, e in pycompat.iteritems(self._map):
- if e[0] != b'n' or e[3] == -1:
- nonnorm.add(fname)
- if e[0] == b'n' and e[2] == -2:
- otherparent.add(fname)
- return nonnorm, otherparent
-
- @propertycache
- def filefoldmap(self):
- """Returns a dictionary mapping normalized case paths to their
- non-normalized versions.
- """
- try:
- makefilefoldmap = parsers.make_file_foldmap
- except AttributeError:
- pass
- else:
- return makefilefoldmap(
- self._map, util.normcasespec, util.normcasefallback
- )
-
- f = {}
- normcase = util.normcase
- for name, s in pycompat.iteritems(self._map):
- if s[0] != b'r':
- f[normcase(name)] = name
- f[b'.'] = b'.' # prevents useless util.fspath() invocation
- return f
-
- def hastrackeddir(self, d):
- """
- Returns True if the dirstate contains a tracked (not removed) file
- in this directory.
- """
- return d in self._dirs
-
- def hasdir(self, d):
- """
- Returns True if the dirstate contains a file (tracked or removed)
- in this directory.
- """
- return d in self._alldirs
-
- @propertycache
- def _dirs(self):
- return pathutil.dirs(self._map, b'r')
-
- @propertycache
- def _alldirs(self):
- return pathutil.dirs(self._map)
-
- def _opendirstatefile(self):
- fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
- if self._pendingmode is not None and self._pendingmode != mode:
- fp.close()
- raise error.Abort(
- _(b'working directory state may be changed parallelly')
- )
- self._pendingmode = mode
- return fp
-
- def parents(self):
- if not self._parents:
- try:
- fp = self._opendirstatefile()
- st = fp.read(2 * self._nodelen)
- fp.close()
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
- # File doesn't exist, so the current state is empty
- st = b''
-
- l = len(st)
- if l == self._nodelen * 2:
- self._parents = (
- st[: self._nodelen],
- st[self._nodelen : 2 * self._nodelen],
- )
- elif l == 0:
- self._parents = (nullid, nullid)
- else:
- raise error.Abort(
- _(b'working directory state appears damaged!')
- )
-
- return self._parents
-
- def setparents(self, p1, p2):
- self._parents = (p1, p2)
- self._dirtyparents = True
-
- def read(self):
- # ignore HG_PENDING because identity is used only for writing
- self.identity = util.filestat.frompath(
- self._opener.join(self._filename)
- )
-
- try:
- fp = self._opendirstatefile()
- try:
- st = fp.read()
- finally:
- fp.close()
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
- return
- if not st:
- return
-
- if util.safehasattr(parsers, b'dict_new_presized'):
- # Make an estimate of the number of files in the dirstate based on
- # its size. This trades wasting some memory for avoiding costly
- # resizes. Each entry have a prefix of 17 bytes followed by one or
- # two path names. Studies on various large-scale real-world repositories
- # found 54 bytes a reasonable upper limit for the average path names.
- # Copy entries are ignored for the sake of this estimate.
- self._map = parsers.dict_new_presized(len(st) // 71)
-
- # Python's garbage collector triggers a GC each time a certain number
- # of container objects (the number being defined by
- # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
- # for each file in the dirstate. The C version then immediately marks
- # them as not to be tracked by the collector. However, this has no
- # effect on when GCs are triggered, only on what objects the GC looks
- # into. This means that O(number of files) GCs are unavoidable.
- # Depending on when in the process's lifetime the dirstate is parsed,
- # this can get very expensive. As a workaround, disable GC while
- # parsing the dirstate.
- #
- # (we cannot decorate the function directly since it is in a C module)
- parse_dirstate = util.nogc(parsers.parse_dirstate)
- p = parse_dirstate(self._map, self.copymap, st)
- if not self._dirtyparents:
- self.setparents(*p)
-
- # Avoid excess attribute lookups by fast pathing certain checks
- self.__contains__ = self._map.__contains__
- self.__getitem__ = self._map.__getitem__
- self.get = self._map.get
-
- def write(self, st, now):
- st.write(
- parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
- )
- st.close()
- self._dirtyparents = False
- self.nonnormalset, self.otherparentset = self.nonnormalentries()
-
- @propertycache
- def nonnormalset(self):
- nonnorm, otherparents = self.nonnormalentries()
- self.otherparentset = otherparents
- return nonnorm
-
- @propertycache
- def otherparentset(self):
- nonnorm, otherparents = self.nonnormalentries()
- self.nonnormalset = nonnorm
- return otherparents
-
- @propertycache
- def identity(self):
- self._map
- return self.identity
-
- @propertycache
- def dirfoldmap(self):
- f = {}
- normcase = util.normcase
- for name in self._dirs:
- f[normcase(name)] = name
- return f
-
-
-if rustmod is not None:
-
- class dirstatemap(object):
- def __init__(self, ui, opener, root, nodeconstants):
- self._nodeconstants = nodeconstants
- self._ui = ui
- self._opener = opener
- self._root = root
- self._filename = b'dirstate'
- self._parents = None
- self._dirtyparents = False
-
- # for consistent view between _pl() and _read() invocations
- self._pendingmode = None
-
- def addfile(self, *args, **kwargs):
- return self._rustmap.addfile(*args, **kwargs)
-
- def removefile(self, *args, **kwargs):
- return self._rustmap.removefile(*args, **kwargs)
-
- def dropfile(self, *args, **kwargs):
- return self._rustmap.dropfile(*args, **kwargs)
-
- def clearambiguoustimes(self, *args, **kwargs):
- return self._rustmap.clearambiguoustimes(*args, **kwargs)
-
- def nonnormalentries(self):
- return self._rustmap.nonnormalentries()
-
- def get(self, *args, **kwargs):
- return self._rustmap.get(*args, **kwargs)
-
- @propertycache
- def _rustmap(self):
- """
- Fills the Dirstatemap when called.
- Use `self._inner_rustmap` if reading the dirstate is not necessary.
- """
- self._rustmap = self._inner_rustmap
- self.read()
- return self._rustmap
-
- @propertycache
- def _inner_rustmap(self):
- """
- Does not fill the Dirstatemap when called. This allows for
- optimizations where only setting/getting the parents is needed.
- """
- self._inner_rustmap = rustmod.DirstateMap(self._root)
- return self._inner_rustmap
-
- @property
- def copymap(self):
- return self._rustmap.copymap()
-
- def preload(self):
- self._rustmap
-
- def clear(self):
- self._rustmap.clear()
- self._inner_rustmap.clear()
- self.setparents(nullid, nullid)
- util.clearcachedproperty(self, b"_dirs")
- util.clearcachedproperty(self, b"_alldirs")
- util.clearcachedproperty(self, b"dirfoldmap")
-
- def items(self):
- return self._rustmap.items()
-
- def keys(self):
- return iter(self._rustmap)
-
- def __contains__(self, key):
- return key in self._rustmap
-
- def __getitem__(self, item):
- return self._rustmap[item]
-
- def __len__(self):
- return len(self._rustmap)
-
- def __iter__(self):
- return iter(self._rustmap)
-
- # forward for python2,3 compat
- iteritems = items
-
- def _opendirstatefile(self):
- fp, mode = txnutil.trypending(
- self._root, self._opener, self._filename
- )
- if self._pendingmode is not None and self._pendingmode != mode:
- fp.close()
- raise error.Abort(
- _(b'working directory state may be changed parallelly')
- )
- self._pendingmode = mode
- return fp
-
- def setparents(self, p1, p2):
- self._rustmap.setparents(p1, p2)
- self._parents = (p1, p2)
- self._dirtyparents = True
-
- def parents(self):
- if not self._parents:
- try:
- fp = self._opendirstatefile()
- st = fp.read(40)
- fp.close()
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
- # File doesn't exist, so the current state is empty
- st = b''
-
- try:
- self._parents = self._inner_rustmap.parents(st)
- except ValueError:
- raise error.Abort(
- _(b'working directory state appears damaged!')
- )
-
- return self._parents
-
- def read(self):
- # ignore HG_PENDING because identity is used only for writing
- self.identity = util.filestat.frompath(
- self._opener.join(self._filename)
- )
-
- try:
- fp = self._opendirstatefile()
- try:
- st = fp.read()
- finally:
- fp.close()
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
- return
- if not st:
- return
-
- parse_dirstate = util.nogc(self._rustmap.read)
- parents = parse_dirstate(st)
- if parents and not self._dirtyparents:
- self.setparents(*parents)
-
- self.__contains__ = self._rustmap.__contains__
- self.__getitem__ = self._rustmap.__getitem__
- self.get = self._rustmap.get
-
- def write(self, st, now):
- parents = self.parents()
- st.write(self._rustmap.write(parents[0], parents[1], now))
- st.close()
- self._dirtyparents = False
-
- @propertycache
- def filefoldmap(self):
- """Returns a dictionary mapping normalized case paths to their
- non-normalized versions.
- """
- return self._rustmap.filefoldmapasdict()
-
- def hastrackeddir(self, d):
- self._dirs # Trigger Python's propertycache
- return self._rustmap.hastrackeddir(d)
-
- def hasdir(self, d):
- self._dirs # Trigger Python's propertycache
- return self._rustmap.hasdir(d)
-
- @propertycache
- def _dirs(self):
- return self._rustmap.getdirs()
-
- @propertycache
- def _alldirs(self):
- return self._rustmap.getalldirs()
-
- @propertycache
- def identity(self):
- self._rustmap
- return self.identity
-
- @property
- def nonnormalset(self):
- nonnorm = self._rustmap.non_normal_entries()
- return nonnorm
-
- @propertycache
- def otherparentset(self):
- otherparents = self._rustmap.other_parent_entries()
- return otherparents
-
- @propertycache
- def dirfoldmap(self):
- f = {}
- normcase = util.normcase
- for name in self._dirs:
- f[normcase(name)] = name
- return f
diff --git a/mercurial/dirstate.py b/mercurial/dirstatemap.py
copy from mercurial/dirstate.py
copy to mercurial/dirstatemap.py
--- a/mercurial/dirstate.py
+++ b/mercurial/dirstatemap.py
@@ -1,1399 +1,45 @@
-# dirstate.py - working directory tracking for mercurial
-#
-# Copyright 2005-2007 Olivia Mackall
+# dirstatemap.py
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
-import collections
-import contextlib
import errno
-import os
-import stat
from .i18n import _
-from .node import nullid
-from .pycompat import delattr
-
-from hgdemandimport import tracing
from . import (
- encoding,
error,
- match as matchmod,
pathutil,
policy,
pycompat,
- scmutil,
- sparse,
txnutil,
util,
)
-from .interfaces import (
- dirstate as intdirstate,
- util as interfaceutil,
+from .dirstateutils import (
+ docket as docketmod,
)
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')
propertycache = util.propertycache
-filecache = scmutil.filecache
-_rangemask = 0x7FFFFFFF
-dirstatetuple = parsers.dirstatetuple
-
-
-class repocache(filecache):
- """filecache for files in .hg/"""
-
- def join(self, obj, fname):
- return obj._opener.join(fname)
-
-
-class rootcache(filecache):
- """filecache for files in the repository root"""
-
- def join(self, obj, fname):
- return obj._join(fname)
-
-
-def _getfsnow(vfs):
- '''Get "now" timestamp on filesystem'''
- tmpfd, tmpname = vfs.mkstemp()
- try:
- return os.fstat(tmpfd)[stat.ST_MTIME]
- finally:
- os.close(tmpfd)
- vfs.unlink(tmpname)
+DirstateItem = parsers.DirstateItem
-@interfaceutil.implementer(intdirstate.idirstate)
-class dirstate(object):
- def __init__(
- self, opener, ui, root, validate, sparsematchfn, nodeconstants
- ):
- """Create a new dirstate object.
-
- opener is an open()-like callable that can be used to open the
- dirstate file; root is the root of the directory tracked by
- the dirstate.
- """
- self._nodeconstants = nodeconstants
- self._opener = opener
- self._validate = validate
- self._root = root
- self._sparsematchfn = sparsematchfn
- # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
- # UNC path pointing to root share (issue4557)
- self._rootdir = pathutil.normasprefix(root)
- self._dirty = False
- self._lastnormaltime = 0
- self._ui = ui
- self._filecache = {}
- self._parentwriters = 0
- self._filename = b'dirstate'
- self._pendingfilename = b'%s.pending' % self._filename
- self._plchangecallbacks = {}
- self._origpl = None
- self._updatedfiles = set()
- self._mapcls = dirstatemap
- # Access and cache cwd early, so we don't access it for the first time
- # after a working-copy update caused it to not exist (accessing it then
- # raises an exception).
- self._cwd
-
- def prefetch_parents(self):
- """make sure the parents are loaded
-
- Used to avoid a race condition.
- """
- self._pl
-
- @contextlib.contextmanager
- def parentchange(self):
- """Context manager for handling dirstate parents.
-
- If an exception occurs in the scope of the context manager,
- the incoherent dirstate won't be written when wlock is
- released.
- """
- self._parentwriters += 1
- yield
- # Typically we want the "undo" step of a context manager in a
- # finally block so it happens even when an exception
- # occurs. In this case, however, we only want to decrement
- # parentwriters if the code in the with statement exits
- # normally, so we don't have a try/finally here on purpose.
- self._parentwriters -= 1
-
- def pendingparentchange(self):
- """Returns true if the dirstate is in the middle of a set of changes
- that modify the dirstate parent.
- """
- return self._parentwriters > 0
-
- @propertycache
- def _map(self):
- """Return the dirstate contents (see documentation for dirstatemap)."""
- self._map = self._mapcls(
- self._ui, self._opener, self._root, self._nodeconstants
- )
- return self._map
-
- @property
- def _sparsematcher(self):
- """The matcher for the sparse checkout.
-
- The working directory may not include every file from a manifest. The
- matcher obtained by this property will match a path if it is to be
- included in the working directory.
- """
- # TODO there is potential to cache this property. For now, the matcher
- # is resolved on every access. (But the called function does use a
- # cache to keep the lookup fast.)
- return self._sparsematchfn()
-
- @repocache(b'branch')
- def _branch(self):
- try:
- return self._opener.read(b"branch").strip() or b"default"
- except IOError as inst:
- if inst.errno != errno.ENOENT:
- raise
- return b"default"
-
- @property
- def _pl(self):
- return self._map.parents()
-
- def hasdir(self, d):
- return self._map.hastrackeddir(d)
-
- @rootcache(b'.hgignore')
- def _ignore(self):
- files = self._ignorefiles()
- if not files:
- return matchmod.never()
-
- pats = [b'include:%s' % f for f in files]
- return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
-
- @propertycache
- def _slash(self):
- return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
-
- @propertycache
- def _checklink(self):
- return util.checklink(self._root)
-
- @propertycache
- def _checkexec(self):
- return bool(util.checkexec(self._root))
-
- @propertycache
- def _checkcase(self):
- return not util.fscasesensitive(self._join(b'.hg'))
-
- def _join(self, f):
- # much faster than os.path.join()
- # it's safe because f is always a relative path
- return self._rootdir + f
-
- def flagfunc(self, buildfallback):
- if self._checklink and self._checkexec:
-
- def f(x):
- try:
- st = os.lstat(self._join(x))
- if util.statislink(st):
- return b'l'
- if util.statisexec(st):
- return b'x'
- except OSError:
- pass
- return b''
-
- return f
-
- fallback = buildfallback()
- if self._checklink:
-
- def f(x):
- if os.path.islink(self._join(x)):
- return b'l'
- if b'x' in fallback(x):
- return b'x'
- return b''
-
- return f
- if self._checkexec:
-
- def f(x):
- if b'l' in fallback(x):
- return b'l'
- if util.isexec(self._join(x)):
- return b'x'
- return b''
-
- return f
- else:
- return fallback
-
- @propertycache
- def _cwd(self):
- # internal config: ui.forcecwd
- forcecwd = self._ui.config(b'ui', b'forcecwd')
- if forcecwd:
- return forcecwd
- return encoding.getcwd()
-
- def getcwd(self):
- """Return the path from which a canonical path is calculated.
-
- This path should be used to resolve file patterns or to convert
- canonical paths back to file paths for display. It shouldn't be
- used to get real file paths. Use vfs functions instead.
- """
- cwd = self._cwd
- if cwd == self._root:
- return b''
- # self._root ends with a path separator if self._root is '/' or 'C:\'
- rootsep = self._root
- if not util.endswithsep(rootsep):
- rootsep += pycompat.ossep
- if cwd.startswith(rootsep):
- return cwd[len(rootsep) :]
- else:
- # we're outside the repo. return an absolute path.
- return cwd
-
- def pathto(self, f, cwd=None):
- if cwd is None:
- cwd = self.getcwd()
- path = util.pathto(self._root, cwd, f)
- if self._slash:
- return util.pconvert(path)
- return path
-
- def __getitem__(self, key):
- """Return the current state of key (a filename) in the dirstate.
-
- States are:
- n normal
- m needs merging
- r marked for removal
- a marked for addition
- ? not tracked
- """
- return self._map.get(key, (b"?",))[0]
-
- def __contains__(self, key):
- return key in self._map
-
- def __iter__(self):
- return iter(sorted(self._map))
-
- def items(self):
- return pycompat.iteritems(self._map)
-
- iteritems = items
-
- def parents(self):
- return [self._validate(p) for p in self._pl]
-
- def p1(self):
- return self._validate(self._pl[0])
-
- def p2(self):
- return self._validate(self._pl[1])
-
- def branch(self):
- return encoding.tolocal(self._branch)
-
- def setparents(self, p1, p2=nullid):
- """Set dirstate parents to p1 and p2.
-
- When moving from two parents to one, 'm' merged entries a
- adjusted to normal and previous copy records discarded and
- returned by the call.
-
- See localrepo.setparents()
- """
- if self._parentwriters == 0:
- raise ValueError(
- b"cannot set dirstate parent outside of "
- b"dirstate.parentchange context manager"
- )
-
- self._dirty = True
- oldp2 = self._pl[1]
- if self._origpl is None:
- self._origpl = self._pl
- self._map.setparents(p1, p2)
- copies = {}
- if oldp2 != nullid and p2 == nullid:
- candidatefiles = self._map.nonnormalset.union(
- self._map.otherparentset
- )
- for f in candidatefiles:
- s = self._map.get(f)
- if s is None:
- continue
-
- # Discard 'm' markers when moving away from a merge state
- if s[0] == b'm':
- source = self._map.copymap.get(f)
- if source:
- copies[f] = source
- self.normallookup(f)
- # Also fix up otherparent markers
- elif s[0] == b'n' and s[2] == -2:
- source = self._map.copymap.get(f)
- if source:
- copies[f] = source
- self.add(f)
- return copies
-
- def setbranch(self, branch):
- self.__class__._branch.set(self, encoding.fromlocal(branch))
- f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
- try:
- f.write(self._branch + b'\n')
- f.close()
-
- # make sure filecache has the correct stat info for _branch after
- # replacing the underlying file
- ce = self._filecache[b'_branch']
- if ce:
- ce.refresh()
- except: # re-raises
- f.discard()
- raise
-
- def invalidate(self):
- """Causes the next access to reread the dirstate.
-
- This is different from localrepo.invalidatedirstate() because it always
- rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
- check whether the dirstate has changed before rereading it."""
-
- for a in ("_map", "_branch", "_ignore"):
- if a in self.__dict__:
- delattr(self, a)
- self._lastnormaltime = 0
- self._dirty = False
- self._updatedfiles.clear()
- self._parentwriters = 0
- self._origpl = None
-
- def copy(self, source, dest):
- """Mark dest as a copy of source. Unmark dest if source is None."""
- if source == dest:
- return
- self._dirty = True
- if source is not None:
- self._map.copymap[dest] = source
- self._updatedfiles.add(source)
- self._updatedfiles.add(dest)
- elif self._map.copymap.pop(dest, None):
- self._updatedfiles.add(dest)
-
- def copied(self, file):
- return self._map.copymap.get(file, None)
-
- def copies(self):
- return self._map.copymap
-
- def _addpath(self, f, state, mode, size, mtime):
- oldstate = self[f]
- if state == b'a' or oldstate == b'r':
- scmutil.checkfilename(f)
- if self._map.hastrackeddir(f):
- raise error.Abort(
- _(b'directory %r already in dirstate') % pycompat.bytestr(f)
- )
- # shadows
- for d in pathutil.finddirs(f):
- if self._map.hastrackeddir(d):
- break
- entry = self._map.get(d)
- if entry is not None and entry[0] != b'r':
- raise error.Abort(
- _(b'file %r in dirstate clashes with %r')
- % (pycompat.bytestr(d), pycompat.bytestr(f))
- )
- self._dirty = True
- self._updatedfiles.add(f)
- self._map.addfile(f, oldstate, state, mode, size, mtime)
-
- def normal(self, f, parentfiledata=None):
- """Mark a file normal and clean.
-
- parentfiledata: (mode, size, mtime) of the clean file
-
- parentfiledata should be computed from memory (for mode,
- size), as or close as possible from the point where we
- determined the file was clean, to limit the risk of the
- file having been changed by an external process between the
- moment where the file was determined to be clean and now."""
- if parentfiledata:
- (mode, size, mtime) = parentfiledata
- else:
- s = os.lstat(self._join(f))
- mode = s.st_mode
- size = s.st_size
- mtime = s[stat.ST_MTIME]
- self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
- self._map.copymap.pop(f, None)
- if f in self._map.nonnormalset:
- self._map.nonnormalset.remove(f)
- if mtime > self._lastnormaltime:
- # Remember the most recent modification timeslot for status(),
- # to make sure we won't miss future size-preserving file content
- # modifications that happen within the same timeslot.
- self._lastnormaltime = mtime
-
- def normallookup(self, f):
- '''Mark a file normal, but possibly dirty.'''
- if self._pl[1] != nullid:
- # if there is a merge going on and the file was either
- # in state 'm' (-1) or coming from other parent (-2) before
- # being removed, restore that state.
- entry = self._map.get(f)
- if entry is not None:
- if entry[0] == b'r' and entry[2] in (-1, -2):
- source = self._map.copymap.get(f)
- if entry[2] == -1:
- self.merge(f)
- elif entry[2] == -2:
- self.otherparent(f)
- if source:
- self.copy(source, f)
- return
- if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
- return
- self._addpath(f, b'n', 0, -1, -1)
- self._map.copymap.pop(f, None)
-
- def otherparent(self, f):
- '''Mark as coming from the other parent, always dirty.'''
- if self._pl[1] == nullid:
- raise error.Abort(
- _(b"setting %r to other parent only allowed in merges") % f
- )
- if f in self and self[f] == b'n':
- # merge-like
- self._addpath(f, b'm', 0, -2, -1)
- else:
- # add-like
- self._addpath(f, b'n', 0, -2, -1)
- self._map.copymap.pop(f, None)
-
- def add(self, f):
- '''Mark a file added.'''
- self._addpath(f, b'a', 0, -1, -1)
- self._map.copymap.pop(f, None)
-
- def remove(self, f):
- '''Mark a file removed.'''
- self._dirty = True
- oldstate = self[f]
- size = 0
- if self._pl[1] != nullid:
- entry = self._map.get(f)
- if entry is not None:
- # backup the previous state
- if entry[0] == b'm': # merge
- size = -1
- elif entry[0] == b'n' and entry[2] == -2: # other parent
- size = -2
- self._map.otherparentset.add(f)
- self._updatedfiles.add(f)
- self._map.removefile(f, oldstate, size)
- if size == 0:
- self._map.copymap.pop(f, None)
-
- def merge(self, f):
- '''Mark a file merged.'''
- if self._pl[1] == nullid:
- return self.normallookup(f)
- return self.otherparent(f)
-
- def drop(self, f):
- '''Drop a file from the dirstate'''
- oldstate = self[f]
- if self._map.dropfile(f, oldstate):
- self._dirty = True
- self._updatedfiles.add(f)
- self._map.copymap.pop(f, None)
-
- def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
- if exists is None:
- exists = os.path.lexists(os.path.join(self._root, path))
- if not exists:
- # Maybe a path component exists
- if not ignoremissing and b'/' in path:
- d, f = path.rsplit(b'/', 1)
- d = self._normalize(d, False, ignoremissing, None)
- folded = d + b"/" + f
- else:
- # No path components, preserve original case
- folded = path
- else:
- # recursively normalize leading directory components
- # against dirstate
- if b'/' in normed:
- d, f = normed.rsplit(b'/', 1)
- d = self._normalize(d, False, ignoremissing, True)
- r = self._root + b"/" + d
- folded = d + b"/" + util.fspath(f, r)
- else:
- folded = util.fspath(normed, self._root)
- storemap[normed] = folded
-
- return folded
-
- def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
- normed = util.normcase(path)
- folded = self._map.filefoldmap.get(normed, None)
- if folded is None:
- if isknown:
- folded = path
- else:
- folded = self._discoverpath(
- path, normed, ignoremissing, exists, self._map.filefoldmap
- )
- return folded
-
- def _normalize(self, path, isknown, ignoremissing=False, exists=None):
- normed = util.normcase(path)
- folded = self._map.filefoldmap.get(normed, None)
- if folded is None:
- folded = self._map.dirfoldmap.get(normed, None)
- if folded is None:
- if isknown:
- folded = path
- else:
- # store discovered result in dirfoldmap so that future
- # normalizefile calls don't start matching directories
- folded = self._discoverpath(
- path, normed, ignoremissing, exists, self._map.dirfoldmap
- )
- return folded
-
- def normalize(self, path, isknown=False, ignoremissing=False):
- """
- normalize the case of a pathname when on a casefolding filesystem
-
- isknown specifies whether the filename came from walking the
- disk, to avoid extra filesystem access.
-
- If ignoremissing is True, missing path are returned
- unchanged. Otherwise, we try harder to normalize possibly
- existing path components.
-
- The normalized case is determined based on the following precedence:
-
- - version of name already stored in the dirstate
- - version of name stored on disk
- - version provided via command arguments
- """
-
- if self._checkcase:
- return self._normalize(path, isknown, ignoremissing)
- return path
-
- def clear(self):
- self._map.clear()
- self._lastnormaltime = 0
- self._updatedfiles.clear()
- self._dirty = True
-
- def rebuild(self, parent, allfiles, changedfiles=None):
- if changedfiles is None:
- # Rebuild entire dirstate
- to_lookup = allfiles
- to_drop = []
- lastnormaltime = self._lastnormaltime
- self.clear()
- self._lastnormaltime = lastnormaltime
- elif len(changedfiles) < 10:
- # Avoid turning allfiles into a set, which can be expensive if it's
- # large.
- to_lookup = []
- to_drop = []
- for f in changedfiles:
- if f in allfiles:
- to_lookup.append(f)
- else:
- to_drop.append(f)
- else:
- changedfilesset = set(changedfiles)
- to_lookup = changedfilesset & set(allfiles)
- to_drop = changedfilesset - to_lookup
-
- if self._origpl is None:
- self._origpl = self._pl
- self._map.setparents(parent, nullid)
-
- for f in to_lookup:
- self.normallookup(f)
- for f in to_drop:
- self.drop(f)
-
- self._dirty = True
-
- def identity(self):
- """Return identity of dirstate itself to detect changing in storage
-
- If identity of previous dirstate is equal to this, writing
- changes based on the former dirstate out can keep consistency.
- """
- return self._map.identity
-
- def write(self, tr):
- if not self._dirty:
- return
-
- filename = self._filename
- if tr:
- # 'dirstate.write()' is not only for writing in-memory
- # changes out, but also for dropping ambiguous timestamp.
- # delayed writing re-raise "ambiguous timestamp issue".
- # See also the wiki page below for detail:
- # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
-
- # emulate dropping timestamp in 'parsers.pack_dirstate'
- now = _getfsnow(self._opener)
- self._map.clearambiguoustimes(self._updatedfiles, now)
-
- # emulate that all 'dirstate.normal' results are written out
- self._lastnormaltime = 0
- self._updatedfiles.clear()
-
- # delay writing in-memory changes out
- tr.addfilegenerator(
- b'dirstate',
- (self._filename,),
- self._writedirstate,
- location=b'plain',
- )
- return
-
- st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
- self._writedirstate(st)
-
- def addparentchangecallback(self, category, callback):
- """add a callback to be called when the wd parents are changed
-
- Callback will be called with the following arguments:
- dirstate, (oldp1, oldp2), (newp1, newp2)
-
- Category is a unique identifier to allow overwriting an old callback
- with a newer callback.
- """
- self._plchangecallbacks[category] = callback
-
- def _writedirstate(self, st):
- # notify callbacks about parents change
- if self._origpl is not None and self._origpl != self._pl:
- for c, callback in sorted(
- pycompat.iteritems(self._plchangecallbacks)
- ):
- callback(self, self._origpl, self._pl)
- self._origpl = None
- # use the modification time of the newly created temporary file as the
- # filesystem's notion of 'now'
- now = util.fstat(st)[stat.ST_MTIME] & _rangemask
-
- # enough 'delaywrite' prevents 'pack_dirstate' from dropping
- # timestamp of each entries in dirstate, because of 'now > mtime'
- delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
- if delaywrite > 0:
- # do we have any files to delay for?
- for f, e in pycompat.iteritems(self._map):
- if e[0] == b'n' and e[3] == now:
- import time # to avoid useless import
-
- # rather than sleep n seconds, sleep until the next
- # multiple of n seconds
- clock = time.time()
- start = int(clock) - (int(clock) % delaywrite)
- end = start + delaywrite
- time.sleep(end - clock)
- now = end # trust our estimate that the end is near now
- break
-
- self._map.write(st, now)
- self._lastnormaltime = 0
- self._dirty = False
+# a special value used internally for `size` if the file comes from the other parent
+FROM_P2 = -2
- def _dirignore(self, f):
- if self._ignore(f):
- return True
- for p in pathutil.finddirs(f):
- if self._ignore(p):
- return True
- return False
-
- def _ignorefiles(self):
- files = []
- if os.path.exists(self._join(b'.hgignore')):
- files.append(self._join(b'.hgignore'))
- for name, path in self._ui.configitems(b"ui"):
- if name == b'ignore' or name.startswith(b'ignore.'):
- # we need to use os.path.join here rather than self._join
- # because path is arbitrary and user-specified
- files.append(os.path.join(self._rootdir, util.expandpath(path)))
- return files
-
- def _ignorefileandline(self, f):
- files = collections.deque(self._ignorefiles())
- visited = set()
- while files:
- i = files.popleft()
- patterns = matchmod.readpatternfile(
- i, self._ui.warn, sourceinfo=True
- )
- for pattern, lineno, line in patterns:
- kind, p = matchmod._patsplit(pattern, b'glob')
- if kind == b"subinclude":
- if p not in visited:
- files.append(p)
- continue
- m = matchmod.match(
- self._root, b'', [], [pattern], warn=self._ui.warn
- )
- if m(f):
- return (i, lineno, line)
- visited.add(i)
- return (None, -1, b"")
-
- def _walkexplicit(self, match, subrepos):
- """Get stat data about the files explicitly specified by match.
-
- Return a triple (results, dirsfound, dirsnotfound).
- - results is a mapping from filename to stat result. It also contains
- listings mapping subrepos and .hg to None.
- - dirsfound is a list of files found to be directories.
- - dirsnotfound is a list of files that the dirstate thinks are
- directories and that were not found."""
-
- def badtype(mode):
- kind = _(b'unknown')
- if stat.S_ISCHR(mode):
- kind = _(b'character device')
- elif stat.S_ISBLK(mode):
- kind = _(b'block device')
- elif stat.S_ISFIFO(mode):
- kind = _(b'fifo')
- elif stat.S_ISSOCK(mode):
- kind = _(b'socket')
- elif stat.S_ISDIR(mode):
- kind = _(b'directory')
- return _(b'unsupported file type (type is %s)') % kind
-
- badfn = match.bad
- dmap = self._map
- lstat = os.lstat
- getkind = stat.S_IFMT
- dirkind = stat.S_IFDIR
- regkind = stat.S_IFREG
- lnkkind = stat.S_IFLNK
- join = self._join
- dirsfound = []
- foundadd = dirsfound.append
- dirsnotfound = []
- notfoundadd = dirsnotfound.append
-
- if not match.isexact() and self._checkcase:
- normalize = self._normalize
- else:
- normalize = None
-
- files = sorted(match.files())
- subrepos.sort()
- i, j = 0, 0
- while i < len(files) and j < len(subrepos):
- subpath = subrepos[j] + b"/"
- if files[i] < subpath:
- i += 1
- continue
- while i < len(files) and files[i].startswith(subpath):
- del files[i]
- j += 1
-
- if not files or b'' in files:
- files = [b'']
- # constructing the foldmap is expensive, so don't do it for the
- # common case where files is ['']
- normalize = None
- results = dict.fromkeys(subrepos)
- results[b'.hg'] = None
-
- for ff in files:
- if normalize:
- nf = normalize(ff, False, True)
- else:
- nf = ff
- if nf in results:
- continue
-
- try:
- st = lstat(join(nf))
- kind = getkind(st.st_mode)
- if kind == dirkind:
- if nf in dmap:
- # file replaced by dir on disk but still in dirstate
- results[nf] = None
- foundadd((nf, ff))
- elif kind == regkind or kind == lnkkind:
- results[nf] = st
- else:
- badfn(ff, badtype(kind))
- if nf in dmap:
- results[nf] = None
- except OSError as inst: # nf not found on disk - it is dirstate only
- if nf in dmap: # does it exactly match a missing file?
- results[nf] = None
- else: # does it match a missing directory?
- if self._map.hasdir(nf):
- notfoundadd(nf)
- else:
- badfn(ff, encoding.strtolocal(inst.strerror))
-
- # match.files() may contain explicitly-specified paths that shouldn't
- # be taken; drop them from the list of files found. dirsfound/notfound
- # aren't filtered here because they will be tested later.
- if match.anypats():
- for f in list(results):
- if f == b'.hg' or f in subrepos:
- # keep sentinel to disable further out-of-repo walks
- continue
- if not match(f):
- del results[f]
-
- # Case insensitive filesystems cannot rely on lstat() failing to detect
- # a case-only rename. Prune the stat object for any file that does not
- # match the case in the filesystem, if there are multiple files that
- # normalize to the same path.
- if match.isexact() and self._checkcase:
- normed = {}
-
- for f, st in pycompat.iteritems(results):
- if st is None:
- continue
-
- nc = util.normcase(f)
- paths = normed.get(nc)
-
- if paths is None:
- paths = set()
- normed[nc] = paths
-
- paths.add(f)
-
- for norm, paths in pycompat.iteritems(normed):
- if len(paths) > 1:
- for path in paths:
- folded = self._discoverpath(
- path, norm, True, None, self._map.dirfoldmap
- )
- if path != folded:
- results[path] = None
-
- return results, dirsfound, dirsnotfound
-
- def walk(self, match, subrepos, unknown, ignored, full=True):
- """
- Walk recursively through the directory tree, finding all files
- matched by match.
-
- If full is False, maybe skip some known-clean files.
-
- Return a dict mapping filename to stat-like object (either
- mercurial.osutil.stat instance or return value of os.stat()).
-
- """
- # full is a flag that extensions that hook into walk can use -- this
- # implementation doesn't use it at all. This satisfies the contract
- # because we only guarantee a "maybe".
-
- if ignored:
- ignore = util.never
- dirignore = util.never
- elif unknown:
- ignore = self._ignore
- dirignore = self._dirignore
- else:
- # if not unknown and not ignored, drop dir recursion and step 2
- ignore = util.always
- dirignore = util.always
-
- matchfn = match.matchfn
- matchalways = match.always()
- matchtdir = match.traversedir
- dmap = self._map
- listdir = util.listdir
- lstat = os.lstat
- dirkind = stat.S_IFDIR
- regkind = stat.S_IFREG
- lnkkind = stat.S_IFLNK
- join = self._join
-
- exact = skipstep3 = False
- if match.isexact(): # match.exact
- exact = True
- dirignore = util.always # skip step 2
- elif match.prefix(): # match.match, no patterns
- skipstep3 = True
-
- if not exact and self._checkcase:
- normalize = self._normalize
- normalizefile = self._normalizefile
- skipstep3 = False
- else:
- normalize = self._normalize
- normalizefile = None
-
- # step 1: find all explicit files
- results, work, dirsnotfound = self._walkexplicit(match, subrepos)
- if matchtdir:
- for d in work:
- matchtdir(d[0])
- for d in dirsnotfound:
- matchtdir(d)
-
- skipstep3 = skipstep3 and not (work or dirsnotfound)
- work = [d for d in work if not dirignore(d[0])]
-
- # step 2: visit subdirectories
- def traverse(work, alreadynormed):
- wadd = work.append
- while work:
- tracing.counter('dirstate.walk work', len(work))
- nd = work.pop()
- visitentries = match.visitchildrenset(nd)
- if not visitentries:
- continue
- if visitentries == b'this' or visitentries == b'all':
- visitentries = None
- skip = None
- if nd != b'':
- skip = b'.hg'
- try:
- with tracing.log('dirstate.walk.traverse listdir %s', nd):
- entries = listdir(join(nd), stat=True, skip=skip)
- except OSError as inst:
- if inst.errno in (errno.EACCES, errno.ENOENT):
- match.bad(
- self.pathto(nd), encoding.strtolocal(inst.strerror)
- )
- continue
- raise
- for f, kind, st in entries:
- # Some matchers may return files in the visitentries set,
- # instead of 'this', if the matcher explicitly mentions them
- # and is not an exactmatcher. This is acceptable; we do not
- # make any hard assumptions about file-or-directory below
- # based on the presence of `f` in visitentries. If
- # visitchildrenset returned a set, we can always skip the
- # entries *not* in the set it provided regardless of whether
- # they're actually a file or a directory.
- if visitentries and f not in visitentries:
- continue
- if normalizefile:
- # even though f might be a directory, we're only
- # interested in comparing it to files currently in the
- # dmap -- therefore normalizefile is enough
- nf = normalizefile(
- nd and (nd + b"/" + f) or f, True, True
- )
- else:
- nf = nd and (nd + b"/" + f) or f
- if nf not in results:
- if kind == dirkind:
- if not ignore(nf):
- if matchtdir:
- matchtdir(nf)
- wadd(nf)
- if nf in dmap and (matchalways or matchfn(nf)):
- results[nf] = None
- elif kind == regkind or kind == lnkkind:
- if nf in dmap:
- if matchalways or matchfn(nf):
- results[nf] = st
- elif (matchalways or matchfn(nf)) and not ignore(
- nf
- ):
- # unknown file -- normalize if necessary
- if not alreadynormed:
- nf = normalize(nf, False, True)
- results[nf] = st
- elif nf in dmap and (matchalways or matchfn(nf)):
- results[nf] = None
-
- for nd, d in work:
- # alreadynormed means that processwork doesn't have to do any
- # expensive directory normalization
- alreadynormed = not normalize or nd == d
- traverse([d], alreadynormed)
-
- for s in subrepos:
- del results[s]
- del results[b'.hg']
-
- # step 3: visit remaining files from dmap
- if not skipstep3 and not exact:
- # If a dmap file is not in results yet, it was either
- # a) not matching matchfn b) ignored, c) missing, or d) under a
- # symlink directory.
- if not results and matchalways:
- visit = [f for f in dmap]
- else:
- visit = [f for f in dmap if f not in results and matchfn(f)]
- visit.sort()
+# a special value used internally for `size` if the file is modified/merged/added
+NONNORMAL = -1
- if unknown:
- # unknown == True means we walked all dirs under the roots
- # that wasn't ignored, and everything that matched was stat'ed
- # and is already in results.
- # The rest must thus be ignored or under a symlink.
- audit_path = pathutil.pathauditor(self._root, cached=True)
-
- for nf in iter(visit):
- # If a stat for the same file was already added with a
- # different case, don't add one for this, since that would
- # make it appear as if the file exists under both names
- # on disk.
- if (
- normalizefile
- and normalizefile(nf, True, True) in results
- ):
- results[nf] = None
- # Report ignored items in the dmap as long as they are not
- # under a symlink directory.
- elif audit_path.check(nf):
- try:
- results[nf] = lstat(join(nf))
- # file was just ignored, no links, and exists
- except OSError:
- # file doesn't exist
- results[nf] = None
- else:
- # It's either missing or under a symlink directory
- # which we in this case report as missing
- results[nf] = None
- else:
- # We may not have walked the full directory tree above,
- # so stat and check everything we missed.
- iv = iter(visit)
- for st in util.statfiles([join(i) for i in visit]):
- results[next(iv)] = st
- return results
-
- def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
- # Force Rayon (Rust parallelism library) to respect the number of
- # workers. This is a temporary workaround until Rust code knows
- # how to read the config file.
- numcpus = self._ui.configint(b"worker", b"numcpus")
- if numcpus is not None:
- encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
-
- workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
- if not workers_enabled:
- encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
-
- (
- lookup,
- modified,
- added,
- removed,
- deleted,
- clean,
- ignored,
- unknown,
- warnings,
- bad,
- traversed,
- ) = rustmod.status(
- self._map._rustmap,
- matcher,
- self._rootdir,
- self._ignorefiles(),
- self._checkexec,
- self._lastnormaltime,
- bool(list_clean),
- bool(list_ignored),
- bool(list_unknown),
- bool(matcher.traversedir),
- )
-
- if matcher.traversedir:
- for dir in traversed:
- matcher.traversedir(dir)
-
- if self._ui.warn:
- for item in warnings:
- if isinstance(item, tuple):
- file_path, syntax = item
- msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
- file_path,
- syntax,
- )
- self._ui.warn(msg)
- else:
- msg = _(b"skipping unreadable pattern file '%s': %s\n")
- self._ui.warn(
- msg
- % (
- pathutil.canonpath(
- self._rootdir, self._rootdir, item
- ),
- b"No such file or directory",
- )
- )
-
- for (fn, message) in bad:
- matcher.bad(fn, encoding.strtolocal(message))
-
- status = scmutil.status(
- modified=modified,
- added=added,
- removed=removed,
- deleted=deleted,
- unknown=unknown,
- ignored=ignored,
- clean=clean,
- )
- return (lookup, status)
-
- def status(self, match, subrepos, ignored, clean, unknown):
- """Determine the status of the working copy relative to the
- dirstate and return a pair of (unsure, status), where status is of type
- scmutil.status and:
-
- unsure:
- files that might have been modified since the dirstate was
- written, but need to be read to be sure (size is the same
- but mtime differs)
- status.modified:
- files that have definitely been modified since the dirstate
- was written (different size or mode)
- status.clean:
- files that have definitely not been modified since the
- dirstate was written
- """
- listignored, listclean, listunknown = ignored, clean, unknown
- lookup, modified, added, unknown, ignored = [], [], [], [], []
- removed, deleted, clean = [], [], []
-
- dmap = self._map
- dmap.preload()
-
- use_rust = True
-
- allowed_matchers = (
- matchmod.alwaysmatcher,
- matchmod.exactmatcher,
- matchmod.includematcher,
- )
-
- if rustmod is None:
- use_rust = False
- elif self._checkcase:
- # Case-insensitive filesystems are not handled yet
- use_rust = False
- elif subrepos:
- use_rust = False
- elif sparse.enabled:
- use_rust = False
- elif not isinstance(match, allowed_matchers):
- # Some matchers have yet to be implemented
- use_rust = False
-
- if use_rust:
- try:
- return self._rust_status(
- match, listclean, listignored, listunknown
- )
- except rustmod.FallbackError:
- pass
+# a special value used internally for `time` if the time is ambiguous
+AMBIGUOUS_TIME = -1
- def noop(f):
- pass
-
- dcontains = dmap.__contains__
- dget = dmap.__getitem__
- ladd = lookup.append # aka "unsure"
- madd = modified.append
- aadd = added.append
- uadd = unknown.append if listunknown else noop
- iadd = ignored.append if listignored else noop
- radd = removed.append
- dadd = deleted.append
- cadd = clean.append if listclean else noop
- mexact = match.exact
- dirignore = self._dirignore
- checkexec = self._checkexec
- copymap = self._map.copymap
- lastnormaltime = self._lastnormaltime
-
- # We need to do full walks when either
- # - we're listing all clean files, or
- # - match.traversedir does something, because match.traversedir should
- # be called for every dir in the working dir
- full = listclean or match.traversedir is not None
- for fn, st in pycompat.iteritems(
- self.walk(match, subrepos, listunknown, listignored, full=full)
- ):
- if not dcontains(fn):
- if (listignored or mexact(fn)) and dirignore(fn):
- if listignored:
- iadd(fn)
- else:
- uadd(fn)
- continue
-
- # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
- # written like that for performance reasons. dmap[fn] is not a
- # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
- # opcode has fast paths when the value to be unpacked is a tuple or
- # a list, but falls back to creating a full-fledged iterator in
- # general. That is much slower than simply accessing and storing the
- # tuple members one by one.
- t = dget(fn)
- state = t[0]
- mode = t[1]
- size = t[2]
- time = t[3]
-
- if not st and state in b"nma":
- dadd(fn)
- elif state == b'n':
- if (
- size >= 0
- and (
- (size != st.st_size and size != st.st_size & _rangemask)
- or ((mode ^ st.st_mode) & 0o100 and checkexec)
- )
- or size == -2 # other parent
- or fn in copymap
- ):
- if stat.S_ISLNK(st.st_mode) and size != st.st_size:
- # issue6456: Size returned may be longer due to
- # encryption on EXT-4 fscrypt, undecided.
- ladd(fn)
- else:
- madd(fn)
- elif (
- time != st[stat.ST_MTIME]
- and time != st[stat.ST_MTIME] & _rangemask
- ):
- ladd(fn)
- elif st[stat.ST_MTIME] == lastnormaltime:
- # fn may have just been marked as normal and it may have
- # changed in the same second without changing its size.
- # This can happen if we quickly do multiple commits.
- # Force lookup, so we don't miss such a racy file change.
- ladd(fn)
- elif listclean:
- cadd(fn)
- elif state == b'm':
- madd(fn)
- elif state == b'a':
- aadd(fn)
- elif state == b'r':
- radd(fn)
- status = scmutil.status(
- modified, added, removed, deleted, unknown, ignored, clean
- )
- return (lookup, status)
-
- def matches(self, match):
- """
- return files in the dirstate (in whatever state) filtered by match
- """
- dmap = self._map
- if rustmod is not None:
- dmap = self._map._rustmap
-
- if match.always():
- return dmap.keys()
- files = match.files()
- if match.isexact():
- # fast path -- filter the other way around, since typically files is
- # much smaller than dmap
- return [f for f in files if f in dmap]
- if match.prefix() and all(fn in dmap for fn in files):
- # fast path -- all the values are known to be files, so just return
- # that
- return list(files)
- return [f for f in dmap if match(f)]
-
- def _actualfilename(self, tr):
- if tr:
- return self._pendingfilename
- else:
- return self._filename
-
- def savebackup(self, tr, backupname):
- '''Save current dirstate into backup file'''
- filename = self._actualfilename(tr)
- assert backupname != filename
-
- # use '_writedirstate' instead of 'write' to write changes certainly,
- # because the latter omits writing out if transaction is running.
- # output file will be used to create backup of dirstate at this point.
- if self._dirty or not self._opener.exists(filename):
- self._writedirstate(
- self._opener(filename, b"w", atomictemp=True, checkambig=True)
- )
-
- if tr:
- # ensure that subsequent tr.writepending returns True for
- # changes written out above, even if dirstate is never
- # changed after this
- tr.addfilegenerator(
- b'dirstate',
- (self._filename,),
- self._writedirstate,
- location=b'plain',
- )
-
- # ensure that pending file written above is unlinked at
- # failure, even if tr.writepending isn't invoked until the
- # end of this transaction
- tr.registertmp(filename, location=b'plain')
-
- self._opener.tryunlink(backupname)
- # hardlink backup is okay because _writedirstate is always called
- # with an "atomictemp=True" file.
- util.copyfile(
- self._opener.join(filename),
- self._opener.join(backupname),
- hardlink=True,
- )
-
- def restorebackup(self, tr, backupname):
- '''Restore dirstate by backup file'''
- # this "invalidate()" prevents "wlock.release()" from writing
- # changes of dirstate out after restoring from backup file
- self.invalidate()
- filename = self._actualfilename(tr)
- o = self._opener
- if util.samefile(o.join(backupname), o.join(filename)):
- o.unlink(backupname)
- else:
- o.rename(backupname, filename, checkambig=True)
-
- def clearbackup(self, tr, backupname):
- '''Clear backup file'''
- self._opener.unlink(backupname)
+rangemask = 0x7FFFFFFF
class dirstatemap(object):
@@ -1430,13 +76,16 @@ class dirstatemap(object):
denormalized form that they appear as in the dirstate.
"""
- def __init__(self, ui, opener, root, nodeconstants):
+ def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
self._ui = ui
self._opener = opener
self._root = root
self._filename = b'dirstate'
self._nodelen = 20
self._nodeconstants = nodeconstants
+ assert (
+ not use_dirstate_v2
+ ), "should have detected unsupported requirement"
self._parents = None
self._dirtyparents = False
@@ -1459,7 +108,7 @@ class dirstatemap(object):
def clear(self):
self._map.clear()
self.copymap.clear()
- self.setparents(nullid, nullid)
+ self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
util.clearcachedproperty(self, b"_dirs")
util.clearcachedproperty(self, b"_alldirs")
util.clearcachedproperty(self, b"filefoldmap")
@@ -1473,6 +122,8 @@ class dirstatemap(object):
# forward for python2,3 compat
iteritems = items
+ debug_iter = items
+
def __len__(self):
return len(self._map)
@@ -1495,19 +146,161 @@ class dirstatemap(object):
"""Loads the underlying data, if it's not already loaded"""
self._map
- def addfile(self, f, oldstate, state, mode, size, mtime):
+ def _dirs_incr(self, filename, old_entry=None):
+ """incremente the dirstate counter if applicable"""
+ if (
+ old_entry is None or old_entry.removed
+ ) and "_dirs" in self.__dict__:
+ self._dirs.addpath(filename)
+ if old_entry is None and "_alldirs" in self.__dict__:
+ self._alldirs.addpath(filename)
+
+ def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
+ """decremente the dirstate counter if applicable"""
+ if old_entry is not None:
+ if "_dirs" in self.__dict__ and not old_entry.removed:
+ self._dirs.delpath(filename)
+ if "_alldirs" in self.__dict__ and not remove_variant:
+ self._alldirs.delpath(filename)
+ elif remove_variant and "_alldirs" in self.__dict__:
+ self._alldirs.addpath(filename)
+ if "filefoldmap" in self.__dict__:
+ normed = util.normcase(filename)
+ self.filefoldmap.pop(normed, None)
+
+ def set_possibly_dirty(self, filename):
+ """record that the current state of the file on disk is unknown"""
+ self[filename].set_possibly_dirty()
+
+ def addfile(
+ self,
+ f,
+ mode=0,
+ size=None,
+ mtime=None,
+ added=False,
+ merged=False,
+ from_p2=False,
+ possibly_dirty=False,
+ ):
"""Add a tracked file to the dirstate."""
- if oldstate in b"?r" and "_dirs" in self.__dict__:
- self._dirs.addpath(f)
- if oldstate == b"?" and "_alldirs" in self.__dict__:
- self._alldirs.addpath(f)
- self._map[f] = dirstatetuple(state, mode, size, mtime)
- if state != b'n' or mtime == -1:
+ if added:
+ assert not merged
+ assert not possibly_dirty
+ assert not from_p2
+ state = b'a'
+ size = NONNORMAL
+ mtime = AMBIGUOUS_TIME
+ elif merged:
+ assert not possibly_dirty
+ assert not from_p2
+ state = b'm'
+ size = FROM_P2
+ mtime = AMBIGUOUS_TIME
+ elif from_p2:
+ assert not possibly_dirty
+ state = b'n'
+ size = FROM_P2
+ mtime = AMBIGUOUS_TIME
+ elif possibly_dirty:
+ state = b'n'
+ size = NONNORMAL
+ mtime = AMBIGUOUS_TIME
+ else:
+ assert size != FROM_P2
+ assert size != NONNORMAL
+ state = b'n'
+ size = size & rangemask
+ mtime = mtime & rangemask
+ assert state is not None
+ assert size is not None
+ assert mtime is not None
+ old_entry = self.get(f)
+ self._dirs_incr(f, old_entry)
+ e = self._map[f] = DirstateItem(state, mode, size, mtime)
+ if e.dm_nonnormal:
self.nonnormalset.add(f)
- if size == -2:
+ if e.dm_otherparent:
self.otherparentset.add(f)
- def removefile(self, f, oldstate, size):
+ def reset_state(
+ self,
+ filename,
+ wc_tracked,
+ p1_tracked,
+ p2_tracked=False,
+ merged=False,
+ clean_p1=False,
+ clean_p2=False,
+ possibly_dirty=False,
+ parentfiledata=None,
+ ):
+ """Set a entry to a given state, diregarding all previous state
+
+ This is to be used by the part of the dirstate API dedicated to
+ adjusting the dirstate after a update/merge.
+
+ note: calling this might result to no entry existing at all if the
+ dirstate map does not see any point at having one for this file
+ anymore.
+ """
+ if merged and (clean_p1 or clean_p2):
+ msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
+ raise error.ProgrammingError(msg)
+ # copy information is now outdated
+ # (maybe new information should be passed directly to this function)
+ self.copymap.pop(filename, None)
+
+ if not (p1_tracked or p2_tracked or wc_tracked):
+ self.dropfile(filename)
+ elif merged:
+ # XXX might be merged and removed ?
+ entry = self.get(filename)
+ if entry is not None and entry.tracked:
+ # XXX mostly replicates dirstate.otherparent. We should get
+ # the higher layer to pass us more reliable data where `merged`
+ # actually means merged. Dropping the else clause will show
+ # a failure in `test-graft.t`
+ self.addfile(filename, merged=True)
+ else:
+ self.addfile(filename, from_p2=True)
+ elif not (p1_tracked or p2_tracked) and wc_tracked:
+ self.addfile(filename, added=True, possibly_dirty=possibly_dirty)
+ elif (p1_tracked or p2_tracked) and not wc_tracked:
+ # XXX might be merged and removed ?
+ old_entry = self._map.get(filename)
+ self._dirs_decr(filename, old_entry=old_entry, remove_variant=True)
+ self._map[filename] = DirstateItem(b'r', 0, 0, 0)
+ self.nonnormalset.add(filename)
+ elif clean_p2 and wc_tracked:
+ if p1_tracked or self.get(filename) is not None:
+ # XXX the `self.get` call catches a case in
+ # `test-merge-remove.t` where the file is tracked in p1 but the
+ # p1_tracked argument is False.
+ #
+ # In addition, this seems to be a case where the file is marked
+ # as merged without actually being the result of a merge
+ # action. So things are not ideal here.
+ self.addfile(filename, merged=True)
+ else:
+ self.addfile(filename, from_p2=True)
+ elif not p1_tracked and p2_tracked and wc_tracked:
+ self.addfile(filename, from_p2=True, possibly_dirty=possibly_dirty)
+ elif possibly_dirty:
+ self.addfile(filename, possibly_dirty=possibly_dirty)
+ elif wc_tracked:
+ # this is a "normal" file
+ if parentfiledata is None:
+ msg = b'failed to pass parentfiledata for a normal file: %s'
+ msg %= filename
+ raise error.ProgrammingError(msg)
+ mode, size, mtime = parentfiledata
+ self.addfile(filename, mode=mode, size=size, mtime=mtime)
+ self.nonnormalset.discard(filename)
+ else:
+ assert False, 'unreachable'
+
+ def removefile(self, f, in_merge=False):
"""
Mark a file as removed in the dirstate.
@@ -1515,38 +308,41 @@ class dirstatemap(object):
the file's previous state. In the future, we should refactor this
to be more explicit about what that state is.
"""
- if oldstate not in b"?r" and "_dirs" in self.__dict__:
- self._dirs.delpath(f)
- if oldstate == b"?" and "_alldirs" in self.__dict__:
- self._alldirs.addpath(f)
- if "filefoldmap" in self.__dict__:
- normed = util.normcase(f)
- self.filefoldmap.pop(normed, None)
- self._map[f] = dirstatetuple(b'r', 0, size, 0)
+ entry = self.get(f)
+ size = 0
+ if in_merge:
+ # XXX we should not be able to have the 'm' state and 'FROM_P2'
+ # outside of a merge. So I (marmoute) am not sure we need the
+ # conditional at all. Double-checking this with an assert
+ # would be nice.
+ if entry is not None:
+ # backup the previous state
+ if entry.merged: # merge
+ size = NONNORMAL
+ elif entry.from_p2:
+ size = FROM_P2
+ self.otherparentset.add(f)
+ if entry is not None and not (entry.merged or entry.from_p2):
+ self.copymap.pop(f, None)
+ self._dirs_decr(f, old_entry=entry, remove_variant=True)
+ self._map[f] = DirstateItem(b'r', 0, size, 0)
self.nonnormalset.add(f)
- def dropfile(self, f, oldstate):
+ def dropfile(self, f):
"""
Remove a file from the dirstate. Returns True if the file was
previously recorded.
"""
- exists = self._map.pop(f, None) is not None
- if exists:
- if oldstate != b"r" and "_dirs" in self.__dict__:
- self._dirs.delpath(f)
- if "_alldirs" in self.__dict__:
- self._alldirs.delpath(f)
- if "filefoldmap" in self.__dict__:
- normed = util.normcase(f)
- self.filefoldmap.pop(normed, None)
+ old_entry = self._map.pop(f, None)
+ self._dirs_decr(f, old_entry=old_entry)
self.nonnormalset.discard(f)
- return exists
+ return old_entry is not None
def clearambiguoustimes(self, files, now):
for f in files:
e = self.get(f)
- if e is not None and e[0] == b'n' and e[3] == now:
- self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
+ if e is not None and e.need_delay(now):
+ e.set_possibly_dirty()
self.nonnormalset.add(f)
def nonnormalentries(self):
@@ -1557,9 +353,9 @@ class dirstatemap(object):
nonnorm = set()
otherparent = set()
for fname, e in pycompat.iteritems(self._map):
- if e[0] != b'n' or e[3] == -1:
+ if e.dm_nonnormal:
nonnorm.add(fname)
- if e[0] == b'n' and e[2] == -2:
+ if e.from_p2:
otherparent.add(fname)
return nonnorm, otherparent
@@ -1580,7 +376,7 @@ class dirstatemap(object):
f = {}
normcase = util.normcase
for name, s in pycompat.iteritems(self._map):
- if s[0] != b'r':
+ if not s.removed:
f[normcase(name)] = name
f[b'.'] = b'.' # prevents useless util.fspath() invocation
return f
@@ -1636,7 +432,10 @@ class dirstatemap(object):
st[self._nodelen : 2 * self._nodelen],
)
elif l == 0:
- self._parents = (nullid, nullid)
+ self._parents = (
+ self._nodeconstants.nullid,
+ self._nodeconstants.nullid,
+ )
else:
raise error.Abort(
_(b'working directory state appears damaged!')
@@ -1698,7 +497,7 @@ class dirstatemap(object):
self.__getitem__ = self._map.__getitem__
self.get = self._map.get
- def write(self, st, now):
+ def write(self, _tr, st, now):
st.write(
parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
)
@@ -1718,6 +517,9 @@ class dirstatemap(object):
self.nonnormalset = nonnorm
return otherparents
+ def non_normal_or_other_parent_paths(self):
+ return self.nonnormalset.union(self.otherparentset)
+
@propertycache
def identity(self):
self._map
@@ -1735,20 +537,129 @@ class dirstatemap(object):
if rustmod is not None:
class dirstatemap(object):
- def __init__(self, ui, opener, root, nodeconstants):
+ def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
+ self._use_dirstate_v2 = use_dirstate_v2
self._nodeconstants = nodeconstants
self._ui = ui
self._opener = opener
self._root = root
self._filename = b'dirstate'
+ self._nodelen = 20 # Also update Rust code when changing this!
self._parents = None
self._dirtyparents = False
+ self._docket = None
# for consistent view between _pl() and _read() invocations
self._pendingmode = None
- def addfile(self, *args, **kwargs):
- return self._rustmap.addfile(*args, **kwargs)
+ self._use_dirstate_tree = self._ui.configbool(
+ b"experimental",
+ b"dirstate-tree.in-memory",
+ False,
+ )
+
+ def addfile(
+ self,
+ f,
+ mode=0,
+ size=None,
+ mtime=None,
+ added=False,
+ merged=False,
+ from_p2=False,
+ possibly_dirty=False,
+ ):
+ return self._rustmap.addfile(
+ f,
+ mode,
+ size,
+ mtime,
+ added,
+ merged,
+ from_p2,
+ possibly_dirty,
+ )
+
+ def reset_state(
+ self,
+ filename,
+ wc_tracked,
+ p1_tracked,
+ p2_tracked=False,
+ merged=False,
+ clean_p1=False,
+ clean_p2=False,
+ possibly_dirty=False,
+ parentfiledata=None,
+ ):
+ """Set a entry to a given state, disregarding all previous state
+
+ This is to be used by the part of the dirstate API dedicated to
+ adjusting the dirstate after a update/merge.
+
+ note: calling this might result to no entry existing at all if the
+ dirstate map does not see any point at having one for this file
+ anymore.
+ """
+ if merged and (clean_p1 or clean_p2):
+ msg = (
+ b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
+ )
+ raise error.ProgrammingError(msg)
+ # copy information is now outdated
+ # (maybe new information should be passed directly to this function)
+ self.copymap.pop(filename, None)
+
+ if not (p1_tracked or p2_tracked or wc_tracked):
+ self.dropfile(filename)
+ elif merged:
+ # XXX might be merged and removed ?
+ entry = self.get(filename)
+ if entry is not None and entry.tracked:
+ # XXX mostly replicates dirstate.otherparent. We should get
+ # the higher layer to pass us more reliable data where `merged`
+ # actually means merged. Dropping the else clause will show
+ # a failure in `test-graft.t`
+ self.addfile(filename, merged=True)
+ else:
+ self.addfile(filename, from_p2=True)
+ elif not (p1_tracked or p2_tracked) and wc_tracked:
+ self.addfile(
+ filename, added=True, possibly_dirty=possibly_dirty
+ )
+ elif (p1_tracked or p2_tracked) and not wc_tracked:
+ # XXX might be merged and removed ?
+ self[filename] = DirstateItem(b'r', 0, 0, 0)
+ self.nonnormalset.add(filename)
+ elif clean_p2 and wc_tracked:
+ if p1_tracked or self.get(filename) is not None:
+ # XXX the `self.get` call catches a case in
+ # `test-merge-remove.t` where the file is tracked in p1 but the
+ # p1_tracked argument is False.
+ #
+ # In addition, this seems to be a case where the file is marked
+ # as merged without actually being the result of a merge
+ # action. So things are not ideal here.
+ self.addfile(filename, merged=True)
+ else:
+ self.addfile(filename, from_p2=True)
+ elif not p1_tracked and p2_tracked and wc_tracked:
+ self.addfile(
+ filename, from_p2=True, possibly_dirty=possibly_dirty
+ )
+ elif possibly_dirty:
+ self.addfile(filename, possibly_dirty=possibly_dirty)
+ elif wc_tracked:
+ # this is a "normal" file
+ if parentfiledata is None:
+ msg = b'failed to pass parentfiledata for a normal file: %s'
+ msg %= filename
+ raise error.ProgrammingError(msg)
+ mode, size, mtime = parentfiledata
+ self.addfile(filename, mode=mode, size=size, mtime=mtime)
+ self.nonnormalset.discard(filename)
+ else:
+ assert False, 'unreachable'
def removefile(self, *args, **kwargs):
return self._rustmap.removefile(*args, **kwargs)
@@ -1765,36 +676,24 @@ if rustmod is not None:
def get(self, *args, **kwargs):
return self._rustmap.get(*args, **kwargs)
- @propertycache
- def _rustmap(self):
- """
- Fills the Dirstatemap when called.
- Use `self._inner_rustmap` if reading the dirstate is not necessary.
- """
- self._rustmap = self._inner_rustmap
- self.read()
- return self._rustmap
-
- @propertycache
- def _inner_rustmap(self):
- """
- Does not fill the Dirstatemap when called. This allows for
- optimizations where only setting/getting the parents is needed.
- """
- self._inner_rustmap = rustmod.DirstateMap(self._root)
- return self._inner_rustmap
-
@property
def copymap(self):
return self._rustmap.copymap()
+ def directories(self):
+ return self._rustmap.directories()
+
+ def debug_iter(self):
+ return self._rustmap.debug_iter()
+
def preload(self):
self._rustmap
def clear(self):
self._rustmap.clear()
- self._inner_rustmap.clear()
- self.setparents(nullid, nullid)
+ self.setparents(
+ self._nodeconstants.nullid, self._nodeconstants.nullid
+ )
util.clearcachedproperty(self, b"_dirs")
util.clearcachedproperty(self, b"_alldirs")
util.clearcachedproperty(self, b"dirfoldmap")
@@ -1832,64 +731,145 @@ if rustmod is not None:
self._pendingmode = mode
return fp
+ def _readdirstatefile(self, size=-1):
+ try:
+ with self._opendirstatefile() as fp:
+ return fp.read(size)
+ except IOError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ # File doesn't exist, so the current state is empty
+ return b''
+
def setparents(self, p1, p2):
- self._rustmap.setparents(p1, p2)
self._parents = (p1, p2)
self._dirtyparents = True
def parents(self):
if not self._parents:
- try:
- fp = self._opendirstatefile()
- st = fp.read(40)
- fp.close()
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
- # File doesn't exist, so the current state is empty
- st = b''
-
- try:
- self._parents = self._inner_rustmap.parents(st)
- except ValueError:
- raise error.Abort(
- _(b'working directory state appears damaged!')
- )
+ if self._use_dirstate_v2:
+ self._parents = self.docket.parents
+ else:
+ read_len = self._nodelen * 2
+ st = self._readdirstatefile(read_len)
+ l = len(st)
+ if l == read_len:
+ self._parents = (
+ st[: self._nodelen],
+ st[self._nodelen : 2 * self._nodelen],
+ )
+ elif l == 0:
+ self._parents = (
+ self._nodeconstants.nullid,
+ self._nodeconstants.nullid,
+ )
+ else:
+ raise error.Abort(
+ _(b'working directory state appears damaged!')
+ )
return self._parents
- def read(self):
+ @property
+ def docket(self):
+ if not self._docket:
+ if not self._use_dirstate_v2:
+ raise error.ProgrammingError(
+ b'dirstate only has a docket in v2 format'
+ )
+ self._docket = docketmod.DirstateDocket.parse(
+ self._readdirstatefile(), self._nodeconstants
+ )
+ return self._docket
+
+ @propertycache
+ def _rustmap(self):
+ """
+ Fills the Dirstatemap when called.
+ """
# ignore HG_PENDING because identity is used only for writing
self.identity = util.filestat.frompath(
self._opener.join(self._filename)
)
- try:
- fp = self._opendirstatefile()
- try:
- st = fp.read()
- finally:
- fp.close()
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
- return
- if not st:
- return
+ if self._use_dirstate_v2:
+ if self.docket.uuid:
+ # TODO: use mmap when possible
+ data = self._opener.read(self.docket.data_filename())
+ else:
+ data = b''
+ self._rustmap = rustmod.DirstateMap.new_v2(
+ data, self.docket.data_size, self.docket.tree_metadata
+ )
+ parents = self.docket.parents
+ else:
+ self._rustmap, parents = rustmod.DirstateMap.new_v1(
+ self._use_dirstate_tree, self._readdirstatefile()
+ )
- parse_dirstate = util.nogc(self._rustmap.read)
- parents = parse_dirstate(st)
if parents and not self._dirtyparents:
self.setparents(*parents)
self.__contains__ = self._rustmap.__contains__
self.__getitem__ = self._rustmap.__getitem__
self.get = self._rustmap.get
+ return self._rustmap
- def write(self, st, now):
- parents = self.parents()
- st.write(self._rustmap.write(parents[0], parents[1], now))
- st.close()
+ def write(self, tr, st, now):
+ if not self._use_dirstate_v2:
+ p1, p2 = self.parents()
+ packed = self._rustmap.write_v1(p1, p2, now)
+ st.write(packed)
+ st.close()
+ self._dirtyparents = False
+ return
+
+ # We can only append to an existing data file if there is one
+ can_append = self.docket.uuid is not None
+ packed, meta, append = self._rustmap.write_v2(now, can_append)
+ if append:
+ docket = self.docket
+ data_filename = docket.data_filename()
+ if tr:
+ tr.add(data_filename, docket.data_size)
+ with self._opener(data_filename, b'r+b') as fp:
+ fp.seek(docket.data_size)
+ assert fp.tell() == docket.data_size
+ written = fp.write(packed)
+ if written is not None: # py2 may return None
+ assert written == len(packed), (written, len(packed))
+ docket.data_size += len(packed)
+ docket.parents = self.parents()
+ docket.tree_metadata = meta
+ st.write(docket.serialize())
+ st.close()
+ else:
+ old_docket = self.docket
+ new_docket = docketmod.DirstateDocket.with_new_uuid(
+ self.parents(), len(packed), meta
+ )
+ data_filename = new_docket.data_filename()
+ if tr:
+ tr.add(data_filename, 0)
+ self._opener.write(data_filename, packed)
+ # Write the new docket after the new data file has been
+ # written. Because `st` was opened with `atomictemp=True`,
+ # the actual `.hg/dirstate` file is only affected on close.
+ st.write(new_docket.serialize())
+ st.close()
+ # Remove the old data file after the new docket pointing to
+ # the new data file was written.
+ if old_docket.uuid:
+ data_filename = old_docket.data_filename()
+ unlink = lambda _tr=None: self._opener.unlink(data_filename)
+ if tr:
+ category = b"dirstate-v2-clean-" + old_docket.uuid
+ tr.addpostclose(category, unlink)
+ else:
+ unlink()
+ self._docket = new_docket
+ # Reload from the newly-written file
+ util.clearcachedproperty(self, b"_rustmap")
self._dirtyparents = False
@propertycache
@@ -1900,22 +880,12 @@ if rustmod is not None:
return self._rustmap.filefoldmapasdict()
def hastrackeddir(self, d):
- self._dirs # Trigger Python's propertycache
return self._rustmap.hastrackeddir(d)
def hasdir(self, d):
- self._dirs # Trigger Python's propertycache
return self._rustmap.hasdir(d)
@propertycache
- def _dirs(self):
- return self._rustmap.getdirs()
-
- @propertycache
- def _alldirs(self):
- return self._rustmap.getalldirs()
-
- @propertycache
def identity(self):
self._rustmap
return self.identity
@@ -1930,10 +900,23 @@ if rustmod is not None:
otherparents = self._rustmap.other_parent_entries()
return otherparents
+ def non_normal_or_other_parent_paths(self):
+ return self._rustmap.non_normal_or_other_parent_paths()
+
@propertycache
def dirfoldmap(self):
f = {}
normcase = util.normcase
- for name in self._dirs:
+ for name in self._rustmap.tracked_dirs():
f[normcase(name)] = name
return f
+
+ def set_possibly_dirty(self, filename):
+ """record that the current state of the file on disk is unknown"""
+ entry = self[filename]
+ entry.set_possibly_dirty()
+ self._rustmap.set_v1(filename, entry)
+
+ def __setitem__(self, key, value):
+ assert isinstance(value, DirstateItem)
+ self._rustmap.set_v1(key, value)
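# Illustration only -- not part of this patch. The rewritten addfile() above
# replaces the old positional (oldstate, state, mode, size, mtime) signature
# with boolean flags. As a reviewing aid, this minimal standalone sketch
# (legacy_tuple is a hypothetical helper, not a Mercurial API) shows how each
# flag maps onto the legacy (state, mode, size, mtime) values, using the
# constants introduced by this change.
FROM_P2 = -2
NONNORMAL = -1
AMBIGUOUS_TIME = -1
rangemask = 0x7FFFFFFF


def legacy_tuple(mode=0, size=None, mtime=None,
                 added=False, merged=False, from_p2=False,
                 possibly_dirty=False):
    if added:
        return (b'a', mode, NONNORMAL, AMBIGUOUS_TIME)
    if merged:
        return (b'm', mode, FROM_P2, AMBIGUOUS_TIME)
    if from_p2:
        return (b'n', mode, FROM_P2, AMBIGUOUS_TIME)
    if possibly_dirty:
        return (b'n', mode, NONNORMAL, AMBIGUOUS_TIME)
    # "normal" file: size and mtime are masked to 31 bits, as in the patch
    return (b'n', mode, size & rangemask, mtime & rangemask)


assert legacy_tuple(added=True) == (b'a', 0, -1, -1)
assert legacy_tuple(merged=True) == (b'm', 0, -2, -1)
assert legacy_tuple(from_p2=True) == (b'n', 0, -2, -1)
assert legacy_tuple(mode=0o644, size=12, mtime=2 ** 31 + 7) == (b'n', 0o644, 12, 7)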
diff --git a/mercurial/dirstateutils/__init__.py b/mercurial/dirstateutils/__init__.py
new file mode 100644
diff --git a/mercurial/dirstateutils/docket.py b/mercurial/dirstateutils/docket.py
new file mode 100644
--- /dev/null
+++ b/mercurial/dirstateutils/docket.py
@@ -0,0 +1,75 @@
+# docket.py - docket file for dirstate-v2
+#
+# Copyright Mercurial Contributors
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import struct
+
+from ..revlogutils import docket as docket_mod
+
+
+V2_FORMAT_MARKER = b"dirstate-v2\n"
+
+# Must match the constant of the same name in
+# `rust/hg-core/src/dirstate_tree/on_disk.rs`
+TREE_METADATA_SIZE = 44
+
+# * 12 bytes: format marker
+# * 32 bytes: node ID of the working directory's first parent
+# * 32 bytes: node ID of the working directory's second parent
+# * 4 bytes: big-endian used size of the data file
+# * {TREE_METADATA_SIZE} bytes: tree metadata, parsed separately
+# * 1 byte: length of the data file's UUID
+# * variable: data file's UUID
+#
+# Node IDs are null-padded if shorter than 32 bytes.
+# A data file shorter than the specified used size is corrupted (truncated)
+HEADER = struct.Struct(
+ ">{}s32s32sL{}sB".format(len(V2_FORMAT_MARKER), TREE_METADATA_SIZE)
+)
+
+
+class DirstateDocket(object):
+ data_filename_pattern = b'dirstate.%s.d'
+
+ def __init__(self, parents, data_size, tree_metadata, uuid):
+ self.parents = parents
+ self.data_size = data_size
+ self.tree_metadata = tree_metadata
+ self.uuid = uuid
+
+ @classmethod
+ def with_new_uuid(cls, parents, data_size, tree_metadata):
+ return cls(parents, data_size, tree_metadata, docket_mod.make_uid())
+
+ @classmethod
+ def parse(cls, data, nodeconstants):
+ if not data:
+ parents = (nodeconstants.nullid, nodeconstants.nullid)
+ return cls(parents, 0, b'', None)
+ marker, p1, p2, data_size, meta, uuid_size = HEADER.unpack_from(data)
+ if marker != V2_FORMAT_MARKER:
+ raise ValueError("expected dirstate-v2 marker")
+ uuid = data[HEADER.size : HEADER.size + uuid_size]
+ p1 = p1[: nodeconstants.nodelen]
+ p2 = p2[: nodeconstants.nodelen]
+ return cls((p1, p2), data_size, meta, uuid)
+
+ def serialize(self):
+ p1, p2 = self.parents
+ header = HEADER.pack(
+ V2_FORMAT_MARKER,
+ p1,
+ p2,
+ self.data_size,
+ self.tree_metadata,
+ len(self.uuid),
+ )
+ return header + self.uuid
+
+ def data_filename(self):
+ return self.data_filename_pattern % self.uuid
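# Illustration only -- not part of this patch. A minimal round-trip of the
# docket header layout documented above, reusing the same struct format
# string. The node IDs, data size and UUID below are made-up example values.
import struct

V2_FORMAT_MARKER = b"dirstate-v2\n"
TREE_METADATA_SIZE = 44
HEADER = struct.Struct(
    ">{}s32s32sL{}sB".format(len(V2_FORMAT_MARKER), TREE_METADATA_SIZE)
)

p1 = b"\x11" * 20  # 20-byte node IDs are null-padded to 32 bytes by "32s"
p2 = b"\x00" * 20
uuid = b"0123456789abcdef"
docket_bytes = HEADER.pack(V2_FORMAT_MARKER, p1, p2, 4096, b"", len(uuid)) + uuid

marker, rp1, rp2, data_size, meta, uuid_size = HEADER.unpack_from(docket_bytes)
assert marker == V2_FORMAT_MARKER
assert rp1[:20] == p1 and rp1[20:] == b"\x00" * 12  # null padding preserved
assert data_size == 4096
assert docket_bytes[HEADER.size:HEADER.size + uuid_size] == uuid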
diff --git a/mercurial/discovery.py b/mercurial/discovery.py
--- a/mercurial/discovery.py
+++ b/mercurial/discovery.py
@@ -12,7 +12,6 @@ import functools
from .i18n import _
from .node import (
hex,
- nullid,
short,
)
@@ -107,7 +106,7 @@ class outgoing(object):
if missingroots:
discbases = []
for n in missingroots:
- discbases.extend([p for p in cl.parents(n) if p != nullid])
+ discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
# TODO remove call to nodesbetween.
# TODO populate attributes on outgoing instance instead of setting
# discbases.
@@ -116,7 +115,7 @@ class outgoing(object):
ancestorsof = heads
commonheads = [n for n in discbases if n not in included]
elif not commonheads:
- commonheads = [nullid]
+ commonheads = [repo.nullid]
self.commonheads = commonheads
self.ancestorsof = ancestorsof
self._revlog = cl
@@ -381,7 +380,7 @@ def checkheads(pushop):
# - a local outgoing head descended from update
# - a remote head that's known locally and not
# ancestral to an outgoing head
- if remoteheads == [nullid]:
+ if remoteheads == [repo.nullid]:
# remote is empty, nothing to check.
return
diff --git a/mercurial/dispatch.py b/mercurial/dispatch.py
--- a/mercurial/dispatch.py
+++ b/mercurial/dispatch.py
@@ -1064,6 +1064,16 @@ def _dispatch(req):
if req.earlyoptions[b'profile']:
for ui_ in uis:
ui_.setconfig(b'profiling', b'enabled', b'true', b'--profile')
+ elif req.earlyoptions[b'profile'] is False:
+ # Check for it being set already, so that we don't pollute the config
+ # with this when using chg in the very common case that it's not
+ # enabled.
+ if lui.configbool(b'profiling', b'enabled'):
+ # Only do this on lui so that `chg foo` with a user config setting
+ # profiling.enabled=1 still shows profiling information (chg will
+ # specify `--no-profile` when `hg serve` is starting up, we don't
+ # want that to propagate to every later invocation).
+ lui.setconfig(b'profiling', b'enabled', b'false', b'--no-profile')
profile = lui.configbool(b'profiling', b'enabled')
with profiling.profile(lui, enabled=profile) as profiler:
diff --git a/mercurial/encoding.py b/mercurial/encoding.py
--- a/mercurial/encoding.py
+++ b/mercurial/encoding.py
@@ -9,6 +9,7 @@ from __future__ import absolute_import,
import locale
import os
+import re
import unicodedata
from .pycompat import getattr
@@ -284,13 +285,75 @@ else:
strmethod = pycompat.identity
+
+def lower(s):
+ # type: (bytes) -> bytes
+ """best-effort encoding-aware case-folding of local string s"""
+ try:
+ return asciilower(s)
+ except UnicodeDecodeError:
+ pass
+ try:
+ if isinstance(s, localstr):
+ u = s._utf8.decode("utf-8")
+ else:
+ u = s.decode(_sysstr(encoding), _sysstr(encodingmode))
+
+ lu = u.lower()
+ if u == lu:
+ return s # preserve localstring
+ return lu.encode(_sysstr(encoding))
+ except UnicodeError:
+ return s.lower() # we don't know how to fold this except in ASCII
+ except LookupError as k:
+ raise error.Abort(k, hint=b"please check your locale settings")
+
+
+def upper(s):
+ # type: (bytes) -> bytes
+ """best-effort encoding-aware case-folding of local string s"""
+ try:
+ return asciiupper(s)
+ except UnicodeDecodeError:
+ return upperfallback(s)
+
+
+def upperfallback(s):
+ # type: (Any) -> Any
+ try:
+ if isinstance(s, localstr):
+ u = s._utf8.decode("utf-8")
+ else:
+ u = s.decode(_sysstr(encoding), _sysstr(encodingmode))
+
+ uu = u.upper()
+ if u == uu:
+ return s # preserve localstring
+ return uu.encode(_sysstr(encoding))
+ except UnicodeError:
+ return s.upper() # we don't know how to fold this except in ASCII
+ except LookupError as k:
+ raise error.Abort(k, hint=b"please check your locale settings")
+
+
if not _nativeenviron:
# now encoding and helper functions are available, recreate the environ
# dict to be exported to other modules
- environ = {
- tolocal(k.encode('utf-8')): tolocal(v.encode('utf-8'))
- for k, v in os.environ.items() # re-exports
- }
+ if pycompat.iswindows and pycompat.ispy3:
+
+ class WindowsEnviron(dict):
+ """`os.environ` normalizes environment variables to uppercase on windows"""
+
+ def get(self, key, default=None):
+ return super().get(upper(key), default)
+
+ environ = WindowsEnviron()
+
+ for k, v in os.environ.items(): # re-exports
+ environ[tolocal(k.encode('utf-8'))] = tolocal(v.encode('utf-8'))
+
+
+DRIVE_RE = re.compile(b'^[a-z]:')
if pycompat.ispy3:
# os.getcwd() on Python 3 returns string, but it has os.getcwdb() which
@@ -303,7 +366,21 @@ if pycompat.ispy3:
# os.path.realpath(), which is used on ``repo.root``. Since those
# strings are compared in various places as simple strings, also call
# realpath here. See https://bugs.python.org/issue40368
- getcwd = lambda: strtolocal(os.path.realpath(os.getcwd())) # re-exports
+ #
+ # However this is not reliable, so let's explicitly make this drive
+ # letter upper case.
+ #
+ # note: we should consider dropping realpath here since it seems to
+ # change the semantics of `getcwd`.
+
+ def getcwd():
+ cwd = os.getcwd() # re-exports
+ cwd = os.path.realpath(cwd)
+ cwd = strtolocal(cwd)
+ if DRIVE_RE.match(cwd):
+ cwd = cwd[0:1].upper() + cwd[1:]
+ return cwd
+
else:
getcwd = os.getcwdb # re-exports
else:
@@ -441,56 +518,6 @@ def trim(s, width, ellipsis=b'', leftsid
return ellipsis # no enough room for multi-column characters
-def lower(s):
- # type: (bytes) -> bytes
- """best-effort encoding-aware case-folding of local string s"""
- try:
- return asciilower(s)
- except UnicodeDecodeError:
- pass
- try:
- if isinstance(s, localstr):
- u = s._utf8.decode("utf-8")
- else:
- u = s.decode(_sysstr(encoding), _sysstr(encodingmode))
-
- lu = u.lower()
- if u == lu:
- return s # preserve localstring
- return lu.encode(_sysstr(encoding))
- except UnicodeError:
- return s.lower() # we don't know how to fold this except in ASCII
- except LookupError as k:
- raise error.Abort(k, hint=b"please check your locale settings")
-
-
-def upper(s):
- # type: (bytes) -> bytes
- """best-effort encoding-aware case-folding of local string s"""
- try:
- return asciiupper(s)
- except UnicodeDecodeError:
- return upperfallback(s)
-
-
-def upperfallback(s):
- # type: (Any) -> Any
- try:
- if isinstance(s, localstr):
- u = s._utf8.decode("utf-8")
- else:
- u = s.decode(_sysstr(encoding), _sysstr(encodingmode))
-
- uu = u.upper()
- if u == uu:
- return s # preserve localstring
- return uu.encode(_sysstr(encoding))
- except UnicodeError:
- return s.upper() # we don't know how to fold this except in ASCII
- except LookupError as k:
- raise error.Abort(k, hint=b"please check your locale settings")
-
-
class normcasespecs(object):
"""what a platform's normcase does to ASCII strings
diff --git a/mercurial/error.py b/mercurial/error.py
--- a/mercurial/error.py
+++ b/mercurial/error.py
@@ -51,13 +51,52 @@ class Hint(object):
super(Hint, self).__init__(*args, **kw)
-class StorageError(Hint, Exception):
+class Error(Hint, Exception):
+ """Base class for Mercurial errors."""
+
+ coarse_exit_code = None
+ detailed_exit_code = None
+
+ def __init__(self, message, hint=None):
+ # type: (bytes, Optional[bytes]) -> None
+ self.message = message
+ self.hint = hint
+ # Pass the message into the Exception constructor to help extensions
+ # that look for exc.args[0].
+ Exception.__init__(self, message)
+
+ def __bytes__(self):
+ return self.message
+
+ if pycompat.ispy3:
+
+ def __str__(self):
+ # the output would be unreadable if the message was translated,
+ # but do not replace it with encoding.strfromlocal(), which
+ # may raise another exception.
+ return pycompat.sysstr(self.__bytes__())
+
+ def format(self):
+ # type: () -> bytes
+ from .i18n import _
+
+ message = _(b"abort: %s\n") % self.message
+ if self.hint:
+ message += _(b"(%s)\n") % self.hint
+ return message
+
+
+class Abort(Error):
+ """Raised if a command needs to print an error and exit."""
+
+
+class StorageError(Error):
"""Raised when an error occurs in a storage layer.
Usually subclassed by a storage-specific exception.
"""
- __bytes__ = _tobytes
+ detailed_exit_code = 50
class RevlogError(StorageError):
@@ -159,10 +198,20 @@ class WorkerError(Exception):
__bytes__ = _tobytes
-class InterventionRequired(Hint, Exception):
+class InterventionRequired(Abort):
"""Exception raised when a command requires human intervention."""
- __bytes__ = _tobytes
+ coarse_exit_code = 1
+ detailed_exit_code = 240
+
+ def format(self):
+ # type: () -> bytes
+ from .i18n import _
+
+ message = _(b"%s\n") % self.message
+ if self.hint:
+ message += _(b"(%s)\n") % self.hint
+ return message
class ConflictResolutionRequired(InterventionRequired):
@@ -182,44 +231,14 @@ class ConflictResolutionRequired(Interve
)
-class Abort(Hint, Exception):
- """Raised if a command needs to print an error and exit."""
-
- def __init__(self, message, hint=None):
- # type: (bytes, Optional[bytes]) -> None
- self.message = message
- self.hint = hint
- # Pass the message into the Exception constructor to help extensions
- # that look for exc.args[0].
- Exception.__init__(self, message)
-
- def __bytes__(self):
- return self.message
-
- if pycompat.ispy3:
-
- def __str__(self):
- # the output would be unreadable if the message was translated,
- # but do not replace it with encoding.strfromlocal(), which
- # may raise another exception.
- return pycompat.sysstr(self.__bytes__())
-
- def format(self):
- # type: () -> bytes
- from .i18n import _
-
- message = _(b"abort: %s\n") % self.message
- if self.hint:
- message += _(b"(%s)\n") % self.hint
- return message
-
-
class InputError(Abort):
"""Indicates that the user made an error in their input.
Examples: Invalid command, invalid flags, invalid revision.
"""
+ detailed_exit_code = 10
+
class StateError(Abort):
"""Indicates that the operation might work if retried in a different state.
@@ -227,6 +246,8 @@ class StateError(Abort):
Examples: Unresolved merge conflicts, unfinished operations.
"""
+ detailed_exit_code = 20
+
class CanceledError(Abort):
"""Indicates that the user canceled the operation.
@@ -234,6 +255,8 @@ class CanceledError(Abort):
Examples: Close commit editor with error status, quit chistedit.
"""
+ detailed_exit_code = 250
+
class SecurityError(Abort):
"""Indicates that some aspect of security failed.
@@ -242,6 +265,8 @@ class SecurityError(Abort):
filesystem, mismatched GPG signature, DoS protection.
"""
+ detailed_exit_code = 150
+
class HookLoadError(Abort):
"""raised when loading a hook fails, aborting an operation
@@ -254,10 +279,14 @@ class HookAbort(Abort):
Exists to allow more specialized catching."""
+ detailed_exit_code = 40
+
class ConfigError(Abort):
"""Exception raised when parsing config files"""
+ detailed_exit_code = 30
+
def __init__(self, message, location=None, hint=None):
# type: (bytes, Optional[bytes], Optional[bytes]) -> None
super(ConfigError, self).__init__(message, hint=hint)
@@ -307,6 +336,8 @@ class ResponseExpected(Abort):
class RemoteError(Abort):
"""Exception raised when interacting with a remote repo fails"""
+ detailed_exit_code = 100
+
class OutOfBandError(RemoteError):
"""Exception raised when a remote repo reports failure"""
@@ -325,6 +356,8 @@ class OutOfBandError(RemoteError):
class ParseError(Abort):
"""Raised when parsing config files and {rev,file}sets (msg[, pos])"""
+ detailed_exit_code = 10
+
def __init__(self, message, location=None, hint=None):
# type: (bytes, Optional[Union[bytes, int]], Optional[bytes]) -> None
super(ParseError, self).__init__(message, hint=hint)
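# Illustration only -- not part of this patch. A sketch of how the new
# detailed_exit_code attribute can be read off the exception hierarchy above;
# the stub classes below only mirror the exit-code values added in this
# change, and exit_code() is a hypothetical caller-side helper.
class Error(Exception):
    coarse_exit_code = None
    detailed_exit_code = None


class Abort(Error):
    pass


class InputError(Abort):
    detailed_exit_code = 10


class StateError(Abort):
    detailed_exit_code = 20


def exit_code(exc, default=255):
    # a caller could fall back to a generic code when a subclass sets none
    if exc.detailed_exit_code is not None:
        return exc.detailed_exit_code
    return default


assert exit_code(InputError()) == 10
assert exit_code(StateError()) == 20
assert exit_code(Abort()) == 255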
diff --git a/mercurial/exchange.py b/mercurial/exchange.py
--- a/mercurial/exchange.py
+++ b/mercurial/exchange.py
@@ -13,7 +13,6 @@ import weakref
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
)
from . import (
@@ -44,6 +43,7 @@ from .utils import (
stringutil,
urlutil,
)
+from .interfaces import repository
urlerr = util.urlerr
urlreq = util.urlreq
@@ -164,7 +164,7 @@ def _computeoutgoing(repo, heads, common
hasnode = cl.hasnode
common = [n for n in common if hasnode(n)]
else:
- common = [nullid]
+ common = [repo.nullid]
if not heads:
heads = cl.heads()
return discovery.outgoing(repo, common, heads)
@@ -184,6 +184,10 @@ def _checkpublish(pushop):
published = repo.filtered(b'served').revs(b'not public()')
else:
published = repo.revs(b'::%ln - public()', pushop.revs)
+ # we want to use pushop.revs in the revset even if they themselves are
+ # secret, but we don't want to have anything that the server won't see
+ # in the result of this expression
+ published &= repo.filtered(b'served')
if published:
if behavior == b'warn':
ui.warn(
@@ -894,7 +898,7 @@ def _pushb2ctx(pushop, bundler):
cgpart.addparam(b'version', version)
if scmutil.istreemanifest(pushop.repo):
cgpart.addparam(b'treemanifest', b'1')
- if b'exp-sidedata-flag' in pushop.repo.requirements:
+ if repository.REPO_FEATURE_SIDE_DATA in pushop.repo.features:
cgpart.addparam(b'exp-sidedata', b'1')
def handlereply(op):
@@ -1839,7 +1843,7 @@ def _pullbundle2(pullop):
if (
pullop.remote.capable(b'clonebundles')
and pullop.heads is None
- and list(pullop.common) == [nullid]
+ and list(pullop.common) == [pullop.repo.nullid]
):
kwargs[b'cbattempted'] = pullop.clonebundleattempted
@@ -1849,7 +1853,7 @@ def _pullbundle2(pullop):
pullop.repo.ui.status(_(b"no changes found\n"))
pullop.cgresult = 0
else:
- if pullop.heads is None and list(pullop.common) == [nullid]:
+ if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
pullop.repo.ui.status(_(b"requesting all changes\n"))
if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
@@ -1920,7 +1924,7 @@ def _pullchangeset(pullop):
pullop.cgresult = 0
return
tr = pullop.gettransaction()
- if pullop.heads is None and list(pullop.common) == [nullid]:
+ if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
pullop.repo.ui.status(_(b"requesting all changes\n"))
elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
# issue1320, avoid a race if remote changed after discovery
@@ -2428,7 +2432,7 @@ def _getbundlechangegrouppart(
if scmutil.istreemanifest(repo):
part.addparam(b'treemanifest', b'1')
- if b'exp-sidedata-flag' in repo.requirements:
+ if repository.REPO_FEATURE_SIDE_DATA in repo.features:
part.addparam(b'exp-sidedata', b'1')
sidedata = bundle2.format_remote_wanted_sidedata(repo)
part.addparam(b'exp-wanted-sidedata', sidedata)
diff --git a/mercurial/exchangev2.py b/mercurial/exchangev2.py
--- a/mercurial/exchangev2.py
+++ b/mercurial/exchangev2.py
@@ -11,10 +11,7 @@ import collections
import weakref
from .i18n import _
-from .node import (
- nullid,
- short,
-)
+from .node import short
from . import (
bookmarks,
error,
@@ -304,7 +301,7 @@ def _pullchangesetdiscovery(repo, remote
if set(remoteheads).issubset(common):
fetch = []
- common.discard(nullid)
+ common.discard(repo.nullid)
return common, fetch, remoteheads
@@ -413,7 +410,7 @@ def _processchangesetdata(repo, tr, objs
# Linknode is always itself for changesets.
cset[b'node'],
# We always send full revisions. So delta base is not set.
- nullid,
+ repo.nullid,
mdiff.trivialdiffheader(len(data)) + data,
# Flags not yet supported.
0,
@@ -478,7 +475,7 @@ def _fetchmanifests(repo, tr, remote, ma
basenode = manifest[b'deltabasenode']
delta = extrafields[b'delta']
elif b'revision' in extrafields:
- basenode = nullid
+ basenode = repo.nullid
revision = extrafields[b'revision']
delta = mdiff.trivialdiffheader(len(revision)) + revision
else:
@@ -610,7 +607,7 @@ def _fetchfiles(repo, tr, remote, fnodes
basenode = filerevision[b'deltabasenode']
delta = extrafields[b'delta']
elif b'revision' in extrafields:
- basenode = nullid
+ basenode = repo.nullid
revision = extrafields[b'revision']
delta = mdiff.trivialdiffheader(len(revision)) + revision
else:
@@ -705,7 +702,7 @@ def _fetchfilesfromcsets(
basenode = filerevision[b'deltabasenode']
delta = extrafields[b'delta']
elif b'revision' in extrafields:
- basenode = nullid
+ basenode = repo.nullid
revision = extrafields[b'revision']
delta = mdiff.trivialdiffheader(len(revision)) + revision
else:
diff --git a/mercurial/exewrapper.c b/mercurial/exewrapper.c
--- a/mercurial/exewrapper.c
+++ b/mercurial/exewrapper.c
@@ -48,7 +48,7 @@ int _tmain(int argc, TCHAR *argv[])
int(__cdecl * Py_Main)(int argc, TCHAR *argv[]);
#if PY_MAJOR_VERSION >= 3
- Py_LegacyWindowsStdioFlag = 1;
+ _wputenv(L"PYTHONLEGACYWINDOWSSTDIO=1");
#endif
if (GetModuleFileName(NULL, pyscript, _countof(pyscript)) == 0) {
diff --git a/mercurial/extensions.py b/mercurial/extensions.py
--- a/mercurial/extensions.py
+++ b/mercurial/extensions.py
@@ -713,7 +713,7 @@ def _disabledpaths():
# it might not be on a filesystem even if it does.
if util.safehasattr(hgext, '__file__'):
extpath = os.path.dirname(
- os.path.abspath(pycompat.fsencode(hgext.__file__))
+ util.abspath(pycompat.fsencode(hgext.__file__))
)
try:
files = os.listdir(extpath)
diff --git a/mercurial/filelog.py b/mercurial/filelog.py
--- a/mercurial/filelog.py
+++ b/mercurial/filelog.py
@@ -8,10 +8,7 @@
from __future__ import absolute_import
from .i18n import _
-from .node import (
- nullid,
- nullrev,
-)
+from .node import nullrev
from . import (
error,
revlog,
@@ -21,18 +18,24 @@ from .interfaces import (
util as interfaceutil,
)
from .utils import storageutil
+from .revlogutils import (
+ constants as revlog_constants,
+)
@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
def __init__(self, opener, path):
self._revlog = revlog.revlog(
- opener, b'/'.join((b'data', path + b'.i')), censorable=True
+ opener,
+ # XXX should use the unencoded path
+ target=(revlog_constants.KIND_FILELOG, path),
+ radix=b'/'.join((b'data', path)),
+ censorable=True,
)
# Full name of the user visible file, relative to the repository root.
# Used by LFS.
self._revlog.filename = path
- self._revlog.revlog_kind = b'filelog'
self.nullid = self._revlog.nullid
def __len__(self):
@@ -42,7 +45,7 @@ class filelog(object):
return self._revlog.__iter__()
def hasnode(self, node):
- if node in (nullid, nullrev):
+ if node in (self.nullid, nullrev):
return False
try:
@@ -68,7 +71,7 @@ class filelog(object):
def lookup(self, node):
return storageutil.fileidlookup(
- self._revlog, node, self._revlog.indexfile
+ self._revlog, node, self._revlog.display_id
)
def linkrev(self, rev):
@@ -225,18 +228,6 @@ class filelog(object):
storedsize=storedsize,
)
- # TODO these aren't part of the interface and aren't internal methods.
- # Callers should be fixed to not use them.
-
- # Used by bundlefilelog, unionfilelog.
- @property
- def indexfile(self):
- return self._revlog.indexfile
-
- @indexfile.setter
- def indexfile(self, value):
- self._revlog.indexfile = value
-
# Used by repo upgrade.
def clone(self, tr, destrevlog, **kwargs):
if not isinstance(destrevlog, filelog):
diff --git a/mercurial/filemerge.py b/mercurial/filemerge.py
--- a/mercurial/filemerge.py
+++ b/mercurial/filemerge.py
@@ -15,7 +15,6 @@ import shutil
from .i18n import _
from .node import (
hex,
- nullid,
short,
)
from .pycompat import (
@@ -111,7 +110,7 @@ class absentfilectx(object):
return None
def filenode(self):
- return nullid
+ return self._ctx.repo().nullid
_customcmp = True
diff --git a/mercurial/help.py b/mercurial/help.py
--- a/mercurial/help.py
+++ b/mercurial/help.py
@@ -540,6 +540,12 @@ helptable = sorted(
TOPIC_CATEGORY_CONCEPTS,
),
(
+ [b"evolution"],
+ _(b"Safely rewriting history (EXPERIMENTAL)"),
+ loaddoc(b'evolution'),
+ TOPIC_CATEGORY_CONCEPTS,
+ ),
+ (
[b'scripting'],
_(b'Using Mercurial from scripts and automation'),
loaddoc(b'scripting'),
diff --git a/mercurial/helptext/config.txt b/mercurial/helptext/config.txt
--- a/mercurial/helptext/config.txt
+++ b/mercurial/helptext/config.txt
@@ -5,7 +5,7 @@ Troubleshooting
===============
If you're having problems with your configuration,
-:hg:`config --debug` can help you understand what is introducing
+:hg:`config --source` can help you understand what is introducing
a setting into your environment.
See :hg:`help config.syntax` and :hg:`help config.files`
@@ -1718,6 +1718,12 @@ the path they point to.
The following sub-options can be defined:
+``multi-urls``
+    A boolean option. When enabled, the value of the `[paths]` entry is parsed
+    as a list and the alias resolves to multiple destinations. If some of the
+    list entries use the `path://` syntax, their sub-options are inherited
+    individually.
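An illustrative configuration (the alias name and URLs are invented for the
example):

      [paths]
      everywhere = https://example.org/repo-a, https://example.org/repo-b
      everywhere:multi-urls = yes

With this in place, commands that accept the alias, such as :hg:`pull
everywhere`, are applied to each listed URL in turn.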
+
``pushurl``
The URL to use for push operations. If not defined, the location
defined by the path's main entry is used.
diff --git a/mercurial/helptext/evolution.txt b/mercurial/helptext/evolution.txt
new file mode 100644
--- /dev/null
+++ b/mercurial/helptext/evolution.txt
@@ -0,0 +1,56 @@
+Obsolescence markers make it possible to mark changesets that have been
+deleted or superseded by a new version of the changeset.
+
+Unlike the previous way of handling such changes, by stripping the old
+changesets from the repository, obsolescence markers can be propagated
+between repositories. This allows for a safe and simple way of exchanging
+mutable history and altering it after the fact. Changeset phases are
+respected, such that only draft and secret changesets can be altered (see
+:hg:`help phases` for details).
+
+Obsolescence is tracked using "obsolescence markers", a piece of metadata
+tracking which changesets have been made obsolete, potential successors for
+a given changeset, the moment the changeset was marked as obsolete, and the
+user who performed the rewriting operation. The markers are stored
+separately from standard changeset data and can be exchanged without any of the
+precursor changesets, preventing unnecessary exchange of obsolescence data.
+
+The complete set of obsolescence markers describes a history of changeset
+modifications that is orthogonal to the repository history of file
+modifications. This changeset history allows for detection and automatic
+resolution of edge cases arising from multiple users rewriting the same part
+of history concurrently.
+
+Current feature status
+======================
+
+This feature is still in development.
+
+Instability
+===========
+
+Rewriting changesets might introduce instability.
+
+There are two main kinds of instability: orphaning and diverging.
+
+Orphans are changesets left behind when their ancestors are rewritten.
+Divergence has two variants:
+
+* Content-divergence occurs when independent rewrites of the same changesets
+ lead to different results.
+
+* Phase-divergence occurs when the old (obsolete) version of a changeset
+ becomes public.
+
+It is possible to prevent local creation of orphans by using the following config::
+
+ [experimental]
+ evolution.createmarkers = true
+ evolution.exchange = true
+
+Conversely, to explicitly allow the creation of unstable changesets, also
+enable ``evolution.allowunstable``::
+
+ [experimental]
+ evolution.createmarkers = true
+ evolution.exchange = true
+ evolution.allowunstable = true
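As a quick illustration, with the markers-enabled configuration above in
place, rewriting a draft changeset records a marker instead of stripping the
old node::

    $ hg commit --amend -m "better description"
    $ hg debugobsolete    # lists the marker from the old node to its successor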
diff --git a/mercurial/helptext/internals/changegroups.txt b/mercurial/helptext/internals/changegroups.txt
--- a/mercurial/helptext/internals/changegroups.txt
+++ b/mercurial/helptext/internals/changegroups.txt
@@ -2,12 +2,13 @@ Changegroups are representations of repo
the changelog data, root/flat manifest data, treemanifest data, and
filelogs.
-There are 3 versions of changegroups: ``1``, ``2``, and ``3``. From a
+There are 4 versions of changegroups: ``1``, ``2``, ``3`` and ``4``. From a
high-level, versions ``1`` and ``2`` are almost exactly the same, with the
only difference being an additional item in the *delta header*. Version
``3`` adds support for storage flags in the *delta header* and optionally
exchanging treemanifests (enabled by setting an option on the
-``changegroup`` part in the bundle2).
+``changegroup`` part in the bundle2). Version ``4`` adds support for exchanging
+sidedata (additional revision metadata not part of the digest).
Changegroups when not exchanging treemanifests consist of 3 logical
segments::
@@ -74,8 +75,8 @@ The *delta data* is a series of *delta*s
entry (either that the recipient already has, or previously specified in the
bundle/changegroup).
-The *delta header* is different between versions ``1``, ``2``, and
-``3`` of the changegroup format.
+The *delta header* is different between versions ``1``, ``2``, ``3`` and ``4``
+of the changegroup format.
Version 1 (headerlen=80)::
@@ -104,6 +105,15 @@ Version 3 (headerlen=102)::
  |            |             |             |            |            |           |
  +------------------------------------------------------------------------------+
+Version 4 (headerlen=103)::
+
+  +----------+------------------------------------------------------------------------------+
+  |          |            |             |             |            |            |           |
+  |  pflags  |    node    |   p1 node   |   p2 node   | base node  | link node  |   flags   |
+  | (1 byte) | (20 bytes) |  (20 bytes) |  (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) |
+  |          |            |             |             |            |            |           |
+  +----------+------------------------------------------------------------------------------+
+
The *delta data* consists of ``chunklen - 4 - headerlen`` bytes, which contain a
series of *delta*s, densely packed (no separators). These deltas describe a diff
from an existing entry (either that the recipient already has, or previously
@@ -140,12 +150,24 @@ 8192
Externally stored. The revision fulltext contains ``key:value`` ``\n``
delimited metadata defining an object stored elsewhere. Used by the LFS
extension.
+4096
+ Contains copy information. This revision changes files in a way that could
+ affect copy tracing. This does *not* affect changegroup handling, but is
+ relevant for other parts of Mercurial.
For historical reasons, the integer values are identical to revlog version 1
per-revision storage flags and correspond to bits being set in this 2-byte
field. Bits were allocated starting from the most-significant bit, hence the
reverse ordering and allocation of these flags.
+The *pflags* (protocol flags) field holds bitwise flags affecting the protocol
+itself. They are first in the header since they may affect the handling of the
+rest of the fields in a future version. They are defined as follows:
+
+1 indicates whether to read a chunk of sidedata (of variable length) right
+ after the revision flags.
+
+
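To make the layout concrete, a small parsing sketch following the field order
described above (illustrative only; Mercurial's changegroup unpacker remains
the authority):

    import struct

    # v4 delta header: 1-byte protocol flags, five 20-byte nodes, then the
    # 2-byte storage flags -- 103 bytes in total.
    DELTA_HEADER_V4 = struct.Struct(">B20s20s20s20s20sH")

    def parse_delta_header_v4(header):
        pflags, node, p1, p2, base, link, flags = DELTA_HEADER_V4.unpack(header)
        sidedata_follows = bool(pflags & 1)  # flag 1: a sidedata chunk follows
        return node, p1, p2, base, link, flags, sidedata_follows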
Changeset Segment
=================
@@ -166,9 +188,9 @@ the boundary to the next segment (either
Treemanifests Segment
---------------------
-The *treemanifests segment* only exists in changegroup version ``3``, and
-only if the 'treemanifest' param is part of the bundle2 changegroup part
-(it is not possible to use changegroup version 3 outside of bundle2).
+The *treemanifests segment* only exists in changegroup version ``3`` and ``4``,
+and only if the 'treemanifest' param is part of the bundle2 changegroup part
+(it is not possible to use changegroup version 3 or 4 outside of bundle2).
Aside from the filenames in the *treemanifests segment* containing a
trailing ``/`` character, it behaves identically to the *filelogs segment*
(see below). The final sub-segment is followed by an *empty chunk* (logically,
diff --git a/mercurial/hg.py b/mercurial/hg.py
--- a/mercurial/hg.py
+++ b/mercurial/hg.py
@@ -16,8 +16,7 @@ import stat
from .i18n import _
from .node import (
hex,
- nullhex,
- nullid,
+ sha1nodeconstants,
short,
)
from .pycompat import getattr
@@ -25,7 +24,6 @@ from .pycompat import getattr
from . import (
bookmarks,
bundlerepo,
- cacheutil,
cmdutil,
destutil,
discovery,
@@ -53,6 +51,7 @@ from . import (
verify as verifymod,
vfs as vfsmod,
)
+from .interfaces import repository as repositorymod
from .utils import (
hashutil,
stringutil,
@@ -568,7 +567,7 @@ def clonewithshare(
# Resolve the value to put in [paths] section for the source.
if islocal(source):
- defaultpath = os.path.abspath(urlutil.urllocalpath(source))
+ defaultpath = util.abspath(urlutil.urllocalpath(source))
else:
defaultpath = source
@@ -772,7 +771,7 @@ def clone(
},
).result()
- if rootnode != nullid:
+ if rootnode != sha1nodeconstants.nullid:
sharepath = os.path.join(sharepool, hex(rootnode))
else:
ui.status(
@@ -822,10 +821,15 @@ def clone(
abspath = origsource
if islocal(origsource):
- abspath = os.path.abspath(urlutil.urllocalpath(origsource))
+ abspath = util.abspath(urlutil.urllocalpath(origsource))
if islocal(dest):
- cleandir = dest
+ if os.path.exists(dest):
+ # only clean up directories we create ourselves
+ hgdir = os.path.realpath(os.path.join(dest, b".hg"))
+ cleandir = hgdir
+ else:
+ cleandir = dest
copy = False
if (
@@ -852,38 +856,26 @@ def clone(
if copy:
srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
- hgdir = os.path.realpath(os.path.join(dest, b".hg"))
- if not os.path.exists(dest):
- util.makedirs(dest)
- else:
- # only clean up directories we create ourselves
- cleandir = hgdir
- try:
- destpath = hgdir
- util.makedir(destpath, notindexed=True)
- except OSError as inst:
- if inst.errno == errno.EEXIST:
- cleandir = None
- raise error.Abort(
- _(b"destination '%s' already exists") % dest
- )
- raise
- destlock = copystore(ui, srcrepo, destpath)
- # copy bookmarks over
- srcbookmarks = srcrepo.vfs.join(b'bookmarks')
- dstbookmarks = os.path.join(destpath, b'bookmarks')
- if os.path.exists(srcbookmarks):
- util.copyfile(srcbookmarks, dstbookmarks)
+ destrootpath = urlutil.urllocalpath(dest)
+ dest_reqs = localrepo.clone_requirements(ui, createopts, srcrepo)
+ localrepo.createrepository(
+ ui,
+ destrootpath,
+ requirements=dest_reqs,
+ )
+ destrepo = localrepo.makelocalrepository(ui, destrootpath)
+ destlock = destrepo.lock()
+ from . import streamclone # avoid cycle
- dstcachedir = os.path.join(destpath, b'cache')
- for cache in cacheutil.cachetocopy(srcrepo):
- _copycache(srcrepo, dstcachedir, cache)
+ streamclone.local_copy(srcrepo, destrepo)
# we need to re-init the repo after manually copying the data
# into it
destpeer = peer(srcrepo, peeropts, dest)
- srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
+ srcrepo.hook(
+ b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
+ )
else:
try:
# only pass ui when no srcrepo
@@ -1053,7 +1045,7 @@ def clone(
# as the only "bad" outcome would be some slowness. That potential
# slowness already affect reader.
with destrepo.lock():
- destrepo.updatecaches(full=b"post-clone")
+ destrepo.updatecaches(caches=repositorymod.CACHES_POST_CLONE)
finally:
release(srclock, destlock)
if cleandir is not None:
@@ -1329,7 +1321,9 @@ def incoming(ui, repo, source, opts, sub
for n in chlist:
if limit is not None and count >= limit:
break
- parents = [p for p in other.changelog.parents(n) if p != nullid]
+ parents = [
+ p for p in other.changelog.parents(n) if p != repo.nullid
+ ]
if opts.get(b'no_merges') and len(parents) == 2:
continue
count += 1
@@ -1406,7 +1400,7 @@ def _outgoing_filter(repo, revs, opts):
for n in revs:
if limit is not None and count >= limit:
break
- parents = [p for p in cl.parents(n) if p != nullid]
+ parents = [p for p in cl.parents(n) if p != repo.nullid]
if no_merges and len(parents) == 2:
continue
count += 1
diff --git a/mercurial/hgweb/hgwebdir_mod.py b/mercurial/hgweb/hgwebdir_mod.py
--- a/mercurial/hgweb/hgwebdir_mod.py
+++ b/mercurial/hgweb/hgwebdir_mod.py
@@ -70,7 +70,7 @@ def findrepos(paths):
except KeyError:
repos.append((prefix, root))
continue
- roothead = os.path.normpath(os.path.abspath(roothead))
+ roothead = os.path.normpath(util.abspath(roothead))
paths = scmutil.walkrepos(roothead, followsym=True, recurse=recurse)
repos.extend(urlrepos(prefix, roothead, paths))
return repos
diff --git a/mercurial/hgweb/server.py b/mercurial/hgweb/server.py
--- a/mercurial/hgweb/server.py
+++ b/mercurial/hgweb/server.py
@@ -344,7 +344,7 @@ class _httprequesthandlerssl(_httpreques
try:
import threading
- threading.activeCount() # silence pyflakes and bypass demandimport
+ threading.active_count() # silence pyflakes and bypass demandimport
_mixin = socketserver.ThreadingMixIn
except ImportError:
if util.safehasattr(os, b"fork"):
diff --git a/mercurial/hgweb/webutil.py b/mercurial/hgweb/webutil.py
--- a/mercurial/hgweb/webutil.py
+++ b/mercurial/hgweb/webutil.py
@@ -14,7 +14,7 @@ import os
import re
from ..i18n import _
-from ..node import hex, nullid, short
+from ..node import hex, short
from ..pycompat import setattr
from .common import (
@@ -220,7 +220,7 @@ def _ctxsgen(context, ctxs):
def _siblings(siblings=None, hiderev=None):
if siblings is None:
siblings = []
- siblings = [s for s in siblings if s.node() != nullid]
+ siblings = [s for s in siblings if s.node() != s.repo().nullid]
if len(siblings) == 1 and siblings[0].rev() == hiderev:
siblings = []
return templateutil.mappinggenerator(_ctxsgen, args=(siblings,))
@@ -316,12 +316,16 @@ def _nodenamesgen(context, f, node, name
yield {name: t}
-def showtag(repo, t1, node=nullid):
+def showtag(repo, t1, node=None):
+ if node is None:
+ node = repo.nullid
args = (repo.nodetags, node, b'tag')
return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
-def showbookmark(repo, t1, node=nullid):
+def showbookmark(repo, t1, node=None):
+ if node is None:
+ node = repo.nullid
args = (repo.nodebookmarks, node, b'bookmark')
return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
diff --git a/mercurial/interfaces/dirstate.py b/mercurial/interfaces/dirstate.py
--- a/mercurial/interfaces/dirstate.py
+++ b/mercurial/interfaces/dirstate.py
@@ -2,13 +2,19 @@ from __future__ import absolute_import,
import contextlib
-from .. import node as nodemod
-
from . import util as interfaceutil
class idirstate(interfaceutil.Interface):
- def __init__(opener, ui, root, validate, sparsematchfn, nodeconstants):
+ def __init__(
+ opener,
+ ui,
+ root,
+ validate,
+ sparsematchfn,
+ nodeconstants,
+ use_dirstate_v2,
+ ):
"""Create a new dirstate object.
opener is an open()-like callable that can be used to open the
@@ -78,7 +84,7 @@ class idirstate(interfaceutil.Interface)
"""Iterate the dirstate's contained filenames as bytestrings."""
def items():
- """Iterate the dirstate's entries as (filename, dirstatetuple).
+        """Iterate the dirstate's entries as (filename, DirstateItem).
As usual, filename is a bytestring.
"""
@@ -97,7 +103,7 @@ class idirstate(interfaceutil.Interface)
def branch():
pass
- def setparents(p1, p2=nodemod.nullid):
+ def setparents(p1, p2=None):
"""Set dirstate parents to p1 and p2.
When moving from two parents to one, 'm' merged entries a
diff --git a/mercurial/interfaces/repository.py b/mercurial/interfaces/repository.py
--- a/mercurial/interfaces/repository.py
+++ b/mercurial/interfaces/repository.py
@@ -1,4 +1,5 @@
# repository.py - Interfaces and base classes for repositories and peers.
+# coding: utf-8
#
# Copyright 2017 Gregory Szorc
#
@@ -21,20 +22,20 @@ REPO_FEATURE_SHARED_STORAGE = b'sharedst
REPO_FEATURE_LFS = b'lfs'
# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'
+# Repository supports (at least) some sidedata to be stored
+REPO_FEATURE_SIDE_DATA = b'side-data'
# Files storage may lack data for all ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
-REVISION_FLAG_SIDEDATA = 1 << 12
-REVISION_FLAG_HASCOPIESINFO = 1 << 11
+REVISION_FLAG_HASCOPIESINFO = 1 << 12
REVISION_FLAGS_KNOWN = (
REVISION_FLAG_CENSORED
| REVISION_FLAG_ELLIPSIS
| REVISION_FLAG_EXTSTORED
- | REVISION_FLAG_SIDEDATA
| REVISION_FLAG_HASCOPIESINFO
)
@@ -44,6 +45,54 @@ CG_DELTAMODE_FULL = b'fulltext'
CG_DELTAMODE_P1 = b'p1'
+## Cache related constants:
+#
+# Used to control which cache should be warmed in a repo.updatecaches(…) call.
+
+# Warm branchmaps of all known repoview's filter-level
+CACHE_BRANCHMAP_ALL = b"branchmap-all"
+# Warm branchmaps of repoview's filter-level used by server
+CACHE_BRANCHMAP_SERVED = b"branchmap-served"
+# Warm internal changelog cache (eg: persistent nodemap)
+CACHE_CHANGELOG_CACHE = b"changelog-cache"
+# Warm full manifest cache
+CACHE_FULL_MANIFEST = b"full-manifest"
+# Warm file-node-tags cache
+CACHE_FILE_NODE_TAGS = b"file-node-tags"
+# Warm internal manifestlog cache (eg: persistent nodemap)
+CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"
+# Warm rev branch cache
+CACHE_REV_BRANCH = b"rev-branch-cache"
+# Warm tags' cache for default repoview
+CACHE_TAGS_DEFAULT = b"tags-default"
+# Warm tags' cache for repoview's filter-level used by server
+CACHE_TAGS_SERVED = b"tags-served"
+
+# the caches to warm by default after a simple transaction
+# (this is a mutable set to let extensions update it)
+CACHES_DEFAULT = {
+ CACHE_BRANCHMAP_SERVED,
+}
+
+# the caches to warm when warming all of them
+# (this is a mutable set to let extensions update it)
+CACHES_ALL = {
+ CACHE_BRANCHMAP_SERVED,
+ CACHE_BRANCHMAP_ALL,
+ CACHE_CHANGELOG_CACHE,
+ CACHE_FILE_NODE_TAGS,
+ CACHE_FULL_MANIFEST,
+ CACHE_MANIFESTLOG_CACHE,
+ CACHE_TAGS_DEFAULT,
+ CACHE_TAGS_SERVED,
+}
+
+# the caches to warm by default after a local clone
+# (this is a mutable set to let extensions update it)
+CACHES_POST_CLONE = CACHES_ALL.copy()
+CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)
+
+
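Since these sets are deliberately mutable, an extension can opt extra caches
in; a minimal sketch (the extension itself is hypothetical):

    from mercurial.interfaces import repository

    def uisetup(ui):
        # Also warm the served tags cache after every transaction, not just
        # the served branchmap.
        repository.CACHES_DEFAULT.add(repository.CACHE_TAGS_SERVED)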
class ipeerconnection(interfaceutil.Interface):
"""Represents a "connection" to a repository.
@@ -457,6 +506,13 @@ class irevisiondelta(interfaceutil.Inter
"""Raw sidedata bytes for the given revision."""
)
+ protocol_flags = interfaceutil.Attribute(
+ """Single byte of integer flags that can influence the protocol.
+
+ This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
+ """
+ )
+
class ifilerevisionssequence(interfaceutil.Interface):
"""Contains index data for all revisions of a file.
@@ -1162,13 +1218,6 @@ class imanifeststorage(interfaceutil.Int
"""An ``ifilerevisionssequence`` instance."""
)
- indexfile = interfaceutil.Attribute(
- """Path of revlog index file.
-
- TODO this is revlog specific and should not be exposed.
- """
- )
-
opener = interfaceutil.Attribute(
"""VFS opener to use to access underlying files used for storage.
@@ -1176,13 +1225,6 @@ class imanifeststorage(interfaceutil.Int
"""
)
- version = interfaceutil.Attribute(
- """Revlog version number.
-
- TODO this is revlog specific and should not be exposed.
- """
- )
-
_generaldelta = interfaceutil.Attribute(
"""Whether generaldelta storage is being used.
@@ -1851,7 +1893,9 @@ class ilocalrepositorymain(interfaceutil
def savecommitmessage(text):
pass
- def register_sidedata_computer(kind, category, keys, computer):
+ def register_sidedata_computer(
+ kind, category, keys, computer, flags, replace=False
+ ):
pass
def register_wanted_sidedata(category):
diff --git a/mercurial/localrepo.py b/mercurial/localrepo.py
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -19,7 +19,6 @@ from .i18n import _
from .node import (
bin,
hex,
- nullid,
nullrev,
sha1nodeconstants,
short,
@@ -50,7 +49,6 @@ from . import (
match as matchmod,
mergestate as mergestatemod,
mergeutil,
- metadata as metadatamod,
namespaces,
narrowspec,
obsolete,
@@ -91,6 +89,7 @@ from .utils import (
from .revlogutils import (
concurrency_checker as revlogchecker,
constants as revlogconst,
+ sidedata as sidedatamod,
)
release = lockmod.release
@@ -738,6 +737,14 @@ def makelocalrepository(baseui, path, in
storevfs = store.vfs
storevfs.options = resolvestorevfsoptions(ui, requirements, features)
+ if (
+ requirementsmod.REVLOGV2_REQUIREMENT in requirements
+ or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
+ ):
+ features.add(repository.REPO_FEATURE_SIDE_DATA)
+    # the revlogv2 docket introduced a race condition that we need to fix
+ features.discard(repository.REPO_FEATURE_STREAM_CLONE)
+
# The cache vfs is used to manage cache files.
cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
cachevfs.createmode = store.createmode
@@ -880,6 +887,9 @@ def gathersupportedrequirements(ui):
# Start with all requirements supported by this file.
supported = set(localrepository._basesupported)
+ if dirstate.SUPPORTS_DIRSTATE_V2:
+ supported.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
+
# Execute ``featuresetupfuncs`` entries if they belong to an extension
# relevant to this ui instance.
modules = {m.__name__ for n, m in extensions.extensions(ui)}
@@ -1017,6 +1027,8 @@ def resolverevlogstorevfsoptions(ui, req
options[b'revlogv1'] = True
if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
options[b'revlogv2'] = True
+ if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
+ options[b'changelogv2'] = True
if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
options[b'generaldelta'] = True
@@ -1064,9 +1076,6 @@ def resolverevlogstorevfsoptions(ui, req
if sparserevlog:
options[b'generaldelta'] = True
- sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
- options[b'side-data'] = sidedata
-
maxchainlen = None
if sparserevlog:
maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
@@ -1219,7 +1228,7 @@ class localrepository(object):
requirementsmod.TREEMANIFEST_REQUIREMENT,
requirementsmod.COPIESSDC_REQUIREMENT,
requirementsmod.REVLOGV2_REQUIREMENT,
- requirementsmod.SIDEDATA_REQUIREMENT,
+ requirementsmod.CHANGELOGV2_REQUIREMENT,
requirementsmod.SPARSEREVLOG_REQUIREMENT,
requirementsmod.NODEMAP_REQUIREMENT,
bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
@@ -1408,7 +1417,7 @@ class localrepository(object):
self._wanted_sidedata = set()
self._sidedata_computers = {}
- metadatamod.set_sidedata_spec_for_repo(self)
+ sidedatamod.set_sidedata_spec_for_repo(self)
def _getvfsward(self, origfunc):
"""build a ward for self.vfs"""
@@ -1681,6 +1690,8 @@ class localrepository(object):
def _makedirstate(self):
"""Extension point for wrapping the dirstate per-repo."""
sparsematchfn = lambda: sparse.matcher(self)
+ v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
+ use_dirstate_v2 = v2_req in self.requirements
return dirstate.dirstate(
self.vfs,
@@ -1689,6 +1700,7 @@ class localrepository(object):
self._dirstatevalidate,
sparsematchfn,
self.nodeconstants,
+ use_dirstate_v2,
)
def _dirstatevalidate(self, node):
@@ -1702,7 +1714,7 @@ class localrepository(object):
_(b"warning: ignoring unknown working parent %s!\n")
% short(node)
)
- return nullid
+ return self.nullid
@storecache(narrowspec.FILENAME)
def narrowpats(self):
@@ -1753,9 +1765,9 @@ class localrepository(object):
@unfilteredpropertycache
def _quick_access_changeid_null(self):
return {
- b'null': (nullrev, nullid),
- nullrev: (nullrev, nullid),
- nullid: (nullrev, nullid),
+ b'null': (nullrev, self.nodeconstants.nullid),
+ nullrev: (nullrev, self.nodeconstants.nullid),
+ self.nullid: (nullrev, self.nullid),
}
@unfilteredpropertycache
@@ -1765,7 +1777,7 @@ class localrepository(object):
quick = self._quick_access_changeid_null.copy()
cl = self.unfiltered().changelog
for node in self.dirstate.parents():
- if node == nullid:
+ if node == self.nullid:
continue
rev = cl.index.get_rev(node)
if rev is None:
@@ -1785,7 +1797,7 @@ class localrepository(object):
quick[r] = pair
quick[n] = pair
p1node = self.dirstate.p1()
- if p1node != nullid:
+ if p1node != self.nullid:
quick[b'.'] = quick[p1node]
return quick
@@ -1841,7 +1853,7 @@ class localrepository(object):
# when we know that '.' won't be hidden
node = self.dirstate.p1()
rev = self.unfiltered().changelog.rev(node)
- elif len(changeid) == 20:
+ elif len(changeid) == self.nodeconstants.nodelen:
try:
node = changeid
rev = self.changelog.rev(changeid)
@@ -1862,7 +1874,7 @@ class localrepository(object):
changeid = hex(changeid) # for the error message
raise
- elif len(changeid) == 40:
+ elif len(changeid) == 2 * self.nodeconstants.nodelen:
node = bin(changeid)
rev = self.changelog.rev(node)
else:
@@ -2037,7 +2049,7 @@ class localrepository(object):
# local encoding.
tags = {}
for (name, (node, hist)) in pycompat.iteritems(alltags):
- if node != nullid:
+ if node != self.nullid:
tags[encoding.tolocal(name)] = node
tags[b'tip'] = self.changelog.tip()
tagtypes = {
@@ -2161,7 +2173,9 @@ class localrepository(object):
def wjoin(self, f, *insidef):
return self.vfs.reljoin(self.root, f, *insidef)
- def setparents(self, p1, p2=nullid):
+ def setparents(self, p1, p2=None):
+ if p2 is None:
+ p2 = self.nullid
self[None].setparents(p1, p2)
self._quick_access_changeid_invalidate()
@@ -2718,7 +2732,7 @@ class localrepository(object):
return updater
@unfilteredmethod
- def updatecaches(self, tr=None, full=False):
+ def updatecaches(self, tr=None, full=False, caches=None):
"""warm appropriate caches
If this function is called after a transaction closed. The transaction
@@ -2738,40 +2752,61 @@ class localrepository(object):
# later call to `destroyed` will refresh them.
return
- if tr is None or tr.changes[b'origrepolen'] < len(self):
- # accessing the 'served' branchmap should refresh all the others,
- self.ui.debug(b'updating the branch cache\n')
- self.filtered(b'served').branchmap()
- self.filtered(b'served.hidden').branchmap()
+ unfi = self.unfiltered()
if full:
- unfi = self.unfiltered()
-
+ msg = (
+ "`full` argument for `repo.updatecaches` is deprecated\n"
+                "(use `caches=repository.CACHES_ALL` instead)"
+            )
+            self.ui.deprecwarn(msg, b"5.9")
+            caches = repository.CACHES_ALL
+            if full == b"post-clone":
+                caches = repository.CACHES_POST_CLONE
+ elif caches is None:
+ caches = repository.CACHES_DEFAULT
+
+ if repository.CACHE_BRANCHMAP_SERVED in caches:
+ if tr is None or tr.changes[b'origrepolen'] < len(self):
+ # accessing the 'served' branchmap should refresh all the others,
+ self.ui.debug(b'updating the branch cache\n')
+ self.filtered(b'served').branchmap()
+ self.filtered(b'served.hidden').branchmap()
+
+ if repository.CACHE_CHANGELOG_CACHE in caches:
self.changelog.update_caches(transaction=tr)
+
+ if repository.CACHE_MANIFESTLOG_CACHE in caches:
self.manifestlog.update_caches(transaction=tr)
+ if repository.CACHE_REV_BRANCH in caches:
rbc = unfi.revbranchcache()
for r in unfi.changelog:
rbc.branchinfo(r)
rbc.write()
+ if repository.CACHE_FULL_MANIFEST in caches:
# ensure the working copy parents are in the manifestfulltextcache
for ctx in self[b'.'].parents():
ctx.manifest() # accessing the manifest is enough
- if not full == b"post-clone":
- # accessing fnode cache warms the cache
- tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
+ if repository.CACHE_FILE_NODE_TAGS in caches:
+ # accessing fnode cache warms the cache
+ tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
+
+ if repository.CACHE_TAGS_DEFAULT in caches:
# accessing tags warm the cache
self.tags()
+ if repository.CACHE_TAGS_SERVED in caches:
self.filtered(b'served').tags()
- # The `full` arg is documented as updating even the lazily-loaded
- # caches immediately, so we're forcing a write to cause these caches
- # to be warmed up even if they haven't explicitly been requested
- # yet (if they've never been used by hg, they won't ever have been
- # written, even if they're a subset of another kind of cache that
- # *has* been used).
+ if repository.CACHE_BRANCHMAP_ALL in caches:
+ # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
+ # so we're forcing a write to cause these caches to be warmed up
+ # even if they haven't explicitly been requested yet (if they've
+ # never been used by hg, they won't ever have been written, even if
+ # they're a subset of another kind of cache that *has* been used).
for filt in repoview.filtertable.keys():
filtered = self.filtered(filt)
filtered.branchmap().write(filtered)
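For reference, the new calling convention looks like this (a sketch; the
selected caches are arbitrary):

    from mercurial.interfaces import repository

    def warm_branch_and_tag_caches(repo):
        # Replaces the deprecated repo.updatecaches(full=True) pattern with
        # an explicit selection of caches to warm.
        repo.updatecaches(
            caches={
                repository.CACHE_BRANCHMAP_SERVED,
                repository.CACHE_TAGS_SERVED,
            }
        )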
@@ -3100,7 +3135,7 @@ class localrepository(object):
subrepoutil.writestate(self, newstate)
p1, p2 = self.dirstate.parents()
- hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
+ hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
try:
self.hook(
b"precommit", throw=True, parent1=hookp1, parent2=hookp2
@@ -3273,7 +3308,7 @@ class localrepository(object):
t = n
while True:
p = self.changelog.parents(n)
- if p[1] != nullid or p[0] == nullid:
+ if p[1] != self.nullid or p[0] == self.nullid:
b.append((t, n, p[0], p[1]))
break
n = p[0]
@@ -3286,7 +3321,7 @@ class localrepository(object):
n, l, i = top, [], 0
f = 1
- while n != bottom and n != nullid:
+ while n != bottom and n != self.nullid:
p = self.changelog.parents(n)[0]
if i == f:
l.append(n)
@@ -3370,20 +3405,32 @@ class localrepository(object):
return self.pathto(fp.name[len(self.root) + 1 :])
def register_wanted_sidedata(self, category):
+ if repository.REPO_FEATURE_SIDE_DATA not in self.features:
+ # Only revlogv2 repos can want sidedata.
+ return
self._wanted_sidedata.add(pycompat.bytestr(category))
- def register_sidedata_computer(self, kind, category, keys, computer):
- if kind not in (b"changelog", b"manifest", b"filelog"):
+ def register_sidedata_computer(
+ self, kind, category, keys, computer, flags, replace=False
+ ):
+ if kind not in revlogconst.ALL_KINDS:
msg = _(b"unexpected revlog kind '%s'.")
raise error.ProgrammingError(msg % kind)
category = pycompat.bytestr(category)
- if category in self._sidedata_computers.get(kind, []):
+ already_registered = category in self._sidedata_computers.get(kind, [])
+ if already_registered and not replace:
msg = _(
b"cannot register a sidedata computer twice for category '%s'."
)
raise error.ProgrammingError(msg % category)
+ if replace and not already_registered:
+ msg = _(
+ b"cannot replace a sidedata computer that isn't registered "
+ b"for category '%s'."
+ )
+ raise error.ProgrammingError(msg % category)
self._sidedata_computers.setdefault(kind, {})
- self._sidedata_computers[kind][category] = (keys, computer)
+ self._sidedata_computers[kind][category] = (keys, computer, flags)
# used to avoid circular references so destructors work
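A sketch of the extended registration API (the category, key and computer are
made up, and the computer's return shape is an assumption, not something this
hunk defines):

    from mercurial.revlogutils import constants as revlog_constants

    def my_computer(repo, revlog, rev, existing_sidedata):
        # Assumed return shape for the sketch: (sidedata mapping, extra flags).
        return {}, 0

    def setup_sidedata(repo):
        repo.register_sidedata_computer(
            revlog_constants.KIND_CHANGELOG,  # any kind from ALL_KINDS
            b'exp-my-category',               # hypothetical category
            (b'exp-my-key',),                 # keys this computer produces
            my_computer,
            0,                                # revision flags tied to this data
            replace=False,
        )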
@@ -3398,8 +3445,9 @@ def aftertrans(files):
vfs.tryunlink(dest)
try:
vfs.rename(src, dest)
- except OSError: # journal file does not yet exist
- pass
+ except OSError as exc: # journal file does not yet exist
+ if exc.errno != errno.ENOENT:
+ raise
return a
@@ -3437,6 +3485,24 @@ def defaultcreateopts(ui, createopts=Non
return createopts
+def clone_requirements(ui, createopts, srcrepo):
+ """clone the requirements of a local repo for a local clone
+
+ The store requirements are unchanged while the working copy requirements
+    depend on the configuration.
+ """
+ target_requirements = set()
+ createopts = defaultcreateopts(ui, createopts=createopts)
+ for r in newreporequirements(ui, createopts):
+ if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
+ target_requirements.add(r)
+
+ for r in srcrepo.requirements:
+ if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
+ target_requirements.add(r)
+ return target_requirements
+
+
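A condensed restatement of how the local-clone path in hg.py above uses this
helper (``ui``, ``src_repo`` and ``dest_path`` are stand-ins):

    from mercurial import localrepo

    def create_clone_target(ui, src_repo, dest_path):
        # Store requirements are copied from the clone source; working-copy
        # requirements follow the current configuration.
        dest_reqs = localrepo.clone_requirements(ui, None, src_repo)
        localrepo.createrepository(ui, dest_path, requirements=dest_reqs)
        return localrepo.makelocalrepository(ui, dest_path)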
def newreporequirements(ui, createopts):
"""Determine the set of requirements for a new local repository.
@@ -3507,25 +3573,33 @@ def newreporequirements(ui, createopts):
if ui.configbool(b'format', b'sparse-revlog'):
requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
- # experimental config: format.exp-use-side-data
- if ui.configbool(b'format', b'exp-use-side-data'):
- requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
- requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
- requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
+ # experimental config: format.exp-dirstate-v2
+ # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
+ if ui.configbool(b'format', b'exp-dirstate-v2'):
+ if dirstate.SUPPORTS_DIRSTATE_V2:
+ requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
+ else:
+ raise error.Abort(
+ _(
+ b"dirstate v2 format requested by config "
+ b"but not supported (requires Rust extensions)"
+ )
+ )
+
# experimental config: format.exp-use-copies-side-data-changeset
if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
- requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
- requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
- requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
+ requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
if ui.configbool(b'experimental', b'treemanifest'):
requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
+ changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
+ if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
+ requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
+
revlogv2 = ui.config(b'experimental', b'revlogv2')
if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
- # generaldelta is implied by revlogv2.
- requirements.discard(requirementsmod.GENERALDELTA_REQUIREMENT)
requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
# experimental config: format.internal-phase
if ui.configbool(b'format', b'internal-phase'):
@@ -3621,11 +3695,13 @@ def filterknowncreateopts(ui, createopts
return {k: v for k, v in createopts.items() if k not in known}
-def createrepository(ui, path, createopts=None):
+def createrepository(ui, path, createopts=None, requirements=None):
"""Create a new repository in a vfs.
``path`` path to the new repo's working directory.
``createopts`` options for the new repository.
+    ``requirements`` predefined set of requirements.
+ (incompatible with ``createopts``)
The following keys for ``createopts`` are recognized:
@@ -3648,27 +3724,34 @@ def createrepository(ui, path, createopt
Indicates that storage for files should be shallow (not all ancestor
revisions are known).
"""
- createopts = defaultcreateopts(ui, createopts=createopts)
-
- unknownopts = filterknowncreateopts(ui, createopts)
-
- if not isinstance(unknownopts, dict):
- raise error.ProgrammingError(
- b'filterknowncreateopts() did not return a dict'
- )
-
- if unknownopts:
- raise error.Abort(
- _(
- b'unable to create repository because of unknown '
- b'creation option: %s'
+
+ if requirements is not None:
+ if createopts is not None:
+ msg = b'cannot specify both createopts and requirements'
+ raise error.ProgrammingError(msg)
+ createopts = {}
+ else:
+ createopts = defaultcreateopts(ui, createopts=createopts)
+
+ unknownopts = filterknowncreateopts(ui, createopts)
+
+ if not isinstance(unknownopts, dict):
+ raise error.ProgrammingError(
+ b'filterknowncreateopts() did not return a dict'
)
- % b', '.join(sorted(unknownopts)),
- hint=_(b'is a required extension not loaded?'),
- )
-
- requirements = newreporequirements(ui, createopts=createopts)
- requirements -= checkrequirementscompat(ui, requirements)
+
+ if unknownopts:
+ raise error.Abort(
+ _(
+ b'unable to create repository because of unknown '
+ b'creation option: %s'
+ )
+ % b', '.join(sorted(unknownopts)),
+ hint=_(b'is a required extension not loaded?'),
+ )
+
+ requirements = newreporequirements(ui, createopts=createopts)
+ requirements -= checkrequirementscompat(ui, requirements)
wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
diff --git a/mercurial/logcmdutil.py b/mercurial/logcmdutil.py
--- a/mercurial/logcmdutil.py
+++ b/mercurial/logcmdutil.py
@@ -12,12 +12,7 @@ import os
import posixpath
from .i18n import _
-from .node import (
- nullid,
- nullrev,
- wdirid,
- wdirrev,
-)
+from .node import nullrev, wdirrev
from .thirdparty import attr
@@ -98,9 +93,8 @@ def diff_parent(ctx):
},
b"merge-diff",
):
- repo.ui.pushbuffer()
- merge.merge(ctx.p2(), wc=wctx)
- repo.ui.popbuffer()
+ with repo.ui.silent():
+ merge.merge(ctx.p2(), wc=wctx)
return wctx
else:
return ctx.p1()
@@ -357,7 +351,7 @@ class changesetprinter(object):
if self.ui.debugflag:
mnode = ctx.manifestnode()
if mnode is None:
- mnode = wdirid
+ mnode = self.repo.nodeconstants.wdirid
mrev = wdirrev
else:
mrev = self.repo.manifestlog.rev(mnode)
@@ -505,7 +499,11 @@ class changesetformatter(changesetprinte
)
if self.ui.debugflag or b'manifest' in datahint:
- fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid))
+ fm.data(
+ manifest=fm.hexfunc(
+ ctx.manifestnode() or self.repo.nodeconstants.wdirid
+ )
+ )
if self.ui.debugflag or b'extra' in datahint:
fm.data(extra=fm.formatdict(ctx.extra()))
@@ -991,7 +989,7 @@ def _initialrevs(repo, wopts):
"""Return the initial set of revisions to be filtered or followed"""
if wopts.revspec:
revs = scmutil.revrange(repo, wopts.revspec)
- elif wopts.follow and repo.dirstate.p1() == nullid:
+ elif wopts.follow and repo.dirstate.p1() == repo.nullid:
revs = smartset.baseset()
elif wopts.follow:
revs = repo.revs(b'.')
diff --git a/mercurial/manifest.py b/mercurial/manifest.py
--- a/mercurial/manifest.py
+++ b/mercurial/manifest.py
@@ -16,7 +16,6 @@ from .i18n import _
from .node import (
bin,
hex,
- nullid,
nullrev,
)
from .pycompat import getattr
@@ -35,6 +34,9 @@ from .interfaces import (
repository,
util as interfaceutil,
)
+from .revlogutils import (
+ constants as revlog_constants,
+)
parsers = policy.importmod('parsers')
propertycache = util.propertycache
@@ -43,7 +45,7 @@ propertycache = util.propertycache
FASTDELTA_TEXTDIFF_THRESHOLD = 1000
-def _parse(data):
+def _parse(nodelen, data):
# This method does a little bit of excessive-looking
# precondition checking. This is so that the behavior of this
# class exactly matches its C counterpart to try and help
@@ -64,7 +66,7 @@ def _parse(data):
nl -= 1
else:
flags = b''
- if nl not in (40, 64):
+ if nl != 2 * nodelen:
raise ValueError(b'Invalid manifest line')
yield f, bin(n), flags
@@ -132,7 +134,7 @@ class lazymanifestiterentries(object):
else:
hlen = nlpos - zeropos - 1
flags = b''
- if hlen not in (40, 64):
+ if hlen != 2 * self.lm._nodelen:
raise error.StorageError(b'Invalid manifest line')
hashval = unhexlify(
data, self.lm.extrainfo[self.pos], zeropos + 1, hlen
@@ -177,12 +179,14 @@ class _lazymanifest(object):
def __init__(
self,
+ nodelen,
data,
positions=None,
extrainfo=None,
extradata=None,
hasremovals=False,
):
+ self._nodelen = nodelen
if positions is None:
self.positions = self.findlines(data)
self.extrainfo = [0] * len(self.positions)
@@ -289,7 +293,7 @@ class _lazymanifest(object):
hlen -= 1
else:
flags = b''
- if hlen not in (40, 64):
+ if hlen != 2 * self._nodelen:
raise error.StorageError(b'Invalid manifest line')
hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen)
return (hashval, flags)
@@ -345,6 +349,7 @@ class _lazymanifest(object):
def copy(self):
# XXX call _compact like in C?
return _lazymanifest(
+ self._nodelen,
self.data,
self.positions,
self.extrainfo,
@@ -455,7 +460,7 @@ class _lazymanifest(object):
def filtercopy(self, filterfn):
# XXX should be optimized
- c = _lazymanifest(b'')
+ c = _lazymanifest(self._nodelen, b'')
for f, n, fl in self.iterentries():
if filterfn(f):
c[f] = n, fl
@@ -470,8 +475,9 @@ except AttributeError:
@interfaceutil.implementer(repository.imanifestdict)
class manifestdict(object):
- def __init__(self, data=b''):
- self._lm = _lazymanifest(data)
+ def __init__(self, nodelen, data=b''):
+ self._nodelen = nodelen
+ self._lm = _lazymanifest(nodelen, data)
def __getitem__(self, key):
return self._lm[key][0]
@@ -579,14 +585,14 @@ class manifestdict(object):
return self.copy()
if self._filesfastpath(match):
- m = manifestdict()
+ m = manifestdict(self._nodelen)
lm = self._lm
for fn in match.files():
if fn in lm:
m._lm[fn] = lm[fn]
return m
- m = manifestdict()
+ m = manifestdict(self._nodelen)
m._lm = self._lm.filtercopy(match)
return m
@@ -629,7 +635,7 @@ class manifestdict(object):
return b''
def copy(self):
- c = manifestdict()
+ c = manifestdict(self._nodelen)
c._lm = self._lm.copy()
return c
@@ -795,7 +801,8 @@ class treemanifest(object):
def __init__(self, nodeconstants, dir=b'', text=b''):
self._dir = dir
self.nodeconstants = nodeconstants
- self._node = nullid
+ self._node = self.nodeconstants.nullid
+ self._nodelen = self.nodeconstants.nodelen
self._loadfunc = _noop
self._copyfunc = _noop
self._dirty = False
@@ -1323,7 +1330,7 @@ class treemanifest(object):
def parse(self, text, readsubtree):
selflazy = self._lazydirs
- for f, n, fl in _parse(text):
+ for f, n, fl in _parse(self._nodelen, text):
if fl == b't':
f = f + b'/'
# False below means "doesn't need to be copied" and can use the
@@ -1391,7 +1398,7 @@ class treemanifest(object):
continue
subp1 = getnode(m1, d)
subp2 = getnode(m2, d)
- if subp1 == nullid:
+ if subp1 == self.nodeconstants.nullid:
subp1, subp2 = subp2, subp1
writesubtree(subm, subp1, subp2, match)
@@ -1560,7 +1567,6 @@ class manifestrevlog(object):
opener,
tree=b'',
dirlogcache=None,
- indexfile=None,
treemanifest=False,
):
"""Constructs a new manifest revlog
@@ -1591,10 +1597,9 @@ class manifestrevlog(object):
if tree:
assert self._treeondisk, b'opts is %r' % opts
- if indexfile is None:
- indexfile = b'00manifest.i'
- if tree:
- indexfile = b"meta/" + tree + indexfile
+ radix = b'00manifest'
+ if tree:
+ radix = b"meta/" + tree + radix
self.tree = tree
@@ -1606,7 +1611,8 @@ class manifestrevlog(object):
self._revlog = revlog.revlog(
opener,
- indexfile,
+ target=(revlog_constants.KIND_MANIFESTLOG, self.tree),
+ radix=radix,
# only root indexfile is cached
checkambig=not bool(tree),
mmaplargeindex=True,
@@ -1615,9 +1621,7 @@ class manifestrevlog(object):
)
self.index = self._revlog.index
- self.version = self._revlog.version
self._generaldelta = self._revlog._generaldelta
- self._revlog.revlog_kind = b'manifest'
def _setupmanifestcachehooks(self, repo):
"""Persist the manifestfulltextcache on lock release"""
@@ -1901,14 +1905,6 @@ class manifestrevlog(object):
)
@property
- def indexfile(self):
- return self._revlog.indexfile
-
- @indexfile.setter
- def indexfile(self, value):
- self._revlog.indexfile = value
-
- @property
def opener(self):
return self._revlog.opener
@@ -1994,7 +1990,7 @@ class manifestlog(object):
else:
m = manifestctx(self, node)
- if node != nullid:
+ if node != self.nodeconstants.nullid:
mancache = self._dirmancache.get(tree)
if not mancache:
mancache = util.lrucachedict(self._cachesize)
@@ -2020,7 +2016,7 @@ class manifestlog(object):
class memmanifestctx(object):
def __init__(self, manifestlog):
self._manifestlog = manifestlog
- self._manifestdict = manifestdict()
+ self._manifestdict = manifestdict(manifestlog.nodeconstants.nodelen)
def _storage(self):
return self._manifestlog.getstorage(b'')
@@ -2082,8 +2078,9 @@ class manifestctx(object):
def read(self):
if self._data is None:
- if self._node == nullid:
- self._data = manifestdict()
+ nc = self._manifestlog.nodeconstants
+ if self._node == nc.nullid:
+ self._data = manifestdict(nc.nodelen)
else:
store = self._storage()
if self._node in store.fulltextcache:
@@ -2092,7 +2089,7 @@ class manifestctx(object):
text = store.revision(self._node)
arraytext = bytearray(text)
store.fulltextcache[self._node] = arraytext
- self._data = manifestdict(text)
+ self._data = manifestdict(nc.nodelen, text)
return self._data
def readfast(self, shallow=False):
@@ -2119,7 +2116,7 @@ class manifestctx(object):
store = self._storage()
r = store.rev(self._node)
d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
- return manifestdict(d)
+ return manifestdict(store.nodeconstants.nodelen, d)
def find(self, key):
return self.read().find(key)
@@ -2188,7 +2185,7 @@ class treemanifestctx(object):
def read(self):
if self._data is None:
store = self._storage()
- if self._node == nullid:
+ if self._node == self._manifestlog.nodeconstants.nullid:
self._data = treemanifest(self._manifestlog.nodeconstants)
# TODO accessing non-public API
elif store._treeondisk:
@@ -2245,7 +2242,7 @@ class treemanifestctx(object):
if shallow:
r = store.rev(self._node)
d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
- return manifestdict(d)
+ return manifestdict(store.nodeconstants.nodelen, d)
else:
# Need to perform a slow delta
r0 = store.deltaparent(store.rev(self._node))
@@ -2274,7 +2271,9 @@ class treemanifestctx(object):
return self.readdelta(shallow=shallow)
if shallow:
- return manifestdict(store.revision(self._node))
+ return manifestdict(
+ store.nodeconstants.nodelen, store.revision(self._node)
+ )
else:
return self.read()
diff --git a/mercurial/merge.py b/mercurial/merge.py
--- a/mercurial/merge.py
+++ b/mercurial/merge.py
@@ -13,12 +13,7 @@ import stat
import struct
from .i18n import _
-from .node import (
- addednodeid,
- modifiednodeid,
- nullid,
- nullrev,
-)
+from .node import nullrev
from .thirdparty import attr
from .utils import stringutil
from . import (
@@ -779,7 +774,7 @@ def manifestmerge(
# to flag the change. If wctx is a committed revision, we shouldn't
# care for the dirty state of the working directory.
if any(wctx.sub(s).dirty() for s in wctx.substate):
- m1[b'.hgsubstate'] = modifiednodeid
+ m1[b'.hgsubstate'] = repo.nodeconstants.modifiednodeid
# Don't use m2-vs-ma optimization if:
# - ma is the same as m1 or m2, which we're just going to diff again later
@@ -944,7 +939,7 @@ def manifestmerge(
mresult.addcommitinfo(
f, b'merge-removal-candidate', b'yes'
)
- elif n1 == addednodeid:
+ elif n1 == repo.nodeconstants.addednodeid:
# This file was locally added. We should forget it instead of
# deleting it.
mresult.addfile(
@@ -1729,20 +1724,13 @@ def applyupdates(
removed += msremoved
extraactions = ms.actions()
- if extraactions:
- for k, acts in pycompat.iteritems(extraactions):
- for a in acts:
- mresult.addfile(a[0], k, *a[1:])
- if k == mergestatemod.ACTION_GET and wantfiledata:
- # no filedata until mergestate is updated to provide it
- for a in acts:
- getfiledata[a[0]] = None
progress.complete()
- assert len(getfiledata) == (
- mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
+ return (
+ updateresult(updated, merged, removed, unresolved),
+ getfiledata,
+ extraactions,
)
- return updateresult(updated, merged, removed, unresolved), getfiledata
def _advertisefsmonitor(repo, num_gets, p1node):
@@ -1785,7 +1773,7 @@ def _advertisefsmonitor(repo, num_gets,
if (
fsmonitorwarning
and not fsmonitorenabled
- and p1node == nullid
+ and p1node == repo.nullid
and num_gets >= fsmonitorthreshold
and pycompat.sysplatform.startswith((b'linux', b'darwin'))
):
@@ -1913,7 +1901,7 @@ def _update(
else:
if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
- pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
+ pas = [repo[anc] for anc in (sorted(cahs) or [repo.nullid])]
else:
pas = [p1.ancestor(p2, warn=branchmerge)]
@@ -2112,7 +2100,7 @@ def _update(
### apply phase
if not branchmerge: # just jump to the new rev
- fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
+ fp1, fp2, xp1, xp2 = fp2, repo.nullid, xp2, b''
# If we're doing a partial update, we need to skip updating
# the dirstate.
always = matcher is None or matcher.always()
@@ -2127,7 +2115,7 @@ def _update(
)
wantfiledata = updatedirstate and not branchmerge
- stats, getfiledata = applyupdates(
+ stats, getfiledata, extraactions = applyupdates(
repo,
mresult,
wc,
@@ -2138,6 +2126,18 @@ def _update(
)
if updatedirstate:
+ if extraactions:
+ for k, acts in pycompat.iteritems(extraactions):
+ for a in acts:
+ mresult.addfile(a[0], k, *a[1:])
+ if k == mergestatemod.ACTION_GET and wantfiledata:
+ # no filedata until mergestate is updated to provide it
+ for a in acts:
+ getfiledata[a[0]] = None
+
+ assert len(getfiledata) == (
+ mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
+ )
with repo.dirstate.parentchange():
repo.setparents(fp1, fp2)
mergestatemod.recordupdates(
@@ -2149,10 +2149,10 @@ def _update(
if not branchmerge:
repo.dirstate.setbranch(p2.branch())
- # If we're updating to a location, clean up any stale temporary includes
- # (ex: this happens during hg rebase --abort).
- if not branchmerge:
- sparse.prunetemporaryincludes(repo)
+ # If we're updating to a location, clean up any stale temporary includes
+ # (ex: this happens during hg rebase --abort).
+ if not branchmerge:
+ sparse.prunetemporaryincludes(repo)
if updatedirstate:
repo.hook(
@@ -2281,14 +2281,14 @@ def graft(
if keepconflictparent and stats.unresolvedcount:
pother = ctx.node()
else:
- pother = nullid
+ pother = repo.nullid
parents = ctx.parents()
if keepparent and len(parents) == 2 and base in parents:
parents.remove(base)
pother = parents[0].node()
# Never set both parents equal to each other
if pother == pctx.node():
- pother = nullid
+ pother = repo.nullid
if wctx.isinmemory():
wctx.setparents(pctx.node(), pother)
diff --git a/mercurial/mergestate.py b/mercurial/mergestate.py
--- a/mercurial/mergestate.py
+++ b/mercurial/mergestate.py
@@ -9,7 +9,6 @@ from .i18n import _
from .node import (
bin,
hex,
- nullhex,
nullrev,
)
from . import (
@@ -32,7 +31,7 @@ def _droponode(data):
def _filectxorabsent(hexnode, ctx, f):
- if hexnode == nullhex:
+ if hexnode == ctx.repo().nodeconstants.nullhex:
return filemerge.absentfilectx(ctx, f)
else:
return ctx[f]
@@ -248,7 +247,7 @@ class _mergestate_base(object):
note: also write the local version to the `.hg/merge` directory.
"""
if fcl.isabsent():
- localkey = nullhex
+ localkey = self._repo.nodeconstants.nullhex
else:
localkey = mergestate.getlocalkey(fcl.path())
self._make_backup(fcl, localkey)
@@ -354,7 +353,7 @@ class _mergestate_base(object):
flags = flo
if preresolve:
# restore local
- if localkey != nullhex:
+ if localkey != self._repo.nodeconstants.nullhex:
self._restore_backup(wctx[dfile], localkey, flags)
else:
wctx[dfile].remove(ignoremissing=True)
@@ -658,7 +657,10 @@ class mergestate(_mergestate_base):
records.append(
(RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
)
- elif v[1] == nullhex or v[6] == nullhex:
+ elif (
+ v[1] == self._repo.nodeconstants.nullhex
+ or v[6] == self._repo.nodeconstants.nullhex
+ ):
# Change/Delete or Delete/Change conflicts. These are stored in
# 'C' records. v[1] is the local file, and is nullhex when the
# file is deleted locally ('dc'). v[6] is the remote file, and
@@ -741,38 +743,42 @@ def recordupdates(repo, actions, branchm
# remove (must come first)
for f, args, msg in actions.get(ACTION_REMOVE, []):
if branchmerge:
- repo.dirstate.remove(f)
+ repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=False)
else:
- repo.dirstate.drop(f)
+ repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
# forget (must come first)
for f, args, msg in actions.get(ACTION_FORGET, []):
- repo.dirstate.drop(f)
+ repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
# resolve path conflicts
for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
(f0, origf0) = args
- repo.dirstate.add(f)
+ repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
repo.dirstate.copy(origf0, f)
if f0 == origf0:
- repo.dirstate.remove(f0)
+ repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
else:
- repo.dirstate.drop(f0)
+ repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
# re-add
for f, args, msg in actions.get(ACTION_ADD, []):
- repo.dirstate.add(f)
+ repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
# re-add/mark as modified
for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
if branchmerge:
- repo.dirstate.normallookup(f)
+ repo.dirstate.update_file(
+ f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
+ )
else:
- repo.dirstate.add(f)
+ repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
# exec change
for f, args, msg in actions.get(ACTION_EXEC, []):
- repo.dirstate.normallookup(f)
+ repo.dirstate.update_file(
+ f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
+ )
# keep
for f, args, msg in actions.get(ACTION_KEEP, []):
@@ -789,10 +795,22 @@ def recordupdates(repo, actions, branchm
# get
for f, args, msg in actions.get(ACTION_GET, []):
if branchmerge:
- repo.dirstate.otherparent(f)
+ # the file may also be tracked in p1, but update_file should not care
+ repo.dirstate.update_file(
+ f,
+ p1_tracked=False,
+ p2_tracked=True,
+ wc_tracked=True,
+ clean_p2=True,
+ )
else:
parentfiledata = getfiledata[f] if getfiledata else None
- repo.dirstate.normal(f, parentfiledata=parentfiledata)
+ repo.dirstate.update_file(
+ f,
+ p1_tracked=True,
+ wc_tracked=True,
+ parentfiledata=parentfiledata,
+ )
# merge
for f, args, msg in actions.get(ACTION_MERGE, []):
@@ -800,10 +818,14 @@ def recordupdates(repo, actions, branchm
if branchmerge:
# We've done a branch merge, mark this file as merged
# so that we properly record the merger later
- repo.dirstate.merge(f)
+ repo.dirstate.update_file(
+ f, p1_tracked=True, wc_tracked=True, merged=True
+ )
if f1 != f2: # copy/rename
if move:
- repo.dirstate.remove(f1)
+ repo.dirstate.update_file(
+ f1, p1_tracked=True, wc_tracked=False
+ )
if f1 != f:
repo.dirstate.copy(f1, f)
else:
@@ -815,26 +837,30 @@ def recordupdates(repo, actions, branchm
# merge will appear as a normal local file
# modification.
if f2 == f: # file not locally copied/moved
- repo.dirstate.normallookup(f)
+ repo.dirstate.update_file(
+ f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
+ )
if move:
- repo.dirstate.drop(f1)
+ repo.dirstate.update_file(
+ f1, p1_tracked=False, wc_tracked=False
+ )
# directory rename, move local
for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
f0, flag = args
if branchmerge:
- repo.dirstate.add(f)
- repo.dirstate.remove(f0)
+ repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
+ repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
repo.dirstate.copy(f0, f)
else:
- repo.dirstate.normal(f)
- repo.dirstate.drop(f0)
+ repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
+ repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)
# directory rename, get
for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
f0, flag = args
if branchmerge:
- repo.dirstate.add(f)
+ repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
repo.dirstate.copy(f0, f)
else:
- repo.dirstate.normal(f)
+ repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
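The recordupdates() hunks above replace the legacy dirstate verbs (add, remove,
drop, normal, normallookup, merge, otherparent) with a single update_file()
call whose keyword arguments spell out where a file is tracked. The sketch
below is a hypothetical stand-in, not Mercurial's dirstate class; it only
mirrors the keyword arguments visible in this diff to show how the old verbs
map onto the new entry point.

    class SketchDirstate:
        def __init__(self):
            self.entries = {}

        def update_file(self, f, p1_tracked, wc_tracked, p2_tracked=False,
                        possibly_dirty=False, merged=False, clean_p2=False,
                        parentfiledata=None):
            # one call records both where the file comes from (p1/p2) and
            # whether the working copy still tracks it
            self.entries[f] = dict(
                p1_tracked=p1_tracked, p2_tracked=p2_tracked,
                wc_tracked=wc_tracked, possibly_dirty=possibly_dirty,
                merged=merged, clean_p2=clean_p2,
            )

        # legacy verbs expressed through update_file, matching the hunks above
        def add(self, f):
            self.update_file(f, p1_tracked=False, wc_tracked=True)

        def remove(self, f):
            self.update_file(f, p1_tracked=True, wc_tracked=False)

        def drop(self, f):
            self.update_file(f, p1_tracked=False, wc_tracked=False)

        def normallookup(self, f):
            self.update_file(f, p1_tracked=True, wc_tracked=True,
                             possibly_dirty=True)

    ds = SketchDirstate()
    ds.add(b'new-file')
    ds.remove(b'deleted-file')
    assert ds.entries[b'new-file']['wc_tracked'] is True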
diff --git a/mercurial/metadata.py b/mercurial/metadata.py
--- a/mercurial/metadata.py
+++ b/mercurial/metadata.py
@@ -11,14 +11,9 @@ from __future__ import absolute_import,
import multiprocessing
import struct
-from .node import (
- nullid,
- nullrev,
-)
+from .node import nullrev
from . import (
error,
- pycompat,
- requirements as requirementsmod,
util,
)
@@ -617,7 +612,7 @@ def computechangesetfilesmerged(ctx):
if f in ctx:
fctx = ctx[f]
parents = fctx._filelog.parents(fctx._filenode)
- if parents[1] != nullid:
+ if parents[1] != ctx.repo().nullid:
merged.append(f)
return merged
@@ -822,26 +817,9 @@ def _getsidedata(srcrepo, rev):
def copies_sidedata_computer(repo, revlog, rev, existing_sidedata):
- return _getsidedata(repo, rev)[0]
-
-
-def set_sidedata_spec_for_repo(repo):
- if requirementsmod.COPIESSDC_REQUIREMENT in repo.requirements:
- repo.register_wanted_sidedata(sidedatamod.SD_FILES)
- repo.register_sidedata_computer(
- b"changelog",
- sidedatamod.SD_FILES,
- (sidedatamod.SD_FILES,),
- copies_sidedata_computer,
- )
-
-
-def getsidedataadder(srcrepo, destrepo):
- use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
- if pycompat.iswindows or not use_w:
- return _get_simple_sidedata_adder(srcrepo, destrepo)
- else:
- return _get_worker_sidedata_adder(srcrepo, destrepo)
+ sidedata, has_copies_info = _getsidedata(repo, rev)
+ flags_to_add = sidedataflag.REVIDX_HASCOPIESINFO if has_copies_info else 0
+ return sidedata, (flags_to_add, 0)
def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
@@ -910,57 +888,21 @@ def _get_worker_sidedata_adder(srcrepo,
# received, when shelve 43 for later use.
staging = {}
- def sidedata_companion(revlog, rev):
- data = {}, False
- if util.safehasattr(revlog, b'filteredrevs'): # this is a changelog
- # Is the data previously shelved ?
- data = staging.pop(rev, None)
- if data is None:
- # look at the queued result until we find the one we are lookig
- # for (shelve the other ones)
+ def sidedata_companion(repo, revlog, rev, old_sidedata):
+ # Is the data previously shelved ?
+ data = staging.pop(rev, None)
+ if data is None:
+ # look at the queued result until we find the one we are looking
+ # for (shelve the other ones)
+ r, data = sidedataq.get()
+ while r != rev:
+ staging[r] = data
r, data = sidedataq.get()
- while r != rev:
- staging[r] = data
- r, data = sidedataq.get()
- tokens.release()
+ tokens.release()
sidedata, has_copies_info = data
new_flag = 0
if has_copies_info:
new_flag = sidedataflag.REVIDX_HASCOPIESINFO
- return False, (), sidedata, new_flag, 0
+ return sidedata, (new_flag, 0)
return sidedata_companion
-
-
-def _get_simple_sidedata_adder(srcrepo, destrepo):
- """The simple version of the sidedata computation
-
- It just compute it in the same thread on request"""
-
- def sidedatacompanion(revlog, rev):
- sidedata, has_copies_info = {}, False
- if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
- sidedata, has_copies_info = _getsidedata(srcrepo, rev)
- new_flag = 0
- if has_copies_info:
- new_flag = sidedataflag.REVIDX_HASCOPIESINFO
-
- return False, (), sidedata, new_flag, 0
-
- return sidedatacompanion
-
-
-def getsidedataremover(srcrepo, destrepo):
- def sidedatacompanion(revlog, rev):
- f = ()
- if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
- if revlog.flags(rev) & sidedataflag.REVIDX_SIDEDATA:
- f = (
- sidedatamod.SD_P1COPIES,
- sidedatamod.SD_P2COPIES,
- sidedatamod.SD_FILESADDED,
- sidedatamod.SD_FILESREMOVED,
- )
- return False, f, {}, 0, sidedataflag.REVIDX_HASCOPIESINFO
-
- return sidedatacompanion
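After this change every sidedata computer follows the same calling convention:
it receives (repo, revlog, rev, existing_sidedata) and returns the sidedata
mapping together with a (flags_to_add, flags_to_remove) pair, replacing the
old five-element companion tuple. A minimal, self-contained sketch of that
contract; the flag value and helper names below are placeholders, not
Mercurial's.

    REVIDX_HASCOPIESINFO = 1 << 12  # placeholder bit, not the real constant

    def toy_copies_computer(repo, revlog, rev, existing_sidedata):
        sidedata = {b'files': b'...'}  # whatever was computed for `rev`
        flags_to_add = REVIDX_HASCOPIESINFO if sidedata else 0
        return sidedata, (flags_to_add, 0)

    def toy_apply(computer, rev, old_flags):
        sidedata, (to_add, to_remove) = computer(None, None, rev, {})
        return sidedata, (old_flags | to_add) & ~to_remove

    sidedata, new_flags = toy_apply(toy_copies_computer, rev=0, old_flags=0)
    assert new_flags & REVIDX_HASCOPIESINFO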
diff --git a/mercurial/narrowspec.py b/mercurial/narrowspec.py
--- a/mercurial/narrowspec.py
+++ b/mercurial/narrowspec.py
@@ -343,11 +343,14 @@ def updateworkingcopy(repo, assumeclean=
for f in sorted(status.ignored):
repo.ui.status(_(b'not deleting ignored file %s\n') % uipathfn(f))
for f in clean + trackeddirty:
- ds.drop(f)
+ ds.update_file(f, p1_tracked=False, wc_tracked=False)
pctx = repo[b'.']
+
+ # only update added files that are in the sparse checkout
+ addedmatch = matchmod.intersectmatchers(addedmatch, sparse.matcher(repo))
newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
for f in newfiles:
- ds.normallookup(f)
+ ds.update_file(f, p1_tracked=True, wc_tracked=True, possibly_dirty=True)
_writeaddedfiles(repo, pctx, newfiles)
repo._updatingnarrowspec = False
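The narrowspec hunk intersects the "added files" matcher with the sparse
matcher so that only files inside the sparse checkout are re-added to the
dirstate. A toy sketch of matcher intersection using plain predicates rather
than Mercurial's matcher objects:

    def intersect(m1, m2):
        """accept only paths accepted by both matchers"""
        return lambda path: m1(path) and m2(path)

    def addedmatch(path):   # stand-in for the "added files" matcher
        return path.startswith('src/')

    def sparsematch(path):  # stand-in for sparse.matcher(repo)
        return path.endswith('.py')

    narrowed = intersect(addedmatch, sparsematch)
    assert narrowed('src/a.py')
    assert not narrowed('src/a.c')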
diff --git a/mercurial/obsolete.py b/mercurial/obsolete.py
--- a/mercurial/obsolete.py
+++ b/mercurial/obsolete.py
@@ -73,11 +73,14 @@ import errno
import struct
from .i18n import _
+from .node import (
+ bin,
+ hex,
+)
from .pycompat import getattr
from .node import (
bin,
hex,
- nullid,
)
from . import (
encoding,
@@ -103,6 +106,7 @@ propertycache = util.propertycache
# Options for obsolescence
createmarkersopt = b'createmarkers'
allowunstableopt = b'allowunstable'
+allowdivergenceopt = b'allowdivergence'
exchangeopt = b'exchange'
@@ -141,10 +145,13 @@ def getoptions(repo):
createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
unstablevalue = _getoptionvalue(repo, allowunstableopt)
+ divergencevalue = _getoptionvalue(repo, allowdivergenceopt)
exchangevalue = _getoptionvalue(repo, exchangeopt)
# createmarkers must be enabled if other options are enabled
- if (unstablevalue or exchangevalue) and not createmarkersvalue:
+ if (
+ unstablevalue or divergencevalue or exchangevalue
+ ) and not createmarkersvalue:
raise error.Abort(
_(
b"'createmarkers' obsolete option must be enabled "
@@ -155,6 +162,7 @@ def getoptions(repo):
return {
createmarkersopt: createmarkersvalue,
allowunstableopt: unstablevalue,
+ allowdivergenceopt: divergencevalue,
exchangeopt: exchangevalue,
}
@@ -526,14 +534,14 @@ def _addchildren(children, markers):
children.setdefault(p, set()).add(mark)
-def _checkinvalidmarkers(markers):
+def _checkinvalidmarkers(repo, markers):
"""search for marker with invalid data and raise error if needed
Exist as a separated function to allow the evolve extension for a more
subtle handling.
"""
for mark in markers:
- if nullid in mark[1]:
+ if repo.nullid in mark[1]:
raise error.Abort(
_(
b'bad obsolescence marker detected: '
@@ -727,7 +735,7 @@ class obsstore(object):
return []
self._version, markers = _readmarkers(data)
markers = list(markers)
- _checkinvalidmarkers(markers)
+ _checkinvalidmarkers(self.repo, markers)
return markers
@propertycache
@@ -761,7 +769,7 @@ class obsstore(object):
_addpredecessors(self.predecessors, markers)
if self._cached('children'):
_addchildren(self.children, markers)
- _checkinvalidmarkers(markers)
+ _checkinvalidmarkers(self.repo, markers)
def relevantmarkers(self, nodes):
"""return a set of all obsolescence markers relevant to a set of nodes.
diff --git a/mercurial/patch.py b/mercurial/patch.py
--- a/mercurial/patch.py
+++ b/mercurial/patch.py
@@ -20,7 +20,7 @@ import zlib
from .i18n import _
from .node import (
hex,
- nullhex,
+ sha1nodeconstants,
short,
)
from .pycompat import open
@@ -3100,8 +3100,8 @@ def diffcontent(data1, data2, header, bi
ctx1, fctx1, path1, flag1, content1, date1 = data1
ctx2, fctx2, path2, flag2, content2, date2 = data2
- index1 = _gitindex(content1) if path1 in ctx1 else nullhex
- index2 = _gitindex(content2) if path2 in ctx2 else nullhex
+ index1 = _gitindex(content1) if path1 in ctx1 else sha1nodeconstants.nullhex
+ index2 = _gitindex(content2) if path2 in ctx2 else sha1nodeconstants.nullhex
if binary and opts.git and not opts.nobinary:
text = mdiff.b85diff(content1, content2)
if text:
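For reference, the nullhex value that diffcontent() now reaches through
sha1nodeconstants is just the hex form of the 20-byte null node; the snippet
below recomputes it locally instead of importing anything from Mercurial.

    import binascii

    nodelen = 20                        # sha1 digest size
    nullid = b'\0' * nodelen            # the null node
    nullhex = binascii.hexlify(nullid)  # forty ASCII zeros
    assert nullhex == b'0' * 40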
diff --git a/mercurial/pathutil.py b/mercurial/pathutil.py
--- a/mercurial/pathutil.py
+++ b/mercurial/pathutil.py
@@ -323,7 +323,7 @@ class dirs(object):
addpath = self.addpath
if isinstance(map, dict) and skip is not None:
for f, s in pycompat.iteritems(map):
- if s[0] != skip:
+ if s.state != skip:
addpath(f)
elif skip is not None:
raise error.ProgrammingError(
diff --git a/mercurial/phases.py b/mercurial/phases.py
--- a/mercurial/phases.py
+++ b/mercurial/phases.py
@@ -109,7 +109,6 @@ from .i18n import _
from .node import (
bin,
hex,
- nullid,
nullrev,
short,
wdirrev,
@@ -862,7 +861,7 @@ def analyzeremotephases(repo, subset, ro
node = bin(nhex)
phase = int(phase)
if phase == public:
- if node != nullid:
+ if node != repo.nullid:
repo.ui.warn(
_(
b'ignoring inconsistent public root'
@@ -919,10 +918,10 @@ def newheads(repo, heads, roots):
rev = cl.index.get_rev
if not roots:
return heads
- if not heads or heads == [nullid]:
+ if not heads or heads == [repo.nullid]:
return []
# The logic operated on revisions, convert arguments early for convenience
- new_heads = {rev(n) for n in heads if n != nullid}
+ new_heads = {rev(n) for n in heads if n != repo.nullid}
roots = [rev(n) for n in roots]
# compute the area we need to remove
affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
diff --git a/mercurial/policy.py b/mercurial/policy.py
--- a/mercurial/policy.py
+++ b/mercurial/policy.py
@@ -80,7 +80,7 @@ def _importfrom(pkgname, modname):
('cext', 'bdiff'): 3,
('cext', 'mpatch'): 1,
('cext', 'osutil'): 4,
- ('cext', 'parsers'): 17,
+ ('cext', 'parsers'): 20,
}
# map import request to other package or module
diff --git a/mercurial/posix.py b/mercurial/posix.py
--- a/mercurial/posix.py
+++ b/mercurial/posix.py
@@ -36,6 +36,8 @@ osutil = policy.importmod('osutil')
normpath = os.path.normpath
samestat = os.path.samestat
+abspath = os.path.abspath # re-exports
+
try:
oslink = os.link
except AttributeError:
diff --git a/mercurial/pure/parsers.py b/mercurial/pure/parsers.py
--- a/mercurial/pure/parsers.py
+++ b/mercurial/pure/parsers.py
@@ -10,9 +10,15 @@ from __future__ import absolute_import
import struct
import zlib
-from ..node import nullid, nullrev
+from ..node import (
+ nullrev,
+ sha1nodeconstants,
+)
+from ..thirdparty import attr
from .. import (
+ error,
pycompat,
+ revlogutils,
util,
)
@@ -27,22 +33,204 @@ stringio = pycompat.bytesio
_compress = zlib.compress
_decompress = zlib.decompress
-# Some code below makes tuples directly because it's more convenient. However,
-# code outside this module should always use dirstatetuple.
-def dirstatetuple(*x):
- # x is a tuple
- return x
+
+# a special value used internally for `size` if the file comes from the other parent
+FROM_P2 = -2
+
+# a special value used internally for `size` if the file is modified/merged/added
+NONNORMAL = -1
+
+# a special value used internally for `time` if the time is ambiguous
+AMBIGUOUS_TIME = -1
+
+
+@attr.s(slots=True, init=False)
+class DirstateItem(object):
+ """represent a dirstate entry
+
+ It contains:
+
+ - state (one of 'n', 'a', 'r', 'm')
+ - mode,
+ - size,
+ - mtime,
+ """
+
+ _state = attr.ib()
+ _mode = attr.ib()
+ _size = attr.ib()
+ _mtime = attr.ib()
+
+ def __init__(self, state, mode, size, mtime):
+ self._state = state
+ self._mode = mode
+ self._size = size
+ self._mtime = mtime
+
+ @classmethod
+ def from_v1_data(cls, state, mode, size, mtime):
+ """Build a new DirstateItem object from V1 data
+
+ Since the dirstate-v1 format is frozen, the signature of this function
+ is not expected to change, unlike the __init__ one.
+ """
+ return cls(
+ state=state,
+ mode=mode,
+ size=size,
+ mtime=mtime,
+ )
+
+ def set_possibly_dirty(self):
+ """Mark a file as "possibly dirty"
+
+ This means the next status call will have to actually check its content
+ to make sure it is correct.
+ """
+ self._mtime = AMBIGUOUS_TIME
+
+ def __getitem__(self, idx):
+ if idx == 0 or idx == -4:
+ msg = b"do not use item[x], use item.state"
+ util.nouideprecwarn(msg, b'6.0', stacklevel=2)
+ return self._state
+ elif idx == 1 or idx == -3:
+ msg = b"do not use item[x], use item.mode"
+ util.nouideprecwarn(msg, b'6.0', stacklevel=2)
+ return self._mode
+ elif idx == 2 or idx == -2:
+ msg = b"do not use item[x], use item.size"
+ util.nouideprecwarn(msg, b'6.0', stacklevel=2)
+ return self._size
+ elif idx == 3 or idx == -1:
+ msg = b"do not use item[x], use item.mtime"
+ util.nouideprecwarn(msg, b'6.0', stacklevel=2)
+ return self._mtime
+ else:
+ raise IndexError(idx)
+
+ @property
+ def mode(self):
+ return self._mode
+
+ @property
+ def size(self):
+ return self._size
+
+ @property
+ def mtime(self):
+ return self._mtime
+
+ @property
+ def state(self):
+ """
+ States are:
+ n normal
+ m needs merging
+ r marked for removal
+ a marked for addition
+
+ XXX This "state" is a bit obscure and mostly a direct expression of the
+ dirstatev1 format. It would make sense to ultimately deprecate it in
+ favor of the more "semantic" attributes.
+ """
+ return self._state
+
+ @property
+ def tracked(self):
+ """True is the file is tracked in the working copy"""
+ return self._state in b"nma"
+
+ @property
+ def added(self):
+ """True if the file has been added"""
+ return self._state == b'a'
+
+ @property
+ def merged(self):
+ """True if the file has been merged
+
+ Should only be set if a merge is in progress in the dirstate
+ """
+ return self._state == b'm'
+
+ @property
+ def from_p2(self):
+ """True if the file have been fetched from p2 during the current merge
+
+ This is only True if the file is currently tracked.
+
+ Should only be set if a merge is in progress in the dirstate
+ """
+ return self._state == b'n' and self._size == FROM_P2
+
+ @property
+ def from_p2_removed(self):
+ """True if the file has been removed, but was "from_p2" initially
+
+ This property seems like an abstraction leakage and should probably be
+ dealt with in this class (or maybe the dirstatemap) directly.
+ """
+ return self._state == b'r' and self._size == FROM_P2
+
+ @property
+ def removed(self):
+ """True if the file has been removed"""
+ return self._state == b'r'
+
+ @property
+ def merged_removed(self):
+ """True if the file has been removed, but was "merged" initially
+
+ This property seems like an abstraction leakage and should probably be
+ dealt with in this class (or maybe the dirstatemap) directly.
+ """
+ return self._state == b'r' and self._size == NONNORMAL
+
+ @property
+ def dm_nonnormal(self):
+ """True is the entry is non-normal in the dirstatemap sense
+
+ There is no reason for any code other than the dirstatemap to use this.
+ """
+ return self.state != b'n' or self.mtime == AMBIGUOUS_TIME
+
+ @property
+ def dm_otherparent(self):
+ """True is the entry is `otherparent` in the dirstatemap sense
+
+ There is no reason for any code other than the dirstatemap to use this.
+ """
+ return self._size == FROM_P2
+
+ def v1_state(self):
+ """return a "state" suitable for v1 serialization"""
+ return self._state
+
+ def v1_mode(self):
+ """return a "mode" suitable for v1 serialization"""
+ return self._mode
+
+ def v1_size(self):
+ """return a "size" suitable for v1 serialization"""
+ return self._size
+
+ def v1_mtime(self):
+ """return a "mtime" suitable for v1 serialization"""
+ return self._mtime
+
+ def need_delay(self, now):
+ """True if the stored mtime would be ambiguous with the current time"""
+ return self._state == b'n' and self._mtime == now
def gettype(q):
return int(q & 0xFFFF)
-def offset_type(offset, type):
- return int(int(offset) << 16 | type)
-
-
class BaseIndexObject(object):
+ # Can I be passed to an algorithm implemented in Rust?
+ rust_ext_compat = 0
# Format of an index entry according to Python's `struct` language
index_format = revlog_constants.INDEX_ENTRY_V1
# Size of a C unsigned long long int, platform independent
@@ -50,7 +238,20 @@ class BaseIndexObject(object):
# Size of a C long int, platform independent
int_size = struct.calcsize(b'>i')
# An empty index entry, used as a default value to be overridden, or nullrev
- null_item = (0, 0, 0, -1, -1, -1, -1, nullid)
+ null_item = (
+ 0,
+ 0,
+ 0,
+ -1,
+ -1,
+ -1,
+ -1,
+ sha1nodeconstants.nullid,
+ 0,
+ 0,
+ revlog_constants.COMP_MODE_INLINE,
+ revlog_constants.COMP_MODE_INLINE,
+ )
@util.propertycache
def entry_size(self):
@@ -64,7 +265,7 @@ class BaseIndexObject(object):
@util.propertycache
def _nodemap(self):
- nodemap = nodemaputil.NodeMap({nullid: nullrev})
+ nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
for r in range(0, len(self)):
n = self[r][7]
nodemap[n] = r
@@ -101,9 +302,14 @@ class BaseIndexObject(object):
def append(self, tup):
if '_nodemap' in vars(self):
self._nodemap[tup[7]] = len(self)
- data = self.index_format.pack(*tup)
+ data = self._pack_entry(len(self), tup)
self._extra.append(data)
+ def _pack_entry(self, rev, entry):
+ assert entry[8] == 0
+ assert entry[9] == 0
+ return self.index_format.pack(*entry[:8])
+
def _check_index(self, i):
if not isinstance(i, int):
raise TypeError(b"expecting int indexes")
@@ -119,15 +325,43 @@ class BaseIndexObject(object):
else:
index = self._calculate_index(i)
data = self._data[index : index + self.entry_size]
- r = self.index_format.unpack(data)
+ r = self._unpack_entry(i, data)
if self._lgt and i == 0:
- r = (offset_type(0, gettype(r[0])),) + r[1:]
+ offset = revlogutils.offset_type(0, gettype(r[0]))
+ r = (offset,) + r[1:]
+ return r
+
+ def _unpack_entry(self, rev, data):
+ r = self.index_format.unpack(data)
+ r = r + (
+ 0,
+ 0,
+ revlog_constants.COMP_MODE_INLINE,
+ revlog_constants.COMP_MODE_INLINE,
+ )
return r
+ def pack_header(self, header):
+ """pack header information as binary"""
+ v_fmt = revlog_constants.INDEX_HEADER
+ return v_fmt.pack(header)
+
+ def entry_binary(self, rev):
+ """return the raw binary string representing a revision"""
+ entry = self[rev]
+ p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
+ if rev == 0:
+ p = p[revlog_constants.INDEX_HEADER.size :]
+ return p
+
class IndexObject(BaseIndexObject):
def __init__(self, data):
- assert len(data) % self.entry_size == 0
+ assert len(data) % self.entry_size == 0, (
+ len(data),
+ self.entry_size,
+ len(data) % self.entry_size,
+ )
self._data = data
self._lgt = len(data) // self.entry_size
self._extra = []
@@ -240,64 +474,92 @@ def parse_index2(data, inline, revlogv2=
if not inline:
cls = IndexObject2 if revlogv2 else IndexObject
return cls(data), None
- cls = InlinedIndexObject2 if revlogv2 else InlinedIndexObject
+ cls = InlinedIndexObject
return cls(data, inline), (0, data)
-class Index2Mixin(object):
+def parse_index_cl_v2(data):
+ return IndexChangelogV2(data), None
+
+
+class IndexObject2(IndexObject):
index_format = revlog_constants.INDEX_ENTRY_V2
- null_item = (0, 0, 0, -1, -1, -1, -1, nullid, 0, 0)
- def replace_sidedata_info(self, i, sidedata_offset, sidedata_length):
+ def replace_sidedata_info(
+ self,
+ rev,
+ sidedata_offset,
+ sidedata_length,
+ offset_flags,
+ compression_mode,
+ ):
"""
Replace an existing index entry's sidedata offset and length with new
ones.
This cannot be used outside of the context of sidedata rewriting,
- inside the transaction that creates the revision `i`.
+ inside the transaction that creates the revision `rev`.
"""
- if i < 0:
+ if rev < 0:
raise KeyError
- self._check_index(i)
- sidedata_format = b">Qi"
- packed_size = struct.calcsize(sidedata_format)
- if i >= self._lgt:
- packed = _pack(sidedata_format, sidedata_offset, sidedata_length)
- old = self._extra[i - self._lgt]
- new = old[:64] + packed + old[64 + packed_size :]
- self._extra[i - self._lgt] = new
- else:
+ self._check_index(rev)
+ if rev < self._lgt:
msg = b"cannot rewrite entries outside of this transaction"
raise KeyError(msg)
+ else:
+ entry = list(self[rev])
+ entry[0] = offset_flags
+ entry[8] = sidedata_offset
+ entry[9] = sidedata_length
+ entry[11] = compression_mode
+ entry = tuple(entry)
+ new = self._pack_entry(rev, entry)
+ self._extra[rev - self._lgt] = new
+ def _unpack_entry(self, rev, data):
+ data = self.index_format.unpack(data)
+ entry = data[:10]
+ data_comp = data[10] & 3
+ sidedata_comp = (data[10] & (3 << 2)) >> 2
+ return entry + (data_comp, sidedata_comp)
-class IndexObject2(Index2Mixin, IndexObject):
- pass
+ def _pack_entry(self, rev, entry):
+ data = entry[:10]
+ data_comp = entry[10] & 3
+ sidedata_comp = (entry[11] & 3) << 2
+ data += (data_comp | sidedata_comp,)
+
+ return self.index_format.pack(*data)
+
+ def entry_binary(self, rev):
+ """return the raw binary string representing a revision"""
+ entry = self[rev]
+ return self._pack_entry(rev, entry)
+
+ def pack_header(self, header):
+ """pack header information as binary"""
+ msg = 'version header should go in the docket, not the index: %d'
+ msg %= header
+ raise error.ProgrammingError(msg)
-class InlinedIndexObject2(Index2Mixin, InlinedIndexObject):
- def _inline_scan(self, lgt):
- sidedata_length_pos = 72
- off = 0
- if lgt is not None:
- self._offsets = [0] * lgt
- count = 0
- while off <= len(self._data) - self.entry_size:
- start = off + self.big_int_size
- (data_size,) = struct.unpack(
- b'>i',
- self._data[start : start + self.int_size],
- )
- start = off + sidedata_length_pos
- (side_data_size,) = struct.unpack(
- b'>i', self._data[start : start + self.int_size]
- )
- if lgt is not None:
- self._offsets[count] = off
- count += 1
- off += self.entry_size + data_size + side_data_size
- if off != len(self._data):
- raise ValueError(b"corrupted data")
- return count
+class IndexChangelogV2(IndexObject2):
+ index_format = revlog_constants.INDEX_ENTRY_CL_V2
+
+ def _unpack_entry(self, rev, data, r=True):
+ items = self.index_format.unpack(data)
+ entry = items[:3] + (rev, rev) + items[3:8]
+ data_comp = items[8] & 3
+ sidedata_comp = (items[8] >> 2) & 3
+ return entry + (data_comp, sidedata_comp)
+
+ def _pack_entry(self, rev, entry):
+ assert entry[3] == rev, entry[3]
+ assert entry[4] == rev, entry[4]
+ data = entry[:3] + entry[5:10]
+ data_comp = entry[10] & 3
+ sidedata_comp = (entry[11] & 3) << 2
+ data += (data_comp | sidedata_comp,)
+ return self.index_format.pack(*data)
def parse_index_devel_nodemap(data, inline):
@@ -322,7 +584,7 @@ def parse_dirstate(dmap, copymap, st):
if b'\0' in f:
f, c = f.split(b'\0')
copymap[f] = c
- dmap[f] = e[:4]
+ dmap[f] = DirstateItem.from_v1_data(*e[:4])
return parents
@@ -332,7 +594,7 @@ def pack_dirstate(dmap, copymap, pl, now
write = cs.write
write(b"".join(pl))
for f, e in pycompat.iteritems(dmap):
- if e[0] == b'n' and e[3] == now:
+ if e.need_delay(now):
# The file was last modified "simultaneously" with the current
# write to dirstate (i.e. within the same second for file-
# systems with a granularity of 1 sec). This commonly happens
@@ -342,12 +604,18 @@ def pack_dirstate(dmap, copymap, pl, now
# dirstate, forcing future 'status' calls to compare the
# contents of the file if the size is the same. This prevents
# mistakenly treating such files as clean.
- e = dirstatetuple(e[0], e[1], e[2], -1)
- dmap[f] = e
+ e.set_possibly_dirty()
if f in copymap:
f = b"%s\0%s" % (f, copymap[f])
- e = _pack(b">cllll", e[0], e[1], e[2], e[3], len(f))
+ e = _pack(
+ b">cllll",
+ e.v1_state(),
+ e.v1_mode(),
+ e.v1_size(),
+ e.v1_mtime(),
+ len(f),
+ )
write(e)
write(f)
return cs.getvalue()
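pack_dirstate() above now asks each DirstateItem for its v1_* fields but still
emits the frozen dirstate-v1 record layout. A standalone sketch of that record
format, using only the struct pattern visible in the hunk; the helper itself
is illustrative, not Mercurial code.

    import struct

    def pack_one_entry(state, mode, size, mtime, filename):
        # state is one byte (b'n', b'a', b'r' or b'm'); mode, size, mtime and
        # the filename length are big-endian 32-bit ints, then the filename
        header = struct.pack(b'>cllll', state, mode, size, mtime, len(filename))
        return header + filename

    record = pack_one_entry(b'n', 0o100644, 12, 1_600_000_000, b'src/hello.py')
    assert len(record) == struct.calcsize(b'>cllll') + len(b'src/hello.py')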
diff --git a/mercurial/repair.py b/mercurial/repair.py
--- a/mercurial/repair.py
+++ b/mercurial/repair.py
@@ -28,6 +28,7 @@ from . import (
pycompat,
requirements,
scmutil,
+ util,
)
from .utils import (
hashutil,
@@ -239,19 +240,23 @@ def strip(ui, repo, nodelist, backup=Tru
ui.note(_(b"adding branch\n"))
f = vfs.open(tmpbundlefile, b"rb")
gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
- if not repo.ui.verbose:
- # silence internal shuffling chatter
- repo.ui.pushbuffer()
- tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
- txnname = b'strip'
- if not isinstance(gen, bundle2.unbundle20):
- txnname = b"strip\n%s" % urlutil.hidepassword(tmpbundleurl)
- with repo.transaction(txnname) as tr:
- bundle2.applybundle(
- repo, gen, tr, source=b'strip', url=tmpbundleurl
- )
- if not repo.ui.verbose:
- repo.ui.popbuffer()
+ # silence internal shuffling chatter
+ maybe_silent = (
+ repo.ui.silent()
+ if not repo.ui.verbose
+ else util.nullcontextmanager()
+ )
+ with maybe_silent:
+ tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
+ txnname = b'strip'
+ if not isinstance(gen, bundle2.unbundle20):
+ txnname = b"strip\n%s" % urlutil.hidepassword(
+ tmpbundleurl
+ )
+ with repo.transaction(txnname) as tr:
+ bundle2.applybundle(
+ repo, gen, tr, source=b'strip', url=tmpbundleurl
+ )
f.close()
with repo.transaction(b'repair') as tr:
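The strip() rewrite replaces the pushbuffer()/popbuffer() pair with a context
manager chosen up front: ui.silent() when not verbose, a no-op context
otherwise. A generic sketch of that conditional-context-manager pattern;
contextlib.nullcontext stands in for util.nullcontextmanager, and silent()
here is a toy, not Mercurial's ui.silent().

    import contextlib

    @contextlib.contextmanager
    def silent():
        # a real implementation would swallow ui output while active
        yield

    def apply_bundle(verbose):
        maybe_silent = contextlib.nullcontext() if verbose else silent()
        with maybe_silent:
            pass  # read the bundle and apply it inside a transaction

    apply_bundle(verbose=False)
    apply_bundle(verbose=True)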
diff --git a/mercurial/repoview.py b/mercurial/repoview.py
--- a/mercurial/repoview.py
+++ b/mercurial/repoview.py
@@ -333,7 +333,7 @@ class filteredchangelogmixin(object):
r = super(filteredchangelogmixin, self).rev(node)
if r in self.filteredrevs:
raise error.FilteredLookupError(
- hex(node), self.indexfile, _(b'filtered node')
+ hex(node), self.display_id, _(b'filtered node')
)
return r
diff --git a/mercurial/requirements.py b/mercurial/requirements.py
--- a/mercurial/requirements.py
+++ b/mercurial/requirements.py
@@ -12,6 +12,8 @@ DOTENCODE_REQUIREMENT = b'dotencode'
STORE_REQUIREMENT = b'store'
FNCACHE_REQUIREMENT = b'fncache'
+DIRSTATE_V2_REQUIREMENT = b'exp-dirstate-v2'
+
# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = b'narrowhg-experimental'
@@ -30,6 +32,10 @@ REVLOGV1_REQUIREMENT = b'revlogv1'
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
+CHANGELOGV2_REQUIREMENT = b'exp-changelog-v2'
+
+# Increment the sub-version when the revlog v2 format changes to lock out old
+# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.2'
# A repository with the sparserevlog feature will have delta chains that
@@ -41,10 +47,6 @@ REVLOGV2_REQUIREMENT = b'exp-revlogv2.2'
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
-# A repository with the sidedataflag requirement will allow to store extra
-# information for revision without altering their original hashes.
-SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
-
# A repository with the the copies-sidedata-changeset requirement will store
# copies related information in changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
@@ -74,9 +76,12 @@ SHARESAFE_REQUIREMENT = b'share-safe'
# repo. Hence both of them should be stored in working copy
# * SHARESAFE_REQUIREMENT needs to be stored in working dir to mark that rest of
# the requirements are stored in store's requires
+# * DIRSTATE_V2_REQUIREMENT affects .hg/dirstate, of which there is one per
+# working directory.
WORKING_DIR_REQUIREMENTS = {
SPARSE_REQUIREMENT,
SHARED_REQUIREMENT,
RELATIVE_SHARED_REQUIREMENT,
SHARESAFE_REQUIREMENT,
+ DIRSTATE_V2_REQUIREMENT,
}
diff --git a/mercurial/revlog.py b/mercurial/revlog.py
--- a/mercurial/revlog.py
+++ b/mercurial/revlog.py
@@ -1,4 +1,5 @@
# revlog.py - storage back-end for mercurial
+# coding: utf8
#
# Copyright 2005-2007 Olivia Mackall
#
@@ -26,25 +27,24 @@ import zlib
from .node import (
bin,
hex,
- nullhex,
- nullid,
nullrev,
sha1nodeconstants,
short,
- wdirfilenodeids,
- wdirhex,
- wdirid,
wdirrev,
)
from .i18n import _
from .pycompat import getattr
from .revlogutils.constants import (
+ ALL_KINDS,
+ CHANGELOGV2,
+ COMP_MODE_DEFAULT,
+ COMP_MODE_INLINE,
+ COMP_MODE_PLAIN,
+ FEATURES_BY_VERSION,
FLAG_GENERALDELTA,
FLAG_INLINE_DATA,
- INDEX_ENTRY_V0,
- INDEX_ENTRY_V1,
- INDEX_ENTRY_V2,
INDEX_HEADER,
+ KIND_CHANGELOG,
REVLOGV0,
REVLOGV1,
REVLOGV1_FLAGS,
@@ -53,6 +53,7 @@ from .revlogutils.constants import (
REVLOG_DEFAULT_FLAGS,
REVLOG_DEFAULT_FORMAT,
REVLOG_DEFAULT_VERSION,
+ SUPPORTED_FLAGS,
)
from .revlogutils.flagutil import (
REVIDX_DEFAULT_FLAGS,
@@ -62,7 +63,6 @@ from .revlogutils.flagutil import (
REVIDX_HASCOPIESINFO,
REVIDX_ISCENSORED,
REVIDX_RAWTEXT_CHANGING_FLAGS,
- REVIDX_SIDEDATA,
)
from .thirdparty import attr
from . import (
@@ -72,6 +72,7 @@ from . import (
mdiff,
policy,
pycompat,
+ revlogutils,
templatefilters,
util,
)
@@ -81,8 +82,12 @@ from .interfaces import (
)
from .revlogutils import (
deltas as deltautil,
+ docket as docketutil,
flagutil,
nodemap as nodemaputil,
+ randomaccessfile,
+ revlogv0,
+ rewrite,
sidedata as sidedatautil,
)
from .utils import (
@@ -92,6 +97,7 @@ from .utils import (
# blanked usage of all the name to prevent pyflakes constraints
# We need these name available in the module for extensions.
+
REVLOGV0
REVLOGV1
REVLOGV2
@@ -104,7 +110,6 @@ REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
-REVIDX_SIDEDATA
REVIDX_HASCOPIESINFO
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
@@ -121,7 +126,6 @@ rustrevlog = policy.importrust('revlog')
# max size of revlog with inline data
_maxinline = 131072
-_chunksize = 1048576
# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
@@ -143,20 +147,6 @@ ellipsisprocessor = (
)
-def getoffset(q):
- return int(q >> 16)
-
-
-def gettype(q):
- return int(q & 0xFFFF)
-
-
-def offset_type(offset, type):
- if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
- raise ValueError(b'unknown revlog index flags')
- return int(int(offset) << 16 | type)
-
-
def _verify_revision(rl, skipflags, state, node):
"""Verify the integrity of the given revlog ``node`` while providing a hook
point for extensions to influence the operation."""
@@ -177,27 +167,6 @@ HAS_FAST_PERSISTENT_NODEMAP = rustrevlog
)
-@attr.s(slots=True, frozen=True)
-class _revisioninfo(object):
- """Information about a revision that allows building its fulltext
- node: expected hash of the revision
- p1, p2: parent revs of the revision
- btext: built text cache consisting of a one-element list
- cachedelta: (baserev, uncompressed_delta) or None
- flags: flags associated to the revision storage
-
- One of btext[0] or cachedelta must be set.
- """
-
- node = attr.ib()
- p1 = attr.ib()
- p2 = attr.ib()
- btext = attr.ib()
- textlen = attr.ib()
- cachedelta = attr.ib()
- flags = attr.ib()
-
-
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
@@ -210,6 +179,7 @@ class revlogrevisiondelta(object):
revision = attr.ib()
delta = attr.ib()
sidedata = attr.ib()
+ protocol_flags = attr.ib()
linknode = attr.ib(default=None)
@@ -221,161 +191,51 @@ class revlogproblem(object):
node = attr.ib(default=None)
-class revlogoldindex(list):
- entry_size = INDEX_ENTRY_V0.size
-
- @property
- def nodemap(self):
- msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
- util.nouideprecwarn(msg, b'5.3', stacklevel=2)
- return self._nodemap
-
- @util.propertycache
- def _nodemap(self):
- nodemap = nodemaputil.NodeMap({nullid: nullrev})
- for r in range(0, len(self)):
- n = self[r][7]
- nodemap[n] = r
- return nodemap
-
- def has_node(self, node):
- """return True if the node exist in the index"""
- return node in self._nodemap
-
- def rev(self, node):
- """return a revision for a node
-
- If the node is unknown, raise a RevlogError"""
- return self._nodemap[node]
-
- def get_rev(self, node):
- """return a revision for a node
-
- If the node is unknown, return None"""
- return self._nodemap.get(node)
-
- def append(self, tup):
- self._nodemap[tup[7]] = len(self)
- super(revlogoldindex, self).append(tup)
-
- def __delitem__(self, i):
- if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
- raise ValueError(b"deleting slices only supports a:-1 with step 1")
- for r in pycompat.xrange(i.start, len(self)):
- del self._nodemap[self[r][7]]
- super(revlogoldindex, self).__delitem__(i)
-
- def clearcaches(self):
- self.__dict__.pop('_nodemap', None)
-
- def __getitem__(self, i):
- if i == -1:
- return (0, 0, 0, -1, -1, -1, -1, nullid)
- return list.__getitem__(self, i)
-
-
-class revlogoldio(object):
- def parseindex(self, data, inline):
- s = INDEX_ENTRY_V0.size
- index = []
- nodemap = nodemaputil.NodeMap({nullid: nullrev})
- n = off = 0
- l = len(data)
- while off + s <= l:
- cur = data[off : off + s]
- off += s
- e = INDEX_ENTRY_V0.unpack(cur)
- # transform to revlogv1 format
- e2 = (
- offset_type(e[0], 0),
- e[1],
- -1,
- e[2],
- e[3],
- nodemap.get(e[4], nullrev),
- nodemap.get(e[5], nullrev),
- e[6],
- )
- index.append(e2)
- nodemap[e[6]] = n
- n += 1
-
- index = revlogoldindex(index)
- return index, None
-
- def packentry(self, entry, node, version, rev):
- """return the binary representation of an entry
-
- entry: a tuple containing all the values (see index.__getitem__)
- node: a callback to convert a revision to nodeid
- version: the changelog version
- rev: the revision number
- """
- if gettype(entry[0]):
- raise error.RevlogError(
- _(b'index entry flags need revlog version 1')
- )
- e2 = (
- getoffset(entry[0]),
- entry[1],
- entry[3],
- entry[4],
- node(entry[5]),
- node(entry[6]),
- entry[7],
- )
- return INDEX_ENTRY_V0.pack(*e2)
+def parse_index_v1(data, inline):
+ # call the C implementation to parse the index data
+ index, cache = parsers.parse_index2(data, inline)
+ return index, cache
+
+
+def parse_index_v2(data, inline):
+ # call the C implementation to parse the index data
+ index, cache = parsers.parse_index2(data, inline, revlogv2=True)
+ return index, cache
+
+
+def parse_index_cl_v2(data, inline):
+ # call the C implementation to parse the index data
+ assert not inline
+ from .pure.parsers import parse_index_cl_v2
+
+ index, cache = parse_index_cl_v2(data)
+ return index, cache
+
+
+if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
+
+ def parse_index_v1_nodemap(data, inline):
+ index, cache = parsers.parse_index_devel_nodemap(data, inline)
+ return index, cache
+
+
+else:
+ parse_index_v1_nodemap = None
+
+
+def parse_index_v1_mixed(data, inline):
+ index, cache = parse_index_v1(data, inline)
+ return rustrevlog.MixedIndex(index), cache
# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7FFFFFFF
-
-class revlogio(object):
- def parseindex(self, data, inline):
- # call the C implementation to parse the index data
- index, cache = parsers.parse_index2(data, inline)
- return index, cache
-
- def packentry(self, entry, node, version, rev):
- p = INDEX_ENTRY_V1.pack(*entry)
- if rev == 0:
- p = INDEX_HEADER.pack(version) + p[4:]
- return p
-
-
-class revlogv2io(object):
- def parseindex(self, data, inline):
- index, cache = parsers.parse_index2(data, inline, revlogv2=True)
- return index, cache
-
- def packentry(self, entry, node, version, rev):
- p = INDEX_ENTRY_V2.pack(*entry)
- if rev == 0:
- p = INDEX_HEADER.pack(version) + p[4:]
- return p
-
-
-NodemapRevlogIO = None
-
-if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
-
- class NodemapRevlogIO(revlogio):
- """A debug oriented IO class that return a PersistentNodeMapIndexObject
-
- The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
- """
-
- def parseindex(self, data, inline):
- index, cache = parsers.parse_index_devel_nodemap(data, inline)
- return index, cache
-
-
-class rustrevlogio(revlogio):
- def parseindex(self, data, inline):
- index, cache = super(rustrevlogio, self).parseindex(data, inline)
- return rustrevlog.MixedIndex(index), cache
+FILE_TOO_SHORT_MSG = _(
+ b'cannot read from revlog %s;'
+ b' expected %d bytes from offset %d, data size is %d'
+)
class revlog(object):
@@ -419,6 +279,9 @@ class revlog(object):
file handle, a filename, and an expected position. It should check whether
the current position in the file handle is valid, and log/warn/fail (by
raising).
+
+ See mercurial/revlogutils/constants.py for details about the content of an
+ index entry.
"""
_flagserrorclass = error.RevlogError
@@ -426,14 +289,16 @@ class revlog(object):
def __init__(
self,
opener,
- indexfile,
- datafile=None,
+ target,
+ radix,
+ postfix=None, # only exist for `tmpcensored` now
checkambig=False,
mmaplargeindex=False,
censorable=False,
upperboundcomp=None,
persistentnodemap=False,
concurrencychecker=None,
+ trypending=False,
):
"""
create a revlog object
@@ -441,17 +306,31 @@ class revlog(object):
opener is a function that abstracts the file opening operation
and can be used to implement COW semantics or the like.
+ `target`: a (KIND, ID) tuple that identifies the content stored in
+ this revlog. It helps the rest of the code to understand what the revlog
+ is about without having to resort to heuristics and index filename
+ analysis. Note that this must reliably be set by normal code, but
+ that test, debug, or performance measurement code might not set it to an
+ accurate value.
"""
self.upperboundcomp = upperboundcomp
- self.indexfile = indexfile
- self.datafile = datafile or (indexfile[:-2] + b".d")
- self.nodemap_file = None
+
+ self.radix = radix
+
+ self._docket_file = None
+ self._indexfile = None
+ self._datafile = None
+ self._sidedatafile = None
+ self._nodemap_file = None
+ self.postfix = postfix
+ self._trypending = trypending
+ self.opener = opener
if persistentnodemap:
- self.nodemap_file = nodemaputil.get_nodemap_file(
- opener, self.indexfile
- )
-
- self.opener = opener
+ self._nodemap_file = nodemaputil.get_nodemap_file(self)
+
+ assert target[0] in ALL_KINDS
+ assert len(target) == 2
+ self.target = target
# When True, indexfile is opened with checkambig=True at writing, to
# avoid file stat ambiguity.
self._checkambig = checkambig
@@ -468,6 +347,7 @@ class revlog(object):
self._maxchainlen = None
self._deltabothparents = True
self.index = None
+ self._docket = None
self._nodemap_docket = None
# Mapping of partial identifiers to full nodes.
self._pcache = {}
@@ -477,6 +357,7 @@ class revlog(object):
self._maxdeltachainspan = -1
self._withsparseread = False
self._sparserevlog = False
+ self.hassidedata = False
self._srdensitythreshold = 0.50
self._srmingapsize = 262144
@@ -484,27 +365,46 @@ class revlog(object):
# custom flags.
self._flagprocessors = dict(flagutil.flagprocessors)
- # 2-tuple of file handles being used for active writing.
+ # 3-tuple of file handles being used for active writing.
self._writinghandles = None
+ # prevent nesting of addgroup
+ self._adding_group = None
self._loadindex()
self._concurrencychecker = concurrencychecker
- def _loadindex(self):
+ def _init_opts(self):
+ """process options (from above/config) to setup associated default revlog mode
+
+ These values might be affected when actually reading on disk information.
+
+ The relevant values are returned for use in _loadindex().
+
+ * new_header:
+ version header to use if we need to create a new revlog
+
+ * mmapindexthreshold:
+ minimal index size for start to use mmap
+
+ * force_nodemap:
+ force the usage of a "development" version of the nodemap code
+ """
mmapindexthreshold = None
opts = self.opener.options
- if b'revlogv2' in opts:
- newversionflags = REVLOGV2 | FLAG_INLINE_DATA
+ if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
+ new_header = CHANGELOGV2
+ elif b'revlogv2' in opts:
+ new_header = REVLOGV2
elif b'revlogv1' in opts:
- newversionflags = REVLOGV1 | FLAG_INLINE_DATA
+ new_header = REVLOGV1 | FLAG_INLINE_DATA
if b'generaldelta' in opts:
- newversionflags |= FLAG_GENERALDELTA
+ new_header |= FLAG_GENERALDELTA
elif b'revlogv0' in self.opener.options:
- newversionflags = REVLOGV0
+ new_header = REVLOGV0
else:
- newversionflags = REVLOG_DEFAULT_VERSION
+ new_header = REVLOG_DEFAULT_VERSION
if b'chunkcachesize' in opts:
self._chunkcachesize = opts[b'chunkcachesize']
@@ -526,7 +426,6 @@ class revlog(object):
self._maxdeltachainspan = opts[b'maxdeltachainspan']
if self._mmaplargeindex and b'mmapindexthreshold' in opts:
mmapindexthreshold = opts[b'mmapindexthreshold']
- self.hassidedata = bool(opts.get(b'side-data', False))
self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
withsparseread = bool(opts.get(b'with-sparse-read', False))
# sparse-revlog forces sparse-read
@@ -554,75 +453,118 @@ class revlog(object):
_(b'revlog chunk cache size %r is not a power of 2')
% self._chunkcachesize
)
-
- indexdata = b''
- self._initempty = True
+ force_nodemap = opts.get(b'devel-force-nodemap', False)
+ return new_header, mmapindexthreshold, force_nodemap
+
+ def _get_data(self, filepath, mmap_threshold, size=None):
+ """return a file content with or without mmap
+
+ If the file is missing, return the empty string"""
try:
- with self._indexfp() as f:
- if (
- mmapindexthreshold is not None
- and self.opener.fstat(f).st_size >= mmapindexthreshold
- ):
- # TODO: should .close() to release resources without
- # relying on Python GC
- indexdata = util.buffer(util.mmapread(f))
+ with self.opener(filepath) as fp:
+ if mmap_threshold is not None:
+ file_size = self.opener.fstat(fp).st_size
+ if file_size >= mmap_threshold:
+ if size is not None:
+ # avoid potential mmap crash
+ size = min(file_size, size)
+ # TODO: should .close() to release resources without
+ # relying on Python GC
+ if size is None:
+ return util.buffer(util.mmapread(fp))
+ else:
+ return util.buffer(util.mmapread(fp, size))
+ if size is None:
+ return fp.read()
else:
- indexdata = f.read()
- if len(indexdata) > 0:
- versionflags = INDEX_HEADER.unpack(indexdata[:4])[0]
- self._initempty = False
- else:
- versionflags = newversionflags
+ return fp.read(size)
except IOError as inst:
if inst.errno != errno.ENOENT:
raise
-
- versionflags = newversionflags
-
- self.version = versionflags
-
- flags = versionflags & ~0xFFFF
- fmt = versionflags & 0xFFFF
-
- if fmt == REVLOGV0:
- if flags:
- raise error.RevlogError(
- _(b'unknown flags (%#04x) in version %d revlog %s')
- % (flags >> 16, fmt, self.indexfile)
+ return b''
+
+ def _loadindex(self, docket=None):
+
+ new_header, mmapindexthreshold, force_nodemap = self._init_opts()
+
+ if self.postfix is not None:
+ entry_point = b'%s.i.%s' % (self.radix, self.postfix)
+ elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
+ entry_point = b'%s.i.a' % self.radix
+ else:
+ entry_point = b'%s.i' % self.radix
+
+ if docket is not None:
+ self._docket = docket
+ self._docket_file = entry_point
+ else:
+ entry_data = b''
+ self._initempty = True
+ entry_data = self._get_data(entry_point, mmapindexthreshold)
+ if len(entry_data) > 0:
+ header = INDEX_HEADER.unpack(entry_data[:4])[0]
+ self._initempty = False
+ else:
+ header = new_header
+
+ self._format_flags = header & ~0xFFFF
+ self._format_version = header & 0xFFFF
+
+ supported_flags = SUPPORTED_FLAGS.get(self._format_version)
+ if supported_flags is None:
+ msg = _(b'unknown version (%d) in revlog %s')
+ msg %= (self._format_version, self.display_id)
+ raise error.RevlogError(msg)
+ elif self._format_flags & ~supported_flags:
+ msg = _(b'unknown flags (%#04x) in version %d revlog %s')
+ display_flag = self._format_flags >> 16
+ msg %= (display_flag, self._format_version, self.display_id)
+ raise error.RevlogError(msg)
+
+ features = FEATURES_BY_VERSION[self._format_version]
+ self._inline = features[b'inline'](self._format_flags)
+ self._generaldelta = features[b'generaldelta'](self._format_flags)
+ self.hassidedata = features[b'sidedata']
+
+ if not features[b'docket']:
+ self._indexfile = entry_point
+ index_data = entry_data
+ else:
+ self._docket_file = entry_point
+ if self._initempty:
+ self._docket = docketutil.default_docket(self, header)
+ else:
+ self._docket = docketutil.parse_docket(
+ self, entry_data, use_pending=self._trypending
+ )
+
+ if self._docket is not None:
+ self._indexfile = self._docket.index_filepath()
+ index_data = b''
+ index_size = self._docket.index_end
+ if index_size > 0:
+ index_data = self._get_data(
+ self._indexfile, mmapindexthreshold, size=index_size
)
-
- self._inline = False
- self._generaldelta = False
-
- elif fmt == REVLOGV1:
- if flags & ~REVLOGV1_FLAGS:
- raise error.RevlogError(
- _(b'unknown flags (%#04x) in version %d revlog %s')
- % (flags >> 16, fmt, self.indexfile)
- )
-
- self._inline = versionflags & FLAG_INLINE_DATA
- self._generaldelta = versionflags & FLAG_GENERALDELTA
-
- elif fmt == REVLOGV2:
- if flags & ~REVLOGV2_FLAGS:
- raise error.RevlogError(
- _(b'unknown flags (%#04x) in version %d revlog %s')
- % (flags >> 16, fmt, self.indexfile)
- )
-
- # There is a bug in the transaction handling when going from an
- # inline revlog to a separate index and data file. Turn it off until
- # it's fixed, since v2 revlogs sometimes get rewritten on exchange.
- # See issue6485
+ if len(index_data) < index_size:
+ msg = _(b'too few index data for %s: got %d, expected %d')
+ msg %= (self.display_id, len(index_data), index_size)
+ raise error.RevlogError(msg)
+
self._inline = False
# generaldelta implied by version 2 revlogs.
self._generaldelta = True
-
+ # the logic for persistent nodemap will be dealt with within the
+ # main docket, so disable it for now.
+ self._nodemap_file = None
+
+ if self._docket is not None:
+ self._datafile = self._docket.data_filepath()
+ self._sidedatafile = self._docket.sidedata_filepath()
+ elif self.postfix is None:
+ self._datafile = b'%s.d' % self.radix
else:
- raise error.RevlogError(
- _(b'unknown version (%d) in revlog %s') % (fmt, self.indexfile)
- )
+ self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
self.nodeconstants = sha1nodeconstants
self.nullid = self.nodeconstants.nullid
@@ -634,33 +576,35 @@ class revlog(object):
self._storedeltachains = True
devel_nodemap = (
- self.nodemap_file
- and opts.get(b'devel-force-nodemap', False)
- and NodemapRevlogIO is not None
+ self._nodemap_file
+ and force_nodemap
+ and parse_index_v1_nodemap is not None
)
use_rust_index = False
if rustrevlog is not None:
- if self.nodemap_file is not None:
+ if self._nodemap_file is not None:
use_rust_index = True
else:
use_rust_index = self.opener.options.get(b'rust.index')
- self._io = revlogio()
- if self.version == REVLOGV0:
- self._io = revlogoldio()
- elif fmt == REVLOGV2:
- self._io = revlogv2io()
+ self._parse_index = parse_index_v1
+ if self._format_version == REVLOGV0:
+ self._parse_index = revlogv0.parse_index_v0
+ elif self._format_version == REVLOGV2:
+ self._parse_index = parse_index_v2
+ elif self._format_version == CHANGELOGV2:
+ self._parse_index = parse_index_cl_v2
elif devel_nodemap:
- self._io = NodemapRevlogIO()
+ self._parse_index = parse_index_v1_nodemap
elif use_rust_index:
- self._io = rustrevlogio()
+ self._parse_index = parse_index_v1_mixed
try:
- d = self._io.parseindex(indexdata, self._inline)
- index, _chunkcache = d
+ d = self._parse_index(index_data, self._inline)
+ index, chunkcache = d
use_nodemap = (
not self._inline
- and self.nodemap_file is not None
+ and self._nodemap_file is not None
and util.safehasattr(index, 'update_nodemap_data')
)
if use_nodemap:
@@ -676,58 +620,106 @@ class revlog(object):
index.update_nodemap_data(*nodemap_data)
except (ValueError, IndexError):
raise error.RevlogError(
- _(b"index %s is corrupted") % self.indexfile
+ _(b"index %s is corrupted") % self.display_id
)
- self.index, self._chunkcache = d
- if not self._chunkcache:
- self._chunkclear()
+ self.index = index
+ self._segmentfile = randomaccessfile.randomaccessfile(
+ self.opener,
+ (self._indexfile if self._inline else self._datafile),
+ self._chunkcachesize,
+ chunkcache,
+ )
+ self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
+ self.opener,
+ self._sidedatafile,
+ self._chunkcachesize,
+ )
# revnum -> (chain-length, sum-delta-length)
self._chaininfocache = util.lrucachedict(500)
# revlog header -> revlog compressor
self._decompressors = {}
@util.propertycache
+ def revlog_kind(self):
+ return self.target[0]
+
+ @util.propertycache
+ def display_id(self):
+ """The public facing "ID" of the revlog that we use in message"""
+ # Maybe we should build a user facing representation of
+ # revlog.target instead of using `self.radix`
+ return self.radix
+
+ def _get_decompressor(self, t):
+ try:
+ compressor = self._decompressors[t]
+ except KeyError:
+ try:
+ engine = util.compengines.forrevlogheader(t)
+ compressor = engine.revlogcompressor(self._compengineopts)
+ self._decompressors[t] = compressor
+ except KeyError:
+ raise error.RevlogError(
+ _(b'unknown compression type %s') % binascii.hexlify(t)
+ )
+ return compressor
+
+ @util.propertycache
def _compressor(self):
engine = util.compengines[self._compengine]
return engine.revlogcompressor(self._compengineopts)
- def _indexfp(self, mode=b'r'):
+ @util.propertycache
+ def _decompressor(self):
+ """the default decompressor"""
+ if self._docket is None:
+ return None
+ t = self._docket.default_compression_header
+ c = self._get_decompressor(t)
+ return c.decompress
+
+ def _indexfp(self):
"""file object for the revlog's index file"""
- args = {'mode': mode}
- if mode != b'r':
- args['checkambig'] = self._checkambig
- if mode == b'w':
- args['atomictemp'] = True
- return self.opener(self.indexfile, **args)
+ return self.opener(self._indexfile, mode=b"r")
+
+ def __index_write_fp(self):
+ # You should not use this directly; use `_writing` instead
+ try:
+ f = self.opener(
+ self._indexfile, mode=b"r+", checkambig=self._checkambig
+ )
+ if self._docket is None:
+ f.seek(0, os.SEEK_END)
+ else:
+ f.seek(self._docket.index_end, os.SEEK_SET)
+ return f
+ except IOError as inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ return self.opener(
+ self._indexfile, mode=b"w+", checkambig=self._checkambig
+ )
+
+ def __index_new_fp(self):
+ # You should not use this unless you are upgrading from an inline revlog
+ return self.opener(
+ self._indexfile,
+ mode=b"w",
+ checkambig=self._checkambig,
+ atomictemp=True,
+ )
def _datafp(self, mode=b'r'):
"""file object for the revlog's data file"""
- return self.opener(self.datafile, mode=mode)
+ return self.opener(self._datafile, mode=mode)
@contextlib.contextmanager
- def _datareadfp(self, existingfp=None):
- """file object suitable to read data"""
- # Use explicit file handle, if given.
- if existingfp is not None:
- yield existingfp
-
- # Use a file handle being actively used for writes, if available.
- # There is some danger to doing this because reads will seek the
- # file. However, _writeentry() performs a SEEK_END before all writes,
- # so we should be safe.
- elif self._writinghandles:
- if self._inline:
- yield self._writinghandles[0]
- else:
- yield self._writinghandles[1]
-
- # Otherwise open a new file handle.
+ def _sidedatareadfp(self):
+ """file object suitable to read sidedata"""
+ if self._writinghandles:
+ yield self._writinghandles[2]
else:
- if self._inline:
- func = self._indexfp
- else:
- func = self._datafp
- with func() as fp:
+ with self.opener(self._sidedatafile) as fp:
yield fp
def tiprev(self):
@@ -785,7 +777,7 @@ class revlog(object):
return True
def update_caches(self, transaction):
- if self.nodemap_file is not None:
+ if self._nodemap_file is not None:
if transaction is None:
nodemaputil.update_persistent_nodemap(self)
else:
@@ -794,7 +786,8 @@ class revlog(object):
def clearcaches(self):
self._revisioncache = None
self._chainbasecache.clear()
- self._chunkcache = (0, b'')
+ self._segmentfile.clear_cache()
+ self._segmentfile_sidedata.clear_cache()
self._pcache = {}
self._nodemap_docket = None
self.index.clearcaches()
@@ -802,7 +795,7 @@ class revlog(object):
# end up having to refresh it here.
use_nodemap = (
not self._inline
- and self.nodemap_file is not None
+ and self._nodemap_file is not None
and util.safehasattr(self.index, 'update_nodemap_data')
)
if use_nodemap:
@@ -818,9 +811,12 @@ class revlog(object):
raise
except error.RevlogError:
# parsers.c radix tree lookup failed
- if node == wdirid or node in wdirfilenodeids:
+ if (
+ node == self.nodeconstants.wdirid
+ or node in self.nodeconstants.wdirfilenodeids
+ ):
raise error.WdirUnsupported
- raise error.LookupError(node, self.indexfile, _(b'no node'))
+ raise error.LookupError(node, self.display_id, _(b'no node'))
# Accessors for index entries.
@@ -829,6 +825,23 @@ class revlog(object):
def start(self, rev):
return int(self.index[rev][0] >> 16)
+ def sidedata_cut_off(self, rev):
+ sd_cut_off = self.index[rev][8]
+ if sd_cut_off != 0:
+ return sd_cut_off
+ # This is some annoying dance, because entries without sidedata
+ # currently use 0 as their offset. (instead of previous-offset +
+ # previous-size)
+ #
+ # We should reconsider this sidedata → 0 sidedata_offset policy.
+ # In the meantime, we need this.
+ while 0 <= rev:
+ e = self.index[rev]
+ if e[9] != 0:
+ return e[8] + e[9]
+ rev -= 1
+ return 0
+
def flags(self, rev):
return self.index[rev][0] & 0xFFFF
@@ -836,7 +849,7 @@ class revlog(object):
return self.index[rev][1]
def sidedata_length(self, rev):
- if self.version & 0xFFFF != REVLOGV2:
+ if not self.hassidedata:
return 0
return self.index[rev][9]
@@ -996,7 +1009,7 @@ class revlog(object):
checkrev(r)
# and we're sure ancestors aren't filtered as well
- if rustancestor is not None:
+ if rustancestor is not None and self.index.rust_ext_compat:
lazyancestors = rustancestor.LazyAncestors
arg = self.index
else:
@@ -1021,7 +1034,7 @@ class revlog(object):
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
- common = [nullid]
+ common = [self.nullid]
if heads is None:
heads = self.heads()
@@ -1083,7 +1096,7 @@ class revlog(object):
if common is None:
common = [nullrev]
- if rustancestor is not None:
+ if rustancestor is not None and self.index.rust_ext_compat:
return rustancestor.MissingAncestors(self.index, common)
return ancestor.incrementalmissingancestors(self.parentrevs, common)
@@ -1127,7 +1140,7 @@ class revlog(object):
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
- common = [nullid]
+ common = [self.nullid]
if heads is None:
heads = self.heads()
@@ -1165,11 +1178,15 @@ class revlog(object):
return nonodes
lowestrev = min([self.rev(n) for n in roots])
else:
- roots = [nullid] # Everybody's a descendant of nullid
+ roots = [self.nullid] # Everybody's a descendant of nullid
lowestrev = nullrev
if (lowestrev == nullrev) and (heads is None):
# We want _all_ the nodes!
- return ([self.node(r) for r in self], [nullid], list(self.heads()))
+ return (
+ [self.node(r) for r in self],
+ [self.nullid],
+ list(self.heads()),
+ )
if heads is None:
# All nodes are ancestors, so the latest ancestor is the last
# node.
@@ -1195,7 +1212,7 @@ class revlog(object):
# grab a node to tag
n = nodestotag.pop()
# Never tag nullid
- if n == nullid:
+ if n == self.nullid:
continue
# A node's revision number represents its place in a
# topologically sorted list of nodes.
@@ -1207,7 +1224,7 @@ class revlog(object):
ancestors.add(n) # Mark as ancestor
# Add non-nullid parents to list of nodes to tag.
nodestotag.update(
- [p for p in self.parents(n) if p != nullid]
+ [p for p in self.parents(n) if p != self.nullid]
)
elif n in heads: # We've seen it before, is it a fake head?
# So it is, real heads should not be the ancestors of
@@ -1235,7 +1252,7 @@ class revlog(object):
# We are descending from nullid, and don't need to care about
# any other roots.
lowestrev = nullrev
- roots = [nullid]
+ roots = [self.nullid]
# Transform our roots list into a set.
descendants = set(roots)
# Also, keep the original roots so we can filter out roots that aren't
@@ -1299,7 +1316,7 @@ class revlog(object):
return self.index.headrevs()
except AttributeError:
return self._headrevs()
- if rustdagop is not None:
+ if rustdagop is not None and self.index.rust_ext_compat:
return rustdagop.headrevs(self.index, revs)
return dagop.headrevs(revs, self._uncheckedparentrevs)
@@ -1329,7 +1346,7 @@ class revlog(object):
"""
if start is None and stop is None:
if not len(self):
- return [nullid]
+ return [self.nullid]
return [self.node(r) for r in self.headrevs()]
if start is None:
@@ -1419,13 +1436,13 @@ class revlog(object):
if ancs:
# choose a consistent winner when there's a tie
return min(map(self.node, ancs))
- return nullid
+ return self.nullid
def _match(self, id):
if isinstance(id, int):
# rev
return self.node(id)
- if len(id) == 20:
+ if len(id) == self.nodeconstants.nodelen:
# possibly a binary node
# odds of a binary node being all hex in ASCII are 1 in 10**25
try:
@@ -1446,7 +1463,7 @@ class revlog(object):
return self.node(rev)
except (ValueError, OverflowError):
pass
- if len(id) == 40:
+ if len(id) == 2 * self.nodeconstants.nodelen:
try:
# a full hex nodeid?
node = bin(id)
@@ -1457,29 +1474,34 @@ class revlog(object):
def _partialmatch(self, id):
# we don't care wdirfilenodeids as they should be always full hash
- maybewdir = wdirhex.startswith(id)
+ maybewdir = self.nodeconstants.wdirhex.startswith(id)
+ ambiguous = False
try:
partial = self.index.partialmatch(id)
if partial and self.hasnode(partial):
if maybewdir:
# single 'ff...' match in radix tree, ambiguous with wdir
- raise error.RevlogError
- return partial
- if maybewdir:
+ ambiguous = True
+ else:
+ return partial
+ elif maybewdir:
# no 'ff...' match in radix tree, wdir identified
raise error.WdirUnsupported
- return None
+ else:
+ return None
except error.RevlogError:
# parsers.c radix tree lookup gave multiple matches
# fast path: for unfiltered changelog, radix tree is accurate
if not getattr(self, 'filteredrevs', None):
- raise error.AmbiguousPrefixLookupError(
- id, self.indexfile, _(b'ambiguous identifier')
- )
+ ambiguous = True
# fall through to slow path that filters hidden revisions
except (AttributeError, ValueError):
# we are pure python, or key was too short to search radix tree
pass
+ if ambiguous:
+ raise error.AmbiguousPrefixLookupError(
+ id, self.display_id, _(b'ambiguous identifier')
+ )
if id in self._pcache:
return self._pcache[id]
@@ -1493,14 +1515,14 @@ class revlog(object):
nl = [
n for n in nl if hex(n).startswith(id) and self.hasnode(n)
]
- if nullhex.startswith(id):
- nl.append(nullid)
+ if self.nodeconstants.nullhex.startswith(id):
+ nl.append(self.nullid)
if len(nl) > 0:
if len(nl) == 1 and not maybewdir:
self._pcache[id] = nl[0]
return nl[0]
raise error.AmbiguousPrefixLookupError(
- id, self.indexfile, _(b'ambiguous identifier')
+ id, self.display_id, _(b'ambiguous identifier')
)
if maybewdir:
raise error.WdirUnsupported
@@ -1520,7 +1542,7 @@ class revlog(object):
if n:
return n
- raise error.LookupError(id, self.indexfile, _(b'no match found'))
+ raise error.LookupError(id, self.display_id, _(b'no match found'))
def shortest(self, node, minlength=1):
"""Find the shortest unambiguous prefix that matches node."""
@@ -1534,7 +1556,7 @@ class revlog(object):
# single 'ff...' match
return True
if matchednode is None:
- raise error.LookupError(node, self.indexfile, _(b'no node'))
+ raise error.LookupError(node, self.display_id, _(b'no node'))
return True
def maybewdir(prefix):
@@ -1554,13 +1576,15 @@ class revlog(object):
length = max(self.index.shortest(node), minlength)
return disambiguate(hexnode, length)
except error.RevlogError:
- if node != wdirid:
- raise error.LookupError(node, self.indexfile, _(b'no node'))
+ if node != self.nodeconstants.wdirid:
+ raise error.LookupError(
+ node, self.display_id, _(b'no node')
+ )
except AttributeError:
# Fall through to pure code
pass
- if node == wdirid:
+ if node == self.nodeconstants.wdirid:
for length in range(minlength, len(hexnode) + 1):
prefix = hexnode[:length]
if isvalid(prefix):
@@ -1579,102 +1603,6 @@ class revlog(object):
p1, p2 = self.parents(node)
return storageutil.hashrevisionsha1(text, p1, p2) != node
- def _cachesegment(self, offset, data):
- """Add a segment to the revlog cache.
-
- Accepts an absolute offset and the data that is at that location.
- """
- o, d = self._chunkcache
- # try to add to existing cache
- if o + len(d) == offset and len(d) + len(data) < _chunksize:
- self._chunkcache = o, d + data
- else:
- self._chunkcache = offset, data
-
- def _readsegment(self, offset, length, df=None):
- """Load a segment of raw data from the revlog.
-
- Accepts an absolute offset, length to read, and an optional existing
- file handle to read from.
-
- If an existing file handle is passed, it will be seeked and the
- original seek position will NOT be restored.
-
- Returns a str or buffer of raw byte data.
-
- Raises if the requested number of bytes could not be read.
- """
- # Cache data both forward and backward around the requested
- # data, in a fixed size window. This helps speed up operations
- # involving reading the revlog backwards.
- cachesize = self._chunkcachesize
- realoffset = offset & ~(cachesize - 1)
- reallength = (
- (offset + length + cachesize) & ~(cachesize - 1)
- ) - realoffset
- with self._datareadfp(df) as df:
- df.seek(realoffset)
- d = df.read(reallength)
-
- self._cachesegment(realoffset, d)
- if offset != realoffset or reallength != length:
- startoffset = offset - realoffset
- if len(d) - startoffset < length:
- raise error.RevlogError(
- _(
- b'partial read of revlog %s; expected %d bytes from '
- b'offset %d, got %d'
- )
- % (
- self.indexfile if self._inline else self.datafile,
- length,
- realoffset,
- len(d) - startoffset,
- )
- )
-
- return util.buffer(d, startoffset, length)
-
- if len(d) < length:
- raise error.RevlogError(
- _(
- b'partial read of revlog %s; expected %d bytes from offset '
- b'%d, got %d'
- )
- % (
- self.indexfile if self._inline else self.datafile,
- length,
- offset,
- len(d),
- )
- )
-
- return d
-
- def _getsegment(self, offset, length, df=None):
- """Obtain a segment of raw data from the revlog.
-
- Accepts an absolute offset, length of bytes to obtain, and an
- optional file handle to the already-opened revlog. If the file
- handle is used, it's original seek position will not be preserved.
-
- Requests for data may be returned from a cache.
-
- Returns a str or a buffer instance of raw byte data.
- """
- o, d = self._chunkcache
- l = len(d)
-
- # is it in the cache?
- cachestart = offset - o
- cacheend = cachestart + length
- if cachestart >= 0 and cacheend <= l:
- if cachestart == 0 and cacheend == l:
- return d # avoid a copy
- return util.buffer(d, cachestart, cacheend - cachestart)
-
- return self._readsegment(offset, length, df=df)
-
def _getsegmentforrevs(self, startrev, endrev, df=None):
"""Obtain a segment of raw data corresponding to a range of revisions.
@@ -1707,7 +1635,7 @@ class revlog(object):
end += (endrev + 1) * self.index.entry_size
length = end - start
- return start, self._getsegment(start, length, df=df)
+ return start, self._segmentfile.read_chunk(start, length, df)
def _chunk(self, rev, df=None):
"""Obtain a single decompressed chunk for a revision.
@@ -1718,7 +1646,18 @@ class revlog(object):
Returns a str holding uncompressed data for the requested revision.
"""
- return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
+ compression_mode = self.index[rev][10]
+ data = self._getsegmentforrevs(rev, rev, df=df)[1]
+ if compression_mode == COMP_MODE_PLAIN:
+ return data
+ elif compression_mode == COMP_MODE_DEFAULT:
+ return self._decompressor(data)
+ elif compression_mode == COMP_MODE_INLINE:
+ return self.decompress(data)
+ else:
+ msg = b'unknown compression mode %d'
+ msg %= compression_mode
+ raise error.RevlogError(msg)
def _chunks(self, revs, df=None, targetsize=None):
"""Obtain decompressed chunks for the specified revisions.
@@ -1766,19 +1705,28 @@ class revlog(object):
return [self._chunk(rev, df=df) for rev in revschunk]
decomp = self.decompress
+ # self._decompressor might be None, but will not be used in that case
+ def_decomp = self._decompressor
for rev in revschunk:
chunkstart = start(rev)
if inline:
chunkstart += (rev + 1) * iosize
chunklength = length(rev)
- ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
+ comp_mode = self.index[rev][10]
+ c = buffer(data, chunkstart - offset, chunklength)
+ if comp_mode == COMP_MODE_PLAIN:
+ ladd(c)
+ elif comp_mode == COMP_MODE_INLINE:
+ ladd(decomp(c))
+ elif comp_mode == COMP_MODE_DEFAULT:
+ ladd(def_decomp(c))
+ else:
+ msg = b'unknown compression mode %d'
+ msg %= comp_mode
+ raise error.RevlogError(msg)
return l
- def _chunkclear(self):
- """Clear the raw chunk cache."""
- self._chunkcache = (0, b'')
-
def deltaparent(self, rev):
"""return deltaparent of the given revision"""
base = self.index[rev][3]
@@ -1854,7 +1802,7 @@ class revlog(object):
b'use revlog.rawdata(...)'
)
util.nouideprecwarn(msg, b'5.2', stacklevel=2)
- return self._revisiondata(nodeorrev, _df, raw=raw)[0]
+ return self._revisiondata(nodeorrev, _df, raw=raw)
def sidedata(self, nodeorrev, _df=None):
"""a map of extra data related to the changeset but not part of the hash
@@ -1863,7 +1811,12 @@ class revlog(object):
mapping object will likely be used in the future for a more
efficient/lazy code.
"""
- return self._revisiondata(nodeorrev, _df)[1]
+ # deal with argument type
+ if isinstance(nodeorrev, int):
+ rev = nodeorrev
+ else:
+ rev = self.rev(nodeorrev)
+ return self._sidedata(rev)
def _revisiondata(self, nodeorrev, _df=None, raw=False):
# deal with argument type
@@ -1875,24 +1828,17 @@ class revlog(object):
rev = None
# fast path the special `nullid` rev
- if node == nullid:
- return b"", {}
+ if node == self.nullid:
+ return b""
# ``rawtext`` is the text as stored inside the revlog. Might be the
# revision or might need to be processed to retrieve the revision.
rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
- if self.version & 0xFFFF == REVLOGV2:
- if rev is None:
- rev = self.rev(node)
- sidedata = self._sidedata(rev)
- else:
- sidedata = {}
-
if raw and validated:
# if we don't want to process the raw text and that raw
# text is cached, we can exit early.
- return rawtext, sidedata
+ return rawtext
if rev is None:
rev = self.rev(node)
# the revlog's flag for this revision
@@ -1901,7 +1847,7 @@ class revlog(object):
if validated and flags == REVIDX_DEFAULT_FLAGS:
# no extra flags set, no flag processor runs, text = rawtext
- return rawtext, sidedata
+ return rawtext
if raw:
validatehash = flagutil.processflagsraw(self, rawtext, flags)
@@ -1914,7 +1860,7 @@ class revlog(object):
if not validated:
self._revisioncache = (node, rev, rawtext)
- return text, sidedata
+ return text
def _rawtext(self, node, rev, _df=None):
"""return the possibly unvalidated rawtext for a revision
@@ -1970,7 +1916,30 @@ class revlog(object):
if sidedata_size == 0:
return {}
- segment = self._getsegment(sidedata_offset, sidedata_size)
+ if self._docket.sidedata_end < sidedata_offset + sidedata_size:
+ filename = self._sidedatafile
+ end = self._docket.sidedata_end
+ offset = sidedata_offset
+ length = sidedata_size
+ m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
+ raise error.RevlogError(m)
+
+ comp_segment = self._segmentfile_sidedata.read_chunk(
+ sidedata_offset, sidedata_size
+ )
+
+ comp = self.index[rev][11]
+ if comp == COMP_MODE_PLAIN:
+ segment = comp_segment
+ elif comp == COMP_MODE_DEFAULT:
+ segment = self._decompressor(comp_segment)
+ elif comp == COMP_MODE_INLINE:
+ segment = self.decompress(comp_segment)
+ else:
+ msg = b'unknown compression mode %d'
+ msg %= comp
+ raise error.RevlogError(msg)
+
sidedata = sidedatautil.deserialize_sidedata(segment)
return sidedata
@@ -1979,7 +1948,7 @@ class revlog(object):
_df - an existing file handle to read from. (internal-only)
"""
- return self._revisiondata(nodeorrev, _df, raw=True)[0]
+ return self._revisiondata(nodeorrev, _df, raw=True)
def hash(self, text, p1, p2):
"""Compute a node hash.
@@ -2013,14 +1982,14 @@ class revlog(object):
revornode = templatefilters.short(hex(node))
raise error.RevlogError(
_(b"integrity check failed on %s:%s")
- % (self.indexfile, pycompat.bytestr(revornode))
+ % (self.display_id, pycompat.bytestr(revornode))
)
except error.RevlogError:
if self._censorable and storageutil.iscensoredtext(text):
- raise error.CensoredNodeError(self.indexfile, node, text)
+ raise error.CensoredNodeError(self.display_id, node, text)
raise
- def _enforceinlinesize(self, tr, fp=None):
+ def _enforceinlinesize(self, tr):
"""Check if the revlog is too big for inline and convert if so.
This should be called after revisions are added to the revlog. If the
@@ -2028,51 +1997,172 @@ class revlog(object):
to use multiple index and data files.
"""
tiprev = len(self) - 1
- if (
- not self._inline
- or (self.start(tiprev) + self.length(tiprev)) < _maxinline
- ):
+ total_size = self.start(tiprev) + self.length(tiprev)
+ if not self._inline or total_size < _maxinline:
return
- troffset = tr.findoffset(self.indexfile)
+ troffset = tr.findoffset(self._indexfile)
if troffset is None:
raise error.RevlogError(
- _(b"%s not found in the transaction") % self.indexfile
+ _(b"%s not found in the transaction") % self._indexfile
)
trindex = 0
- tr.add(self.datafile, 0)
-
- if fp:
+ tr.add(self._datafile, 0)
+
+ existing_handles = False
+ if self._writinghandles is not None:
+ existing_handles = True
+ fp = self._writinghandles[0]
fp.flush()
fp.close()
# We can't use the cached file handle after close(). So prevent
# its usage.
self._writinghandles = None
-
- with self._indexfp(b'r') as ifh, self._datafp(b'w') as dfh:
- for r in self:
- dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
- if troffset <= self.start(r):
- trindex = r
-
- with self._indexfp(b'w') as fp:
- self.version &= ~FLAG_INLINE_DATA
- self._inline = False
- io = self._io
- for i in self:
- e = io.packentry(self.index[i], self.node, self.version, i)
- fp.write(e)
-
- # the temp file replace the real index when we exit the context
- # manager
-
- tr.replace(self.indexfile, trindex * self.index.entry_size)
- nodemaputil.setup_persistent_nodemap(tr, self)
- self._chunkclear()
+ self._segmentfile.writing_handle = None
+ # No need to deal with the sidedata writing handle, as it is only
+ # relevant for revlog-v2, which is never inline, so this code is
+ # never reached for it
+
+ new_dfh = self._datafp(b'w+')
+ new_dfh.truncate(0) # drop any potentially existing data
+ try:
+ with self._indexfp() as read_ifh:
+ for r in self:
+ new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
+ if troffset <= self.start(r) + r * self.index.entry_size:
+ trindex = r
+ new_dfh.flush()
+
+ with self.__index_new_fp() as fp:
+ self._format_flags &= ~FLAG_INLINE_DATA
+ self._inline = False
+ for i in self:
+ e = self.index.entry_binary(i)
+ if i == 0 and self._docket is None:
+ header = self._format_flags | self._format_version
+ header = self.index.pack_header(header)
+ e = header + e
+ fp.write(e)
+ if self._docket is not None:
+ self._docket.index_end = fp.tell()
+
+ # There is a small transactional race here. If the rename of
+ # the index fails, we should remove the datafile. It is more
+ # important to ensure that the data file is not truncated
+ # when the index is replaced as otherwise data is lost.
+ tr.replace(self._datafile, self.start(trindex))
+
+ # the temp file replace the real index when we exit the context
+ # manager
+
+ tr.replace(self._indexfile, trindex * self.index.entry_size)
+ nodemaputil.setup_persistent_nodemap(tr, self)
+ self._segmentfile = randomaccessfile.randomaccessfile(
+ self.opener,
+ self._datafile,
+ self._chunkcachesize,
+ )
+
+ if existing_handles:
+ # switched from inline to conventional reopen the index
+ ifh = self.__index_write_fp()
+ self._writinghandles = (ifh, new_dfh, None)
+ self._segmentfile.writing_handle = new_dfh
+ new_dfh = None
+ # No need to deal with the sidedata writing handle, as it is only
+ # relevant for revlog-v2, which is never inline, so this code is
+ # never reached for it
+ finally:
+ if new_dfh is not None:
+ new_dfh.close()
def _nodeduplicatecallback(self, transaction, node):
"""called when trying to add a node already stored."""
+ @contextlib.contextmanager
+ def reading(self):
+ """Context manager that keeps data and sidedata files open for reading"""
+ with self._segmentfile.reading():
+ with self._segmentfile_sidedata.reading():
+ yield
+
+ @contextlib.contextmanager
+ def _writing(self, transaction):
+ if self._trypending:
+ msg = b'try to write in a `trypending` revlog: %s'
+ msg %= self.display_id
+ raise error.ProgrammingError(msg)
+ if self._writinghandles is not None:
+ yield
+ else:
+ ifh = dfh = sdfh = None
+ try:
+ r = len(self)
+ # opening the data file.
+ dsize = 0
+ if r:
+ dsize = self.end(r - 1)
+ dfh = None
+ if not self._inline:
+ try:
+ dfh = self._datafp(b"r+")
+ if self._docket is None:
+ dfh.seek(0, os.SEEK_END)
+ else:
+ dfh.seek(self._docket.data_end, os.SEEK_SET)
+ except IOError as inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ dfh = self._datafp(b"w+")
+ transaction.add(self._datafile, dsize)
+ if self._sidedatafile is not None:
+ try:
+ sdfh = self.opener(self._sidedatafile, mode=b"r+")
+ dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
+ except IOError as inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ sdfh = self.opener(self._sidedatafile, mode=b"w+")
+ transaction.add(
+ self._sidedatafile, self._docket.sidedata_end
+ )
+
+ # opening the index file.
+ isize = r * self.index.entry_size
+ ifh = self.__index_write_fp()
+ if self._inline:
+ transaction.add(self._indexfile, dsize + isize)
+ else:
+ transaction.add(self._indexfile, isize)
+ # exposing all file handles for writing.
+ self._writinghandles = (ifh, dfh, sdfh)
+ self._segmentfile.writing_handle = ifh if self._inline else dfh
+ self._segmentfile_sidedata.writing_handle = sdfh
+ yield
+ if self._docket is not None:
+ self._write_docket(transaction)
+ finally:
+ self._writinghandles = None
+ self._segmentfile.writing_handle = None
+ self._segmentfile_sidedata.writing_handle = None
+ if dfh is not None:
+ dfh.close()
+ if sdfh is not None:
+ sdfh.close()
+ # closing the index file last, to avoid exposing index entries that
+ # refer to potentially unflushed data content.
+ if ifh is not None:
+ ifh.close()
+
+ def _write_docket(self, transaction):
+ """write the current docket on disk
+
+ Exists as a method to help the changelog implement its transaction logic.
+
+ We could also imagine using the same transaction logic for all revlogs,
+ since dockets are cheap."""
+ self._docket.write(transaction)
+
def addrevision(
self,
text,
@@ -2102,12 +2192,12 @@ class revlog(object):
"""
if link == nullrev:
raise error.RevlogError(
- _(b"attempted to add linkrev -1 to %s") % self.indexfile
+ _(b"attempted to add linkrev -1 to %s") % self.display_id
)
if sidedata is None:
sidedata = {}
- elif not self.hassidedata:
+ elif sidedata and not self.hassidedata:
raise error.ProgrammingError(
_(b"trying to add sidedata to a revlog who don't support them")
)
@@ -2127,7 +2217,7 @@ class revlog(object):
_(
b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
)
- % (self.indexfile, len(rawtext))
+ % (self.display_id, len(rawtext))
)
node = node or self.hash(rawtext, p1, p2)
@@ -2168,11 +2258,7 @@ class revlog(object):
useful when reusing a revision not stored in this revlog (ex: received
over wire, or read from an external bundle).
"""
- dfh = None
- if not self._inline:
- dfh = self._datafp(b"a+")
- ifh = self._indexfp(b"a+")
- try:
+ with self._writing(transaction):
return self._addrevision(
node,
rawtext,
@@ -2182,15 +2268,9 @@ class revlog(object):
p2,
flags,
cachedelta,
- ifh,
- dfh,
deltacomputer=deltacomputer,
sidedata=sidedata,
)
- finally:
- if dfh:
- dfh.close()
- ifh.close()
def compress(self, data):
"""Generate a possibly-compressed representation of data."""
@@ -2253,17 +2333,7 @@ class revlog(object):
elif t == b'u':
return util.buffer(data, 1)
- try:
- compressor = self._decompressors[t]
- except KeyError:
- try:
- engine = util.compengines.forrevlogheader(t)
- compressor = engine.revlogcompressor(self._compengineopts)
- self._decompressors[t] = compressor
- except KeyError:
- raise error.RevlogError(
- _(b'unknown compression type %s') % binascii.hexlify(t)
- )
+ compressor = self._get_decompressor(t)
return compressor.decompress(data)
@@ -2277,8 +2347,6 @@ class revlog(object):
p2,
flags,
cachedelta,
- ifh,
- dfh,
alwayscache=False,
deltacomputer=None,
sidedata=None,
@@ -2296,19 +2364,25 @@ class revlog(object):
- rawtext is optional (can be None); if not set, cachedelta must be set.
if both are set, they must correspond to each other.
"""
- if node == nullid:
+ if node == self.nullid:
raise error.RevlogError(
- _(b"%s: attempt to add null revision") % self.indexfile
+ _(b"%s: attempt to add null revision") % self.display_id
)
- if node == wdirid or node in wdirfilenodeids:
+ if (
+ node == self.nodeconstants.wdirid
+ or node in self.nodeconstants.wdirfilenodeids
+ ):
raise error.RevlogError(
- _(b"%s: attempt to add wdir revision") % self.indexfile
+ _(b"%s: attempt to add wdir revision") % self.display_id
)
+ if self._writinghandles is None:
+ msg = b'adding revision outside `revlog._writing` context'
+ raise error.ProgrammingError(msg)
if self._inline:
- fh = ifh
+ fh = self._writinghandles[0]
else:
- fh = dfh
+ fh = self._writinghandles[1]
btext = [rawtext]
@@ -2318,18 +2392,20 @@ class revlog(object):
offset = self._get_data_offset(prev)
if self._concurrencychecker:
+ ifh, dfh, sdfh = self._writinghandles
+ # XXX no checking for the sidedata file
if self._inline:
# offset is "as if" it were in the .d file, so we need to add on
# the size of the entry metadata.
self._concurrencychecker(
- ifh, self.indexfile, offset + curr * self.index.entry_size
+ ifh, self._indexfile, offset + curr * self.index.entry_size
)
else:
# Entries in the .i are a consistent size.
self._concurrencychecker(
- ifh, self.indexfile, curr * self.index.entry_size
+ ifh, self._indexfile, curr * self.index.entry_size
)
- self._concurrencychecker(dfh, self.datafile, offset)
+ self._concurrencychecker(dfh, self._datafile, offset)
p1r, p2r = self.rev(p1), self.rev(p2)
@@ -2348,13 +2424,45 @@ class revlog(object):
if deltacomputer is None:
deltacomputer = deltautil.deltacomputer(self)
- revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
+ revinfo = revlogutils.revisioninfo(
+ node,
+ p1,
+ p2,
+ btext,
+ textlen,
+ cachedelta,
+ flags,
+ )
deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
- if sidedata:
+ compression_mode = COMP_MODE_INLINE
+ if self._docket is not None:
+ default_comp = self._docket.default_compression_header
+ r = deltautil.delta_compression(default_comp, deltainfo)
+ compression_mode, deltainfo = r
+
+ sidedata_compression_mode = COMP_MODE_INLINE
+ if sidedata and self.hassidedata:
+ sidedata_compression_mode = COMP_MODE_PLAIN
serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
- sidedata_offset = offset + deltainfo.deltalen
+ sidedata_offset = self._docket.sidedata_end
+ h, comp_sidedata = self.compress(serialized_sidedata)
+ if (
+ h != b'u'
+ and comp_sidedata[0:1] != b'\0'
+ and len(comp_sidedata) < len(serialized_sidedata)
+ ):
+ assert not h
+ if (
+ comp_sidedata[0:1]
+ == self._docket.default_compression_header
+ ):
+ sidedata_compression_mode = COMP_MODE_DEFAULT
+ serialized_sidedata = comp_sidedata
+ else:
+ sidedata_compression_mode = COMP_MODE_INLINE
+ serialized_sidedata = comp_sidedata
else:
serialized_sidedata = b""
# Don't store the offset if the sidedata is empty, that way
@@ -2362,33 +2470,36 @@ class revlog(object):
# than ones we manually add.
sidedata_offset = 0
- e = (
- offset_type(offset, flags),
- deltainfo.deltalen,
- textlen,
- deltainfo.base,
- link,
- p1r,
- p2r,
- node,
- sidedata_offset,
- len(serialized_sidedata),
+ e = revlogutils.entry(
+ flags=flags,
+ data_offset=offset,
+ data_compressed_length=deltainfo.deltalen,
+ data_uncompressed_length=textlen,
+ data_compression_mode=compression_mode,
+ data_delta_base=deltainfo.base,
+ link_rev=link,
+ parent_rev_1=p1r,
+ parent_rev_2=p2r,
+ node_id=node,
+ sidedata_offset=sidedata_offset,
+ sidedata_compressed_length=len(serialized_sidedata),
+ sidedata_compression_mode=sidedata_compression_mode,
)
- if self.version & 0xFFFF != REVLOGV2:
- e = e[:8]
-
self.index.append(e)
- entry = self._io.packentry(e, self.node, self.version, curr)
+ entry = self.index.entry_binary(curr)
+ if curr == 0 and self._docket is None:
+ header = self._format_flags | self._format_version
+ header = self.index.pack_header(header)
+ entry = header + entry
self._writeentry(
transaction,
- ifh,
- dfh,
entry,
deltainfo.data,
link,
offset,
serialized_sidedata,
+ sidedata_offset,
)
rawtext = btext[0]
@@ -2410,19 +2521,13 @@ class revlog(object):
to `n - 1`'s sidedata being written after `n`'s data.
TODO cache this in a docket file before getting out of experimental."""
- if self.version & 0xFFFF != REVLOGV2:
+ if self._docket is None:
return self.end(prev)
-
- offset = 0
- for rev, entry in enumerate(self.index):
- sidedata_end = entry[8] + entry[9]
- # Sidedata for a previous rev has potentially been written after
- # this rev's end, so take the max.
- offset = max(self.end(rev), offset, sidedata_end)
- return offset
+ else:
+ return self._docket.data_end
def _writeentry(
- self, transaction, ifh, dfh, entry, data, link, offset, sidedata
+ self, transaction, entry, data, link, offset, sidedata, sidedata_offset
):
# Files opened in a+ mode have inconsistent behavior on various
# platforms. Windows requires that a file positioning call be made
@@ -2436,29 +2541,47 @@ class revlog(object):
# Note: This is likely not necessary on Python 3. However, because
# the file handle is reused for reads and may be seeked there, we need
# to be careful before changing this.
- ifh.seek(0, os.SEEK_END)
+ if self._writinghandles is None:
+ msg = b'adding revision outside `revlog._writing` context'
+ raise error.ProgrammingError(msg)
+ ifh, dfh, sdfh = self._writinghandles
+ if self._docket is None:
+ ifh.seek(0, os.SEEK_END)
+ else:
+ ifh.seek(self._docket.index_end, os.SEEK_SET)
if dfh:
- dfh.seek(0, os.SEEK_END)
+ if self._docket is None:
+ dfh.seek(0, os.SEEK_END)
+ else:
+ dfh.seek(self._docket.data_end, os.SEEK_SET)
+ if sdfh:
+ sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
curr = len(self) - 1
if not self._inline:
- transaction.add(self.datafile, offset)
- transaction.add(self.indexfile, curr * len(entry))
+ transaction.add(self._datafile, offset)
+ if self._sidedatafile:
+ transaction.add(self._sidedatafile, sidedata_offset)
+ transaction.add(self._indexfile, curr * len(entry))
if data[0]:
dfh.write(data[0])
dfh.write(data[1])
if sidedata:
- dfh.write(sidedata)
+ sdfh.write(sidedata)
ifh.write(entry)
else:
offset += curr * self.index.entry_size
- transaction.add(self.indexfile, offset)
+ transaction.add(self._indexfile, offset)
ifh.write(entry)
ifh.write(data[0])
ifh.write(data[1])
- if sidedata:
- ifh.write(sidedata)
- self._enforceinlinesize(transaction, ifh)
+ assert not sidedata
+ self._enforceinlinesize(transaction)
+ if self._docket is not None:
+ self._docket.index_end = self._writinghandles[0].tell()
+ self._docket.data_end = self._writinghandles[1].tell()
+ self._docket.sidedata_end = self._writinghandles[2].tell()
+
nodemaputil.setup_persistent_nodemap(transaction, self)
def addgroup(
@@ -2481,115 +2604,93 @@ class revlog(object):
this revlog and the node that was added.
"""
- if self._writinghandles:
+ if self._adding_group:
raise error.ProgrammingError(b'cannot nest addgroup() calls')
- r = len(self)
- end = 0
- if r:
- end = self.end(r - 1)
- ifh = self._indexfp(b"a+")
- isize = r * self.index.entry_size
- if self._inline:
- transaction.add(self.indexfile, end + isize)
- dfh = None
- else:
- transaction.add(self.indexfile, isize)
- transaction.add(self.datafile, end)
- dfh = self._datafp(b"a+")
-
- def flush():
- if dfh:
- dfh.flush()
- ifh.flush()
-
- self._writinghandles = (ifh, dfh)
+ self._adding_group = True
empty = True
-
try:
- deltacomputer = deltautil.deltacomputer(self)
- # loop through our set of deltas
- for data in deltas:
- node, p1, p2, linknode, deltabase, delta, flags, sidedata = data
- link = linkmapper(linknode)
- flags = flags or REVIDX_DEFAULT_FLAGS
-
- rev = self.index.get_rev(node)
- if rev is not None:
- # this can happen if two branches make the same change
- self._nodeduplicatecallback(transaction, rev)
- if duplicaterevisioncb:
- duplicaterevisioncb(self, rev)
- empty = False
- continue
-
- for p in (p1, p2):
- if not self.index.has_node(p):
+ with self._writing(transaction):
+ deltacomputer = deltautil.deltacomputer(self)
+ # loop through our set of deltas
+ for data in deltas:
+ (
+ node,
+ p1,
+ p2,
+ linknode,
+ deltabase,
+ delta,
+ flags,
+ sidedata,
+ ) = data
+ link = linkmapper(linknode)
+ flags = flags or REVIDX_DEFAULT_FLAGS
+
+ rev = self.index.get_rev(node)
+ if rev is not None:
+ # this can happen if two branches make the same change
+ self._nodeduplicatecallback(transaction, rev)
+ if duplicaterevisioncb:
+ duplicaterevisioncb(self, rev)
+ empty = False
+ continue
+
+ for p in (p1, p2):
+ if not self.index.has_node(p):
+ raise error.LookupError(
+ p, self.radix, _(b'unknown parent')
+ )
+
+ if not self.index.has_node(deltabase):
raise error.LookupError(
- p, self.indexfile, _(b'unknown parent')
+ deltabase, self.display_id, _(b'unknown delta base')
)
- if not self.index.has_node(deltabase):
- raise error.LookupError(
- deltabase, self.indexfile, _(b'unknown delta base')
+ baserev = self.rev(deltabase)
+
+ if baserev != nullrev and self.iscensored(baserev):
+ # if base is censored, delta must be full replacement in a
+ # single patch operation
+ hlen = struct.calcsize(b">lll")
+ oldlen = self.rawsize(baserev)
+ newlen = len(delta) - hlen
+ if delta[:hlen] != mdiff.replacediffheader(
+ oldlen, newlen
+ ):
+ raise error.CensoredBaseError(
+ self.display_id, self.node(baserev)
+ )
+
+ if not flags and self._peek_iscensored(baserev, delta):
+ flags |= REVIDX_ISCENSORED
+
+ # We assume consumers of addrevisioncb will want to retrieve
+ # the added revision, which will require a call to
+ # revision(). revision() will fast path if there is a cache
+ # hit. So, we tell _addrevision() to always cache in this case.
+ # We're only using addgroup() in the context of changegroup
+ # generation so the revision data can always be handled as raw
+ # by the flagprocessor.
+ rev = self._addrevision(
+ node,
+ None,
+ transaction,
+ link,
+ p1,
+ p2,
+ flags,
+ (baserev, delta),
+ alwayscache=alwayscache,
+ deltacomputer=deltacomputer,
+ sidedata=sidedata,
)
- baserev = self.rev(deltabase)
-
- if baserev != nullrev and self.iscensored(baserev):
- # if base is censored, delta must be full replacement in a
- # single patch operation
- hlen = struct.calcsize(b">lll")
- oldlen = self.rawsize(baserev)
- newlen = len(delta) - hlen
- if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
- raise error.CensoredBaseError(
- self.indexfile, self.node(baserev)
- )
-
- if not flags and self._peek_iscensored(baserev, delta, flush):
- flags |= REVIDX_ISCENSORED
-
- # We assume consumers of addrevisioncb will want to retrieve
- # the added revision, which will require a call to
- # revision(). revision() will fast path if there is a cache
- # hit. So, we tell _addrevision() to always cache in this case.
- # We're only using addgroup() in the context of changegroup
- # generation so the revision data can always be handled as raw
- # by the flagprocessor.
- rev = self._addrevision(
- node,
- None,
- transaction,
- link,
- p1,
- p2,
- flags,
- (baserev, delta),
- ifh,
- dfh,
- alwayscache=alwayscache,
- deltacomputer=deltacomputer,
- sidedata=sidedata,
- )
-
- if addrevisioncb:
- addrevisioncb(self, rev)
- empty = False
-
- if not dfh and not self._inline:
- # addrevision switched from inline to conventional
- # reopen the index
- ifh.close()
- dfh = self._datafp(b"a+")
- ifh = self._indexfp(b"a+")
- self._writinghandles = (ifh, dfh)
+ if addrevisioncb:
+ addrevisioncb(self, rev)
+ empty = False
finally:
- self._writinghandles = None
-
- if dfh:
- dfh.close()
- ifh.close()
+ self._adding_group = False
return not empty
def iscensored(self, rev):
@@ -2599,7 +2700,7 @@ class revlog(object):
return self.flags(rev) & REVIDX_ISCENSORED
- def _peek_iscensored(self, baserev, delta, flush):
+ def _peek_iscensored(self, baserev, delta):
"""Quickly check if a delta produces a censored revision."""
if not self._censorable:
return False
@@ -2642,19 +2743,31 @@ class revlog(object):
return
# first truncate the files on disk
- end = self.start(rev)
+ data_end = self.start(rev)
if not self._inline:
- transaction.add(self.datafile, end)
+ transaction.add(self._datafile, data_end)
end = rev * self.index.entry_size
else:
- end += rev * self.index.entry_size
-
- transaction.add(self.indexfile, end)
+ end = data_end + (rev * self.index.entry_size)
+
+ if self._sidedatafile:
+ sidedata_end = self.sidedata_cut_off(rev)
+ transaction.add(self._sidedatafile, sidedata_end)
+
+ transaction.add(self._indexfile, end)
+ if self._docket is not None:
+ # XXX we could leverage the docket while stripping. However it is
+ # not powerful enough at the time of this comment.
+ self._docket.index_end = end
+ self._docket.data_end = data_end
+ self._docket.sidedata_end = sidedata_end
+ self._docket.write(transaction, stripping=True)
# then reset internal state in memory to forget those revisions
self._revisioncache = None
self._chaininfocache = util.lrucachedict(500)
- self._chunkclear()
+ self._segmentfile.clear_cache()
+ self._segmentfile_sidedata.clear_cache()
del self.index[rev:-1]
@@ -2682,7 +2795,7 @@ class revlog(object):
dd = 0
try:
- f = self.opener(self.indexfile)
+ f = self.opener(self._indexfile)
f.seek(0, io.SEEK_END)
actual = f.tell()
f.close()
@@ -2703,9 +2816,19 @@ class revlog(object):
return (dd, di)
def files(self):
- res = [self.indexfile]
- if not self._inline:
- res.append(self.datafile)
+ res = [self._indexfile]
+ if self._docket_file is None:
+ if not self._inline:
+ res.append(self._datafile)
+ else:
+ res.append(self._docket_file)
+ res.extend(self._docket.old_index_filepaths(include_empty=False))
+ if self._docket.data_end:
+ res.append(self._datafile)
+ res.extend(self._docket.old_data_filepaths(include_empty=False))
+ if self._docket.sidedata_end:
+ res.append(self._sidedatafile)
+ res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
return res
def emitrevisions(
@@ -2762,7 +2885,7 @@ class revlog(object):
addrevisioncb=None,
deltareuse=DELTAREUSESAMEREVS,
forcedeltabothparents=None,
- sidedatacompanion=None,
+ sidedata_helpers=None,
):
"""Copy this revlog to another, possibly with format changes.
@@ -2805,21 +2928,8 @@ class revlog(object):
argument controls whether to force compute deltas against both parents
for merges. By default, the current default is used.
- If not None, the `sidedatacompanion` is callable that accept two
- arguments:
-
- (srcrevlog, rev)
-
- and return a quintet that control changes to sidedata content from the
- old revision to the new clone result:
-
- (dropall, filterout, update, new_flags, dropped_flags)
-
- * if `dropall` is True, all sidedata should be dropped
- * `filterout` is a set of sidedata keys that should be dropped
- * `update` is a mapping of additionnal/new key -> value
- * new_flags is a bitfields of new flags that the revision should get
- * dropped_flags is a bitfields of new flags that the revision shoudl not longer have
+ See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
+ `sidedata_helpers`.
"""
if deltareuse not in self.DELTAREUSEALL:
raise ValueError(
@@ -2859,7 +2969,7 @@ class revlog(object):
addrevisioncb,
deltareuse,
forcedeltabothparents,
- sidedatacompanion,
+ sidedata_helpers,
)
finally:
@@ -2874,7 +2984,7 @@ class revlog(object):
addrevisioncb,
deltareuse,
forcedeltabothparents,
- sidedatacompanion,
+ sidedata_helpers,
):
"""perform the core duty of `revlog.clone` after parameter processing"""
deltacomputer = deltautil.deltacomputer(destrevlog)
@@ -2890,31 +3000,19 @@ class revlog(object):
p2 = index[entry[6]][7]
node = entry[7]
- sidedataactions = (False, [], {}, 0, 0)
- if sidedatacompanion is not None:
- sidedataactions = sidedatacompanion(self, rev)
-
# (Possibly) reuse the delta from the revlog if allowed and
# the revlog chunk is a delta.
cachedelta = None
rawtext = None
- if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
- dropall = sidedataactions[0]
- filterout = sidedataactions[1]
- update = sidedataactions[2]
- new_flags = sidedataactions[3]
- dropped_flags = sidedataactions[4]
- text, sidedata = self._revisiondata(rev)
- if dropall:
- sidedata = {}
- for key in filterout:
- sidedata.pop(key, None)
- sidedata.update(update)
- if not sidedata:
- sidedata = None
-
- flags |= new_flags
- flags &= ~dropped_flags
+ if deltareuse == self.DELTAREUSEFULLADD:
+ text = self._revisiondata(rev)
+ sidedata = self.sidedata(rev)
+
+ if sidedata_helpers is not None:
+ (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
+ self, sidedata_helpers, sidedata, rev
+ )
+ flags = flags | new_flags[0] & ~new_flags[1]
destrevlog.addrevision(
text,
@@ -2934,16 +3032,20 @@ class revlog(object):
if dp != nullrev:
cachedelta = (dp, bytes(self._chunk(rev)))
+ sidedata = None
if not cachedelta:
- rawtext = self.rawdata(rev)
-
- ifh = destrevlog.opener(
- destrevlog.indexfile, b'a+', checkambig=False
- )
- dfh = None
- if not destrevlog._inline:
- dfh = destrevlog.opener(destrevlog.datafile, b'a+')
- try:
+ rawtext = self._revisiondata(rev)
+ sidedata = self.sidedata(rev)
+ if sidedata is None:
+ sidedata = self.sidedata(rev)
+
+ if sidedata_helpers is not None:
+ (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
+ self, sidedata_helpers, sidedata, rev
+ )
+ flags = flags | new_flags[0] & ~new_flags[1]
+
+ with destrevlog._writing(tr):
destrevlog._addrevision(
node,
rawtext,
@@ -2953,100 +3055,23 @@ class revlog(object):
p2,
flags,
cachedelta,
- ifh,
- dfh,
deltacomputer=deltacomputer,
+ sidedata=sidedata,
)
- finally:
- if dfh:
- dfh.close()
- ifh.close()
if addrevisioncb:
addrevisioncb(self, rev, node)
def censorrevision(self, tr, censornode, tombstone=b''):
- if (self.version & 0xFFFF) == REVLOGV0:
+ if self._format_version == REVLOGV0:
raise error.RevlogError(
- _(b'cannot censor with version %d revlogs') % self.version
- )
-
- censorrev = self.rev(censornode)
- tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
-
- if len(tombstone) > self.rawsize(censorrev):
- raise error.Abort(
- _(b'censor tombstone must be no longer than censored data')
+ _(b'cannot censor with version %d revlogs')
+ % self._format_version
)
-
- # Rewriting the revlog in place is hard. Our strategy for censoring is
- # to create a new revlog, copy all revisions to it, then replace the
- # revlogs on transaction close.
-
- newindexfile = self.indexfile + b'.tmpcensored'
- newdatafile = self.datafile + b'.tmpcensored'
-
- # This is a bit dangerous. We could easily have a mismatch of state.
- newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
- newrl.version = self.version
- newrl._generaldelta = self._generaldelta
- newrl._io = self._io
-
- for rev in self.revs():
- node = self.node(rev)
- p1, p2 = self.parents(node)
-
- if rev == censorrev:
- newrl.addrawrevision(
- tombstone,
- tr,
- self.linkrev(censorrev),
- p1,
- p2,
- censornode,
- REVIDX_ISCENSORED,
- )
-
- if newrl.deltaparent(rev) != nullrev:
- raise error.Abort(
- _(
- b'censored revision stored as delta; '
- b'cannot censor'
- ),
- hint=_(
- b'censoring of revlogs is not '
- b'fully implemented; please report '
- b'this bug'
- ),
- )
- continue
-
- if self.iscensored(rev):
- if self.deltaparent(rev) != nullrev:
- raise error.Abort(
- _(
- b'cannot censor due to censored '
- b'revision having delta stored'
- )
- )
- rawtext = self._chunk(rev)
- else:
- rawtext = self.rawdata(rev)
-
- newrl.addrawrevision(
- rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev)
- )
-
- tr.addbackup(self.indexfile, location=b'store')
- if not self._inline:
- tr.addbackup(self.datafile, location=b'store')
-
- self.opener.rename(newrl.indexfile, self.indexfile)
- if not self._inline:
- self.opener.rename(newrl.datafile, self.datafile)
-
- self.clearcaches()
- self._loadindex()
+ elif self._format_version == REVLOGV1:
+ rewrite.v1_censor(self, tr, censornode, tombstone)
+ else:
+ rewrite.v2_censor(self, tr, censornode, tombstone)
def verifyintegrity(self, state):
"""Verifies the integrity of the revlog.
@@ -3060,13 +3085,13 @@ class revlog(object):
if di:
yield revlogproblem(error=_(b'index contains %d extra bytes') % di)
- version = self.version & 0xFFFF
+ version = self._format_version
# The verifier tells us what version revlog we should be.
if version != state[b'expectedversion']:
yield revlogproblem(
warning=_(b"warning: '%s' uses revlog format %d; expected %d")
- % (self.indexfile, version, state[b'expectedversion'])
+ % (self.display_id, version, state[b'expectedversion'])
)
state[b'skipread'] = set()
@@ -3164,9 +3189,9 @@ class revlog(object):
d = {}
if exclusivefiles:
- d[b'exclusivefiles'] = [(self.opener, self.indexfile)]
+ d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
if not self._inline:
- d[b'exclusivefiles'].append((self.opener, self.datafile))
+ d[b'exclusivefiles'].append((self.opener, self._datafile))
if sharedfiles:
d[b'sharedfiles'] = []
@@ -3184,12 +3209,10 @@ class revlog(object):
return d
- def rewrite_sidedata(self, helpers, startrev, endrev):
- if self.version & 0xFFFF != REVLOGV2:
+ def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
+ if not self.hassidedata:
return
- # inline are not yet supported because they suffer from an issue when
- # rewriting them (since it's not an append-only operation).
- # See issue6485.
+ # revlog formats with sidedata support do not support inline data
assert not self._inline
if not helpers[1] and not helpers[2]:
# Nothing to generate or remove
@@ -3197,13 +3220,14 @@ class revlog(object):
new_entries = []
# append the new sidedata
- with self._datafp(b'a+') as fp:
- # Maybe this bug still exists, see revlog._writeentry
- fp.seek(0, os.SEEK_END)
- current_offset = fp.tell()
+ with self._writing(transaction):
+ ifh, dfh, sdfh = self._writinghandles
+ dfh.seek(self._docket.sidedata_end, os.SEEK_SET)
+
+ current_offset = sdfh.tell()
for rev in range(startrev, endrev + 1):
entry = self.index[rev]
- new_sidedata = storageutil.run_sidedata_helpers(
+ new_sidedata, flags = sidedatautil.run_sidedata_helpers(
store=self,
sidedata_helpers=helpers,
sidedata={},
@@ -3213,24 +3237,58 @@ class revlog(object):
serialized_sidedata = sidedatautil.serialize_sidedata(
new_sidedata
)
+
+ sidedata_compression_mode = COMP_MODE_INLINE
+ if serialized_sidedata and self.hassidedata:
+ sidedata_compression_mode = COMP_MODE_PLAIN
+ h, comp_sidedata = self.compress(serialized_sidedata)
+ if (
+ h != b'u'
+ and comp_sidedata[0] != b'\0'
+ and len(comp_sidedata) < len(serialized_sidedata)
+ ):
+ assert not h
+ if (
+ comp_sidedata[0]
+ == self._docket.default_compression_header
+ ):
+ sidedata_compression_mode = COMP_MODE_DEFAULT
+ serialized_sidedata = comp_sidedata
+ else:
+ sidedata_compression_mode = COMP_MODE_INLINE
+ serialized_sidedata = comp_sidedata
if entry[8] != 0 or entry[9] != 0:
# rewriting entries that already have sidedata is not
# supported yet, because it introduces garbage data in the
# revlog.
- msg = b"Rewriting existing sidedata is not supported yet"
+ msg = b"rewriting existing sidedata is not supported yet"
raise error.Abort(msg)
- entry = entry[:8]
- entry += (current_offset, len(serialized_sidedata))
-
- fp.write(serialized_sidedata)
- new_entries.append(entry)
+
+ # Apply (potential) flags to add and to remove after running
+ # the sidedata helpers
+ new_offset_flags = entry[0] | flags[0] & ~flags[1]
+ entry_update = (
+ current_offset,
+ len(serialized_sidedata),
+ new_offset_flags,
+ sidedata_compression_mode,
+ )
+
+ # the sidedata computation might have moved the file cursors around
+ sdfh.seek(current_offset, os.SEEK_SET)
+ sdfh.write(serialized_sidedata)
+ new_entries.append(entry_update)
current_offset += len(serialized_sidedata)
-
- # rewrite the new index entries
- with self._indexfp(b'w+') as fp:
- fp.seek(startrev * self.index.entry_size)
- for i, entry in enumerate(new_entries):
+ self._docket.sidedata_end = sdfh.tell()
+
+ # rewrite the new index entries
+ ifh.seek(startrev * self.index.entry_size)
+ for i, e in enumerate(new_entries):
rev = startrev + i
- self.index.replace_sidedata_info(rev, entry[8], entry[9])
- packed = self._io.packentry(entry, self.node, self.version, rev)
- fp.write(packed)
+ self.index.replace_sidedata_info(rev, *e)
+ packed = self.index.entry_binary(rev)
+ if rev == 0 and self._docket is None:
+ header = self._format_flags | self._format_version
+ header = self.index.pack_header(header)
+ packed = header + packed
+ ifh.write(packed)
diff --git a/mercurial/revlogutils/__init__.py b/mercurial/revlogutils/__init__.py
--- a/mercurial/revlogutils/__init__.py
+++ b/mercurial/revlogutils/__init__.py
@@ -6,3 +6,75 @@
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
+
+from ..thirdparty import attr
+from ..interfaces import repository
+
+# See mercurial.revlogutils.constants for doc
+COMP_MODE_INLINE = 2
+
+
+def offset_type(offset, type):
+ if (type & ~repository.REVISION_FLAGS_KNOWN) != 0:
+ raise ValueError(b'unknown revlog index flags: %d' % type)
+ return int(int(offset) << 16 | type)
+
+
+def entry(
+ data_offset,
+ data_compressed_length,
+ data_delta_base,
+ link_rev,
+ parent_rev_1,
+ parent_rev_2,
+ node_id,
+ flags=0,
+ data_uncompressed_length=-1,
+ data_compression_mode=COMP_MODE_INLINE,
+ sidedata_offset=0,
+ sidedata_compressed_length=0,
+ sidedata_compression_mode=COMP_MODE_INLINE,
+):
+ """Build one entry from symbolic name
+
+ This is useful to abstract the actual detail of how we build the entry
+ tuple for caller who don't care about it.
+
+ This should always be called using keyword arguments. Some arguments have
+ default value, this match the value used by index version that does not store such data.
+ """
+ return (
+ offset_type(data_offset, flags),
+ data_compressed_length,
+ data_uncompressed_length,
+ data_delta_base,
+ link_rev,
+ parent_rev_1,
+ parent_rev_2,
+ node_id,
+ sidedata_offset,
+ sidedata_compressed_length,
+ data_compression_mode,
+ sidedata_compression_mode,
+ )
+
+
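A minimal usage sketch (hypothetical values, assuming this module's namespace): the helper is called with keyword arguments only, and fields that the target index version does not store keep their defaults:

    e = entry(
        data_offset=0,
        data_compressed_length=11,
        data_delta_base=0,
        link_rev=0,
        parent_rev_1=-1,
        parent_rev_2=-1,
        node_id=b'\x00' * 20,
    )
    assert len(e) == 12
    # sidedata defaults to "absent", compression modes default to INLINE
    assert e[8:12] == (0, 0, COMP_MODE_INLINE, COMP_MODE_INLINE)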
+@attr.s(slots=True, frozen=True)
+class revisioninfo(object):
+ """Information about a revision that allows building its fulltext
+ node: expected hash of the revision
+ p1, p2: parent revs of the revision
+ btext: built text cache consisting of a one-element list
+ textlen: length of the revision's fulltext
+ cachedelta: (baserev, uncompressed_delta) or None
+ flags: flags associated with the revision storage
+
+ One of btext[0] or cachedelta must be set.
+ """
+
+ node = attr.ib()
+ p1 = attr.ib()
+ p2 = attr.ib()
+ btext = attr.ib()
+ textlen = attr.ib()
+ cachedelta = attr.ib()
+ flags = attr.ib()
diff --git a/mercurial/revlogutils/constants.py b/mercurial/revlogutils/constants.py
--- a/mercurial/revlogutils/constants.py
+++ b/mercurial/revlogutils/constants.py
@@ -1,4 +1,4 @@
-# revlogdeltas.py - constant used for revlog logic
+# revlogdeltas.py - constant used for revlog logic.
#
# Copyright 2005-2007 Olivia Mackall
# Copyright 2018 Octobus
@@ -12,16 +12,110 @@ from __future__ import absolute_import
import struct
from ..interfaces import repository
+from .. import revlogutils
+
+### Internal utility constants
+
+KIND_CHANGELOG = 1001 # over 256 to not be comparable with a bytes
+KIND_MANIFESTLOG = 1002
+KIND_FILELOG = 1003
+KIND_OTHER = 1004
+
+ALL_KINDS = {
+ KIND_CHANGELOG,
+ KIND_MANIFESTLOG,
+ KIND_FILELOG,
+ KIND_OTHER,
+}
+
+### Index entry key
+#
+#
+# Internal details
+# ----------------
+#
+# A large part of the revlog logic deals with revisions' "index entries", tuple
+# objects that contain the same "items" whatever the revlog version.
+# Different versions will have different ways of storing these items (sometimes
+# not having them at all), but the tuple will always be the same. New fields
+# are usually added at the end to avoid breaking existing code that relies
+# on the existing order. The fields are defined as follows:
+
+# [0] offset:
+# The byte index of the start of the revision's data chunk.
+# That value is shifted up by 16 bits. Use "offset = field >> 16" to
+# retrieve it.
+#
+# flags:
+# A flag field that carries special information or changes the behavior
+# of the revision. (see `REVIDX_*` constants for details)
+# The flag field only occupies the lower 16 bits of this field;
+# use "flags = field & 0xFFFF" to retrieve the value.
+ENTRY_DATA_OFFSET = 0
+
+# [1] compressed length:
+# The size, in bytes, of the chunk on disk
+ENTRY_DATA_COMPRESSED_LENGTH = 1
+
+# [2] uncompressed length:
+# The size, in bytes, of the full revision once reconstructed.
+ENTRY_DATA_UNCOMPRESSED_LENGTH = 2
+
+# [3] base rev:
+# Either the base of the revision delta chain (without general
+# delta), or the base of the delta (stored in the data chunk)
+# with general delta.
+ENTRY_DELTA_BASE = 3
+
+# [4] link rev:
+# Changelog revision number of the changeset introducing this
+# revision.
+ENTRY_LINK_REV = 4
+
+# [5] parent 1 rev:
+# Revision number of the first parent
+ENTRY_PARENT_1 = 5
+
+# [6] parent 2 rev:
+# Revision number of the second parent
+ENTRY_PARENT_2 = 6
+
+# [7] node id:
+# The node id of the current revision
+ENTRY_NODE_ID = 7
+
+# [8] sidedata offset:
+# The byte index of the start of the revision's side-data chunk.
+ENTRY_SIDEDATA_OFFSET = 8
+
+# [9] sidedata chunk length:
+# The size, in bytes, of the revision's side-data chunk.
+ENTRY_SIDEDATA_COMPRESSED_LENGTH = 9
+
+# [10] data compression mode:
+# two bits that detail the way the data chunk is compressed on disk.
+# (see "COMP_MODE_*" constants for details). For revlog version 0 and
+# 1 this will always be COMP_MODE_INLINE.
+ENTRY_DATA_COMPRESSION_MODE = 10
+
+# [11] side-data compression mode:
+# two bits that detail the way the sidedata chunk is compressed on disk.
+# (see "COMP_MODE_*" constants for details)
+ENTRY_SIDEDATA_COMPRESSION_MODE = 11
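A minimal sketch of reading an entry through the symbolic indices above, assuming the revlogutils.entry() helper imported by this module and hypothetical values:

    e = revlogutils.entry(
        data_offset=4096,
        data_compressed_length=30,
        data_delta_base=0,
        link_rev=2,
        parent_rev_1=1,
        parent_rev_2=-1,
        node_id=b'\xff' * 20,
    )
    assert e[ENTRY_DATA_OFFSET] >> 16 == 4096  # byte offset of the data chunk
    assert e[ENTRY_DATA_OFFSET] & 0xFFFF == 0  # no REVIDX_* flag set
    assert e[ENTRY_LINK_REV] == 2
    assert e[ENTRY_SIDEDATA_COMPRESSED_LENGTH] == 0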
### main revlog header
-INDEX_HEADER = struct.Struct(b">I")
+# We cannot rely on Struct.format: it is inconsistent for python <= 3.6 versus above
+INDEX_HEADER_FMT = b">I"
+INDEX_HEADER = struct.Struct(INDEX_HEADER_FMT)
## revlog version
REVLOGV0 = 0
REVLOGV1 = 1
# Dummy value until file format is finalized.
REVLOGV2 = 0xDEAD
+# Dummy value until file format is finalized.
+CHANGELOGV2 = 0xD34D
## global revlog header flags
# Shared across v1 and v2.
@@ -31,8 +125,10 @@ FLAG_GENERALDELTA = 1 << 17
REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
REVLOG_DEFAULT_FORMAT = REVLOGV1
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
+REVLOGV0_FLAGS = 0
REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
REVLOGV2_FLAGS = FLAG_INLINE_DATA
+CHANGELOGV2_FLAGS = 0
### individual entry
@@ -70,9 +166,24 @@ assert INDEX_ENTRY_V1.size == 32 * 2
# 32 bytes: nodeid
# 8 bytes: sidedata offset
# 4 bytes: sidedata compressed length
-# 20 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
-INDEX_ENTRY_V2 = struct.Struct(b">Qiiiiii20s12xQi20x")
-assert INDEX_ENTRY_V2.size == 32 * 3
+# 1 byte: compression mode (the 2 lower bits are data_compression_mode)
+# 19 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
+INDEX_ENTRY_V2 = struct.Struct(b">Qiiiiii20s12xQiB19x")
+assert INDEX_ENTRY_V2.size == 32 * 3, INDEX_ENTRY_V2.size
+
+# 6 bytes: offset
+# 2 bytes: flags
+# 4 bytes: compressed length
+# 4 bytes: uncompressed length
+# 4 bytes: parent 1 rev
+# 4 bytes: parent 2 rev
+# 32 bytes: nodeid
+# 8 bytes: sidedata offset
+# 4 bytes: sidedata compressed length
+# 1 byte: compression mode (the 2 lower bits are data_compression_mode)
+# 27 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
+INDEX_ENTRY_CL_V2 = struct.Struct(b">Qiiii20s12xQiB27x")
+assert INDEX_ENTRY_CL_V2.size == 32 * 3, INDEX_ENTRY_CL_V2.size
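A minimal sketch of packing one v2 entry with the struct above, using hypothetical field values (packed offset+flags, lengths, delta base, link rev, parents, node, sidedata offset and length, compression-mode byte):

    packed = INDEX_ENTRY_V2.pack(0, 11, 11, 0, 0, -1, -1, b'\x00' * 20, 0, 0, 0)
    assert len(packed) == 96  # three 32-byte "lines" per entry
    fields = INDEX_ENTRY_V2.unpack(packed)
    assert fields[0] >> 16 == 0 and fields[0] & 0xFFFF == 0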
# revlog index flags
@@ -85,8 +196,6 @@ REVIDX_ISCENSORED = repository.REVISION_
REVIDX_ELLIPSIS = repository.REVISION_FLAG_ELLIPSIS
# revision data is stored externally
REVIDX_EXTSTORED = repository.REVISION_FLAG_EXTSTORED
-# revision data contains extra metadata not part of the official digest
-REVIDX_SIDEDATA = repository.REVISION_FLAG_SIDEDATA
# revision changes files in a way that could affect copy tracing.
REVIDX_HASCOPIESINFO = repository.REVISION_FLAG_HASCOPIESINFO
REVIDX_DEFAULT_FLAGS = 0
@@ -95,13 +204,79 @@ REVIDX_FLAGS_ORDER = [
REVIDX_ISCENSORED,
REVIDX_ELLIPSIS,
REVIDX_EXTSTORED,
- REVIDX_SIDEDATA,
REVIDX_HASCOPIESINFO,
]
# bitmark for flags that could cause rawdata content change
-REVIDX_RAWTEXT_CHANGING_FLAGS = (
- REVIDX_ISCENSORED | REVIDX_EXTSTORED | REVIDX_SIDEDATA
-)
+REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
+
+## chunk compression mode constants:
+# These constants are used in revlog version >=2 to denote the compression used
+# for a chunk.
+
+# Chunks use no compression; the data stored on disk can be used directly as
+# the chunk value, without any header information prefixed.
+COMP_MODE_PLAIN = 0
+
+# Chunks use the "default compression" for the revlog (usually defined in the
+# revlog docket). A header is still used.
+#
+# XXX: keeping a header is probably not useful and we should probably drop it.
+#
+# XXX: The value of allowing mixed types of compression in the revlog is unclear
+# and we should consider making PLAIN/DEFAULT the only available mode for
+# revlog v2, disallowing INLINE mode.
+COMP_MODE_DEFAULT = 1
+
+# Chunks use a compression mode stored "inline" at the start of the chunk
+# itself. This is the mode always used for revlog versions "0" and "1".
+COMP_MODE_INLINE = revlogutils.COMP_MODE_INLINE
+
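A minimal sketch of the dispatch a reader performs on these modes (it mirrors the logic added to revlog._chunk above; the decompressor arguments are hypothetical callables):

    def decode_chunk(mode, data, inline_decompress, default_decompress):
        if mode == COMP_MODE_PLAIN:
            return data  # stored verbatim, no header
        if mode == COMP_MODE_DEFAULT:
            return default_decompress(data)  # revlog's default engine
        if mode == COMP_MODE_INLINE:
            return inline_decompress(data)  # per-chunk header selects the engine
        raise ValueError("unknown compression mode %d" % mode)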
+SUPPORTED_FLAGS = {
+ REVLOGV0: REVLOGV0_FLAGS,
+ REVLOGV1: REVLOGV1_FLAGS,
+ REVLOGV2: REVLOGV2_FLAGS,
+ CHANGELOGV2: CHANGELOGV2_FLAGS,
+}
+
+_no = lambda flags: False
+_yes = lambda flags: True
+
+
+def _from_flag(flag):
+ return lambda flags: bool(flags & flag)
+
+
+FEATURES_BY_VERSION = {
+ REVLOGV0: {
+ b'inline': _no,
+ b'generaldelta': _no,
+ b'sidedata': False,
+ b'docket': False,
+ },
+ REVLOGV1: {
+ b'inline': _from_flag(FLAG_INLINE_DATA),
+ b'generaldelta': _from_flag(FLAG_GENERALDELTA),
+ b'sidedata': False,
+ b'docket': False,
+ },
+ REVLOGV2: {
+ # The point of inline-revlog is to reduce the number of files used in
+ # the store. Using a docket defeats this purpose. So we need other
+ # means to reduce the number of files for revlogv2.
+ b'inline': _no,
+ b'generaldelta': _yes,
+ b'sidedata': True,
+ b'docket': True,
+ },
+ CHANGELOGV2: {
+ b'inline': _no,
+ # General delta is useless for changelog since we don't do any delta
+ b'generaldelta': _no,
+ b'sidedata': True,
+ b'docket': True,
+ },
+}
+
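A minimal usage sketch for the table above: per-version callables take the revlog header flag field, while the booleans are static properties of the format:

    header_flags = FLAG_INLINE_DATA | FLAG_GENERALDELTA
    features = FEATURES_BY_VERSION[REVLOGV1]
    assert features[b'inline'](header_flags)
    assert features[b'generaldelta'](header_flags)
    assert features[b'sidedata'] is False
    assert FEATURES_BY_VERSION[REVLOGV2][b'sidedata'] is True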
SPARSE_REVLOG_MAX_CHAIN_LENGTH = 1000
diff --git a/mercurial/revlogutils/deltas.py b/mercurial/revlogutils/deltas.py
--- a/mercurial/revlogutils/deltas.py
+++ b/mercurial/revlogutils/deltas.py
@@ -18,6 +18,9 @@ from ..i18n import _
from ..pycompat import getattr
from .constants import (
+ COMP_MODE_DEFAULT,
+ COMP_MODE_INLINE,
+ COMP_MODE_PLAIN,
REVIDX_ISCENSORED,
REVIDX_RAWTEXT_CHANGING_FLAGS,
)
@@ -553,6 +556,24 @@ class _deltainfo(object):
snapshotdepth = attr.ib()
+def drop_u_compression(delta):
+ """turn into a "u" (no-compression) into no-compression without header
+
+ This is useful for revlog format that has better compression method.
+ """
+ assert delta.data[0] == b'u', delta.data[0]
+ return _deltainfo(
+ delta.distance,
+ delta.deltalen - 1,
+ (b'', delta.data[1]),
+ delta.base,
+ delta.chainbase,
+ delta.chainlen,
+ delta.compresseddeltalen,
+ delta.snapshotdepth,
+ )
+
+
def isgooddeltainfo(revlog, deltainfo, revinfo):
"""Returns True if the given delta is good. Good means that it is within
the disk span, disk size, and chain length bounds that we know to be
@@ -914,7 +935,7 @@ class deltacomputer(object):
def buildtext(self, revinfo, fh):
"""Builds a fulltext version of a revision
- revinfo: _revisioninfo instance that contains all needed info
+ revinfo: revisioninfo instance that contains all needed info
fh: file handle to either the .i or the .d revlog file,
depending on whether it is inlined or not
"""
@@ -1012,8 +1033,7 @@ class deltacomputer(object):
snapshotdepth,
)
- def _fullsnapshotinfo(self, fh, revinfo):
- curr = len(self.revlog)
+ def _fullsnapshotinfo(self, fh, revinfo, curr):
rawtext = self.buildtext(revinfo, fh)
data = self.revlog.compress(rawtext)
compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0])
@@ -1032,7 +1052,7 @@ class deltacomputer(object):
snapshotdepth,
)
- def finddeltainfo(self, revinfo, fh):
+ def finddeltainfo(self, revinfo, fh, excluded_bases=None, target_rev=None):
"""Find an acceptable delta against a candidate revision
revinfo: information about the revision (instance of _revisioninfo)
@@ -1044,15 +1064,25 @@ class deltacomputer(object):
If no suitable deltabase is found, we return delta info for a full
snapshot.
+
+ `excluded_bases` is an optional set of revisions that cannot be used as
+ a delta base. Use this to recompute deltas suitable for use in a censor
+ or strip context.
"""
+ if target_rev is None:
+ target_rev = len(self.revlog)
+
if not revinfo.textlen:
- return self._fullsnapshotinfo(fh, revinfo)
+ return self._fullsnapshotinfo(fh, revinfo, target_rev)
+
+ if excluded_bases is None:
+ excluded_bases = set()
# no delta for flag processor revision (see "candelta" for why)
# not calling candelta since only one revision needs test, also to
# avoid overhead fetching flags again.
if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
- return self._fullsnapshotinfo(fh, revinfo)
+ return self._fullsnapshotinfo(fh, revinfo, target_rev)
cachedelta = revinfo.cachedelta
p1 = revinfo.p1
@@ -1072,6 +1102,10 @@ class deltacomputer(object):
# challenge it against refined candidates
nominateddeltas.append(deltainfo)
for candidaterev in candidaterevs:
+ if candidaterev in excluded_bases:
+ continue
+ if candidaterev >= target_rev:
+ continue
candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
if candidatedelta is not None:
if isgooddeltainfo(self.revlog, candidatedelta, revinfo):
@@ -1084,5 +1118,30 @@ class deltacomputer(object):
candidaterevs = next(groups)
if deltainfo is None:
- deltainfo = self._fullsnapshotinfo(fh, revinfo)
+ deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
return deltainfo
+
+
+def delta_compression(default_compression_header, deltainfo):
+ """return (COMPRESSION_MODE, deltainfo)
+
+ used by revlog v2+ format to dispatch between PLAIN and DEFAULT
+ compression.
+ """
+ h, d = deltainfo.data
+ compression_mode = COMP_MODE_INLINE
+ if not h and not d:
+ # no data to store at all... declare it uncompressed
+ compression_mode = COMP_MODE_PLAIN
+ elif not h:
+ t = d[0:1]
+ if t == b'\0':
+ compression_mode = COMP_MODE_PLAIN
+ elif t == default_compression_header:
+ compression_mode = COMP_MODE_DEFAULT
+ elif h == b'u':
+ # we have a more efficient way to declare uncompressed
+ h = b''
+ compression_mode = COMP_MODE_PLAIN
+ deltainfo = drop_u_compression(deltainfo)
+ return compression_mode, deltainfo
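To make the dispatch concrete, a hedged walk-through of the possible outcomes for a `deltainfo` produced by `deltacomputer.finddeltainfo`, assuming the docket's default compression header is b'x' (zlib):

    comp_mode, d = delta_compression(b'x', deltainfo)
    # (b'', b'')       -> COMP_MODE_PLAIN    nothing to store
    # (b'', b'\0...')  -> COMP_MODE_PLAIN    text is self-marking, stored verbatim
    # (b'', b'x...')   -> COMP_MODE_DEFAULT  payload matches the default engine
    # (b'u', payload)  -> COMP_MODE_PLAIN    'u' header dropped via drop_u_compression
    # anything else    -> COMP_MODE_INLINE   header kept inline, as in revlog v0/v1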
diff --git a/mercurial/revlogutils/docket.py b/mercurial/revlogutils/docket.py
new file mode 100644
--- /dev/null
+++ b/mercurial/revlogutils/docket.py
@@ -0,0 +1,441 @@
+# docket - code related to revlog "docket"
+#
+# Copyright 2021 Pierre-Yves David
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+### Revlog docket file
+#
+# The revlog is stored on disk using multiple files:
+#
+# * a small docket file, containing metadata and a pointer,
+#
+# * an index file, containing fixed width information about revisions,
+#
+# * a data file, containing variable width data for these revisions,
+
+from __future__ import absolute_import
+
+import errno
+import os
+import random
+import struct
+
+from .. import (
+ encoding,
+ error,
+ node,
+ pycompat,
+ util,
+)
+
+from . import (
+ constants,
+)
+
+
+def make_uid(id_size=8):
+ """return a new unique identifier.
+
+ The identifier is random and composed of ascii characters."""
+ # since we "hex" the result, we only need half that many random bytes to
+ # end up with a final uuid of `id_size` characters
+ return node.hex(os.urandom(id_size // 2))
+
+
+# some special test logic to avoid annoying random output in the tests
+stable_docket_file = encoding.environ.get(b'HGTEST_UUIDFILE')
+
+if stable_docket_file:
+
+ def make_uid(id_size=8):
+ try:
+ with open(stable_docket_file, mode='rb') as f:
+ seed = f.read().strip()
+ except IOError as inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ seed = b'04' # chosen by a fair dice roll. guaranteed to be random
+ if pycompat.ispy3:
+ iter_seed = iter(seed)
+ else:
+ # pytype: disable=wrong-arg-types
+ iter_seed = (ord(c) for c in seed)
+ # pytype: enable=wrong-arg-types
+ # some basic circular sum hashing on 64 bits
+ int_seed = 0
+ low_mask = int('1' * 35, 2)
+ for i in iter_seed:
+ high_part = int_seed >> 35
+ low_part = (int_seed & low_mask) << 28
+ int_seed = high_part + low_part + i
+ r = random.Random()
+ if pycompat.ispy3:
+ r.seed(int_seed, version=1)
+ else:
+ r.seed(int_seed)
+ # once we drop python 3.8 support we can simply use r.randbytes
+ raw = r.getrandbits(id_size * 4)
+ assert id_size == 8
+ p = struct.pack('>L', raw)
+ new = node.hex(p)
+ with open(stable_docket_file, 'wb') as f:
+ f.write(new)
+ return new
+
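For reference, the default implementation simply hexlifies a few random bytes; a small sketch of the expected shape (the example value shown is of course arbitrary):

    uid = make_uid()                 # default id_size=8
    assert len(uid) == 8             # 8 ascii hex characters, e.g. b'9f3c02ab'
    assert all(c in b'0123456789abcdef' for c in bytearray(uid))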
+
+# Docket format
+#
+# * 4 bytes: revlog version
+# | This is mandatory as docket must be compatible with the previous
+# | revlog index header.
+# * 1 byte: size of index uuid
+# * 1 byte: number of outdated index uuids
+# * 1 byte: size of data uuid
+# * 1 byte: number of outdated data uuids
+# * 1 byte: size of sidedata uuid
+# * 1 byte: number of outdated sidedata uuids
+# * 8 bytes: size of index-data
+# * 8 bytes: pending size of index-data
+# * 8 bytes: size of data
+# * 8 bytes: size of sidedata
+# * 8 bytes: pending size of data
+# * 8 bytes: pending size of sidedata
+# * 1 byte: default compression header
+S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBLLLLLLc')
+# * 1 byte: size of index uuid
+# * 8 bytes: size of file
+S_OLD_UID = struct.Struct('>BL')
+
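A rough size check of the fixed part of the docket, assuming `constants.INDEX_HEADER_FMT` is the 4-byte big-endian version field (b'>I'); the variable-size uuid sections announced in the header follow it, as `_serialize` below shows:

    import struct
    S_HEADER = struct.Struct(b'>I' + b'BBBBBBLLLLLLc')   # assumption: b'>I' header fmt
    S_OLD_UID = struct.Struct('>BL')
    print(S_HEADER.size, S_OLD_UID.size)                 # 35 and 5 under that assumption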
+
+class RevlogDocket(object):
+ """metadata associated with revlog"""
+
+ def __init__(
+ self,
+ revlog,
+ use_pending=False,
+ version_header=None,
+ index_uuid=None,
+ older_index_uuids=(),
+ data_uuid=None,
+ older_data_uuids=(),
+ sidedata_uuid=None,
+ older_sidedata_uuids=(),
+ index_end=0,
+ pending_index_end=0,
+ data_end=0,
+ pending_data_end=0,
+ sidedata_end=0,
+ pending_sidedata_end=0,
+ default_compression_header=None,
+ ):
+ self._version_header = version_header
+ self._read_only = bool(use_pending)
+ self._dirty = False
+ self._radix = revlog.radix
+ self._path = revlog._docket_file
+ self._opener = revlog.opener
+ self._index_uuid = index_uuid
+ self._older_index_uuids = older_index_uuids
+ self._data_uuid = data_uuid
+ self._older_data_uuids = older_data_uuids
+ self._sidedata_uuid = sidedata_uuid
+ self._older_sidedata_uuids = older_sidedata_uuids
+ assert not set(older_index_uuids) & set(older_data_uuids)
+ assert not set(older_data_uuids) & set(older_sidedata_uuids)
+ assert not set(older_index_uuids) & set(older_sidedata_uuids)
+ # these asserts should hold as long as we have a single index filename
+ assert index_end <= pending_index_end
+ assert data_end <= pending_data_end
+ assert sidedata_end <= pending_sidedata_end
+ self._initial_index_end = index_end
+ self._pending_index_end = pending_index_end
+ self._initial_data_end = data_end
+ self._pending_data_end = pending_data_end
+ self._initial_sidedata_end = sidedata_end
+ self._pending_sidedata_end = pending_sidedata_end
+ if use_pending:
+ self._index_end = self._pending_index_end
+ self._data_end = self._pending_data_end
+ self._sidedata_end = self._pending_sidedata_end
+ else:
+ self._index_end = self._initial_index_end
+ self._data_end = self._initial_data_end
+ self._sidedata_end = self._initial_sidedata_end
+ self.default_compression_header = default_compression_header
+
+ def index_filepath(self):
+ """file path to the current index file associated to this docket"""
+ # very simplistic version at first
+ if self._index_uuid is None:
+ self._index_uuid = make_uid()
+ return b"%s-%s.idx" % (self._radix, self._index_uuid)
+
+ def new_index_file(self):
+ """switch index file to a new UID
+
+ The previous index UID is moved to the "older" list."""
+ old = (self._index_uuid, self._index_end)
+ self._older_index_uuids.insert(0, old)
+ self._index_uuid = make_uid()
+ return self.index_filepath()
+
+ def old_index_filepaths(self, include_empty=True):
+ """yield file path to older index files associated to this docket"""
+ # very simplistic version at first
+ for uuid, size in self._older_index_uuids:
+ if include_empty or size > 0:
+ yield b"%s-%s.idx" % (self._radix, uuid)
+
+ def data_filepath(self):
+ """file path to the current data file associated to this docket"""
+ # very simplistic version at first
+ if self._data_uuid is None:
+ self._data_uuid = make_uid()
+ return b"%s-%s.dat" % (self._radix, self._data_uuid)
+
+ def new_data_file(self):
+ """switch data file to a new UID
+
+ The previous data UID is moved to the "older" list."""
+ old = (self._data_uuid, self._data_end)
+ self._older_data_uuids.insert(0, old)
+ self._data_uuid = make_uid()
+ return self.data_filepath()
+
+ def old_data_filepaths(self, include_empty=True):
+ """yield file path to older data files associated to this docket"""
+ # very simplistic version at first
+ for uuid, size in self._older_data_uuids:
+ if include_empty or size > 0:
+ yield b"%s-%s.dat" % (self._radix, uuid)
+
+ def sidedata_filepath(self):
+ """file path to the current sidedata file associated to this docket"""
+ # very simplistic version at first
+ if self._sidedata_uuid is None:
+ self._sidedata_uuid = make_uid()
+ return b"%s-%s.sda" % (self._radix, self._sidedata_uuid)
+
+ def new_sidedata_file(self):
+ """switch sidedata file to a new UID
+
+ The previous sidedata UID is moved to the "older" list."""
+ old = (self._sidedata_uuid, self._sidedata_end)
+ self._older_sidedata_uuids.insert(0, old)
+ self._sidedata_uuid = make_uid()
+ return self.sidedata_filepath()
+
+ def old_sidedata_filepaths(self, include_empty=True):
+ """yield file path to older sidedata files associated to this docket"""
+ # very simplistic version at first
+ for uuid, size in self._older_sidedata_uuids:
+ if include_empty or size > 0:
+ yield b"%s-%s.sda" % (self._radix, uuid)
+
+ @property
+ def index_end(self):
+ return self._index_end
+
+ @index_end.setter
+ def index_end(self, new_size):
+ if new_size != self._index_end:
+ self._index_end = new_size
+ self._dirty = True
+
+ @property
+ def data_end(self):
+ return self._data_end
+
+ @data_end.setter
+ def data_end(self, new_size):
+ if new_size != self._data_end:
+ self._data_end = new_size
+ self._dirty = True
+
+ @property
+ def sidedata_end(self):
+ return self._sidedata_end
+
+ @sidedata_end.setter
+ def sidedata_end(self, new_size):
+ if new_size != self._sidedata_end:
+ self._sidedata_end = new_size
+ self._dirty = True
+
+ def write(self, transaction, pending=False, stripping=False):
+ """write the modification of disk if any
+
+ This make the new content visible to all process"""
+ if not self._dirty:
+ return False
+ else:
+ if self._read_only:
+ msg = b'writing read-only docket: %s'
+ msg %= self._path
+ raise error.ProgrammingError(msg)
+ if not stripping:
+ # XXX we could leverage the docket while stripping. However it
+ # is not powerful enough at the time of this comment
+ transaction.addbackup(self._path, location=b'store')
+ with self._opener(self._path, mode=b'w', atomictemp=True) as f:
+ f.write(self._serialize(pending=pending))
+ # if pending, we still need to write the final data eventually
+ self._dirty = pending
+ return True
+
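A hedged sketch of the intended two-phase usage (a pending write during the transaction, a final write once it is closed); the exact call sites live in the revlog and transaction code, not here:

    docket.index_end = new_index_size   # hypothetical new end offset
    docket.write(tr, pending=True)      # expose new sizes only to pending readers
    # ... once the transaction is finalized:
    docket.write(tr)                    # _dirty stayed True after the pending write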
+ def _serialize(self, pending=False):
+ if pending:
+ official_index_end = self._initial_index_end
+ official_data_end = self._initial_data_end
+ official_sidedata_end = self._initial_sidedata_end
+ else:
+ official_index_end = self._index_end
+ official_data_end = self._data_end
+ official_sidedata_end = self._sidedata_end
+
+ # these asserts should hold as long as we have a single index filename
+ assert official_data_end <= self._data_end
+ assert official_sidedata_end <= self._sidedata_end
+ data = (
+ self._version_header,
+ len(self._index_uuid),
+ len(self._older_index_uuids),
+ len(self._data_uuid),
+ len(self._older_data_uuids),
+ len(self._sidedata_uuid),
+ len(self._older_sidedata_uuids),
+ official_index_end,
+ self._index_end,
+ official_data_end,
+ self._data_end,
+ official_sidedata_end,
+ self._sidedata_end,
+ self.default_compression_header,
+ )
+ s = []
+ s.append(S_HEADER.pack(*data))
+
+ s.append(self._index_uuid)
+ for u, size in self._older_index_uuids:
+ s.append(S_OLD_UID.pack(len(u), size))
+ for u, size in self._older_index_uuids:
+ s.append(u)
+
+ s.append(self._data_uuid)
+ for u, size in self._older_data_uuids:
+ s.append(S_OLD_UID.pack(len(u), size))
+ for u, size in self._older_data_uuids:
+ s.append(u)
+
+ s.append(self._sidedata_uuid)
+ for u, size in self._older_sidedata_uuids:
+ s.append(S_OLD_UID.pack(len(u), size))
+ for u, size in self._older_sidedata_uuids:
+ s.append(u)
+ return b''.join(s)
+
+
+def default_docket(revlog, version_header):
+ """given a revlog version a new docket object for the given revlog"""
+ rl_version = version_header & 0xFFFF
+ if rl_version not in (constants.REVLOGV2, constants.CHANGELOGV2):
+ return None
+ comp = util.compengines[revlog._compengine].revlogheader()
+ docket = RevlogDocket(
+ revlog,
+ version_header=version_header,
+ default_compression_header=comp,
+ )
+ docket._dirty = True
+ return docket
+
+
+def _parse_old_uids(get_data, count):
+ all_sizes = []
+ all_uids = []
+ for i in range(0, count):
+ raw = get_data(S_OLD_UID.size)
+ all_sizes.append(S_OLD_UID.unpack(raw))
+
+ for uid_size, file_size in all_sizes:
+ uid = get_data(uid_size)
+ all_uids.append((uid, file_size))
+ return all_uids
+
+
+def parse_docket(revlog, data, use_pending=False):
+ """given some docket data return a docket object for the given revlog"""
+ header = S_HEADER.unpack(data[: S_HEADER.size])
+
+ # this is a mutable closure capture used in `get_data`
+ offset = [S_HEADER.size]
+
+ def get_data(size):
+ """utility closure to access the `size` next bytes"""
+ if offset[0] + size > len(data):
+ # XXX better class
+ msg = b"docket is too short, expected %d got %d"
+ msg %= (offset[0] + size, len(data))
+ raise error.Abort(msg)
+ raw = data[offset[0] : offset[0] + size]
+ offset[0] += size
+ return raw
+
+ iheader = iter(header)
+
+ version_header = next(iheader)
+
+ index_uuid_size = next(iheader)
+ index_uuid = get_data(index_uuid_size)
+
+ older_index_uuid_count = next(iheader)
+ older_index_uuids = _parse_old_uids(get_data, older_index_uuid_count)
+
+ data_uuid_size = next(iheader)
+ data_uuid = get_data(data_uuid_size)
+
+ older_data_uuid_count = next(iheader)
+ older_data_uuids = _parse_old_uids(get_data, older_data_uuid_count)
+
+ sidedata_uuid_size = next(iheader)
+ sidedata_uuid = get_data(sidedata_uuid_size)
+
+ older_sidedata_uuid_count = next(iheader)
+ older_sidedata_uuids = _parse_old_uids(get_data, older_sidedata_uuid_count)
+
+ index_size = next(iheader)
+
+ pending_index_size = next(iheader)
+
+ data_size = next(iheader)
+
+ pending_data_size = next(iheader)
+
+ sidedata_size = next(iheader)
+
+ pending_sidedata_size = next(iheader)
+
+ default_compression_header = next(iheader)
+
+ docket = RevlogDocket(
+ revlog,
+ use_pending=use_pending,
+ version_header=version_header,
+ index_uuid=index_uuid,
+ older_index_uuids=older_index_uuids,
+ data_uuid=data_uuid,
+ older_data_uuids=older_data_uuids,
+ sidedata_uuid=sidedata_uuid,
+ older_sidedata_uuids=older_sidedata_uuids,
+ index_end=index_size,
+ pending_index_end=pending_index_size,
+ data_end=data_size,
+ pending_data_end=pending_data_size,
+ sidedata_end=sidedata_size,
+ pending_sidedata_end=pending_sidedata_size,
+ default_compression_header=default_compression_header,
+ )
+ return docket
diff --git a/mercurial/revlogutils/flagutil.py b/mercurial/revlogutils/flagutil.py
--- a/mercurial/revlogutils/flagutil.py
+++ b/mercurial/revlogutils/flagutil.py
@@ -18,7 +18,6 @@ from .constants import (
REVIDX_HASCOPIESINFO,
REVIDX_ISCENSORED,
REVIDX_RAWTEXT_CHANGING_FLAGS,
- REVIDX_SIDEDATA,
)
from .. import error, util
@@ -28,7 +27,6 @@ from .. import error, util
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_EXTSTORED
-REVIDX_SIDEDATA
REVIDX_HASCOPIESINFO,
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
diff --git a/mercurial/revlogutils/nodemap.py b/mercurial/revlogutils/nodemap.py
--- a/mercurial/revlogutils/nodemap.py
+++ b/mercurial/revlogutils/nodemap.py
@@ -9,7 +9,6 @@
from __future__ import absolute_import
import errno
-import os
import re
import struct
@@ -19,6 +18,7 @@ from .. import (
error,
util,
)
+from . import docket as docket_mod
class NodeMap(dict):
@@ -28,9 +28,9 @@ class NodeMap(dict):
def persisted_data(revlog):
"""read the nodemap for a revlog from disk"""
- if revlog.nodemap_file is None:
+ if revlog._nodemap_file is None:
return None
- pdata = revlog.opener.tryread(revlog.nodemap_file)
+ pdata = revlog.opener.tryread(revlog._nodemap_file)
if not pdata:
return None
offset = 0
@@ -77,11 +77,11 @@ def setup_persistent_nodemap(tr, revlog)
"""
if revlog._inline:
return # inlined revlog are too small for this to be relevant
- if revlog.nodemap_file is None:
+ if revlog._nodemap_file is None:
return # we do not use persistent_nodemap on this revlog
# we need to happen after the changelog finalization, in that use "cl-"
- callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog.nodemap_file
+ callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog._nodemap_file
if tr.hasfinalize(callback_id):
return # no need to register again
tr.addpending(
@@ -123,7 +123,7 @@ def update_persistent_nodemap(revlog):
"""
if revlog._inline:
return # inlined revlog are too small for this to be relevant
- if revlog.nodemap_file is None:
+ if revlog._nodemap_file is None:
return # we do not use persistent_nodemap on this revlog
notr = _NoTransaction()
@@ -134,10 +134,10 @@ def update_persistent_nodemap(revlog):
def delete_nodemap(tr, repo, revlog):
"""Delete nodemap data on disk for a given revlog"""
- if revlog.nodemap_file is None:
+ if revlog._nodemap_file is None:
msg = "calling persist nodemap on a revlog without the feature enabled"
raise error.ProgrammingError(msg)
- repo.svfs.unlink(revlog.nodemap_file)
+ repo.svfs.unlink(revlog._nodemap_file)
def persist_nodemap(tr, revlog, pending=False, force=False):
@@ -146,11 +146,9 @@ def persist_nodemap(tr, revlog, pending=
raise error.ProgrammingError(
"cannot persist nodemap of a filtered changelog"
)
- if revlog.nodemap_file is None:
+ if revlog._nodemap_file is None:
if force:
- revlog.nodemap_file = get_nodemap_file(
- revlog.opener, revlog.indexfile
- )
+ revlog._nodemap_file = get_nodemap_file(revlog)
else:
msg = "calling persist nodemap on a revlog without the feature enabled"
raise error.ProgrammingError(msg)
@@ -227,7 +225,7 @@ def persist_nodemap(tr, revlog, pending=
target_docket.tip_node = revlog.node(target_docket.tip_rev)
# EXP-TODO: if this is a cache, this should use a cache vfs, not a
# store vfs
- file_path = revlog.nodemap_file
+ file_path = revlog._nodemap_file
if pending:
file_path += b'.a'
tr.registertmp(file_path)
@@ -250,7 +248,7 @@ def persist_nodemap(tr, revlog, pending=
for oldfile in olds:
realvfs.tryunlink(oldfile)
- callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
+ callback_id = b"revlog-cleanup-nodemap-%s" % revlog._nodemap_file
tr.addpostclose(callback_id, cleanup)
@@ -280,15 +278,6 @@ ONDISK_VERSION = 1
S_VERSION = struct.Struct(">B")
S_HEADER = struct.Struct(">BQQQQ")
-ID_SIZE = 8
-
-
-def _make_uid():
- """return a new unique identifier.
-
- The identifier is random and composed of ascii characters."""
- return hex(os.urandom(ID_SIZE))
-
class NodeMapDocket(object):
"""metadata associated with persistent nodemap data
@@ -298,7 +287,7 @@ class NodeMapDocket(object):
def __init__(self, uid=None):
if uid is None:
- uid = _make_uid()
+ uid = docket_mod.make_uid()
# a unique identifier for the data file:
# - When new data are appended, it is preserved.
# - When a new data file is created, a new identifier is generated.
@@ -365,15 +354,12 @@ class NodeMapDocket(object):
def _rawdata_filepath(revlog, docket):
"""The (vfs relative) nodemap's rawdata file for a given uid"""
- if revlog.nodemap_file.endswith(b'.n.a'):
- prefix = revlog.nodemap_file[:-4]
- else:
- prefix = revlog.nodemap_file[:-2]
+ prefix = revlog.radix
return b"%s-%s.nd" % (prefix, docket.uid)
def _other_rawdata_filepath(revlog, docket):
- prefix = revlog.nodemap_file[:-2]
+ prefix = revlog.radix
pattern = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
new_file_path = _rawdata_filepath(revlog, docket)
new_file_name = revlog.opener.basename(new_file_path)
@@ -653,12 +639,9 @@ def _find_node(block, node):
return entry
-def get_nodemap_file(opener, indexfile):
- if indexfile.endswith(b'.a'):
- pending_path = indexfile[:-4] + b".n.a"
- if opener.exists(pending_path):
+def get_nodemap_file(revlog):
+ if revlog._trypending:
+ pending_path = revlog.radix + b".n.a"
+ if revlog.opener.exists(pending_path):
return pending_path
- else:
- return indexfile[:-4] + b".n"
- else:
- return indexfile[:-2] + b".n"
+ return revlog.radix + b".n"
diff --git a/mercurial/revlogutils/randomaccessfile.py b/mercurial/revlogutils/randomaccessfile.py
new file mode 100644
--- /dev/null
+++ b/mercurial/revlogutils/randomaccessfile.py
@@ -0,0 +1,159 @@
+# Copyright Mercurial Contributors
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import contextlib
+
+from ..i18n import _
+from .. import (
+ error,
+ util,
+)
+
+
+_MAX_CACHED_CHUNK_SIZE = 1048576 # 1 MiB
+
+PARTIAL_READ_MSG = _(
+ b'partial read of revlog %s; expected %d bytes from offset %d, got %d'
+)
+
+
+def _is_power_of_two(n):
+ return (n & (n - 1) == 0) and n != 0
+
+
+class randomaccessfile(object):
+ """Accessing arbitrary chuncks of data within a file, with some caching"""
+
+ def __init__(
+ self,
+ opener,
+ filename,
+ default_cached_chunk_size,
+ initial_cache=None,
+ ):
+ # Required by bitwise manipulation below
+ assert _is_power_of_two(default_cached_chunk_size)
+
+ self.opener = opener
+ self.filename = filename
+ self.default_cached_chunk_size = default_cached_chunk_size
+ self.writing_handle = None # This is set from revlog.py
+ self.reading_handle = None
+ self._cached_chunk = b''
+ self._cached_chunk_position = 0 # Offset from the start of the file
+ if initial_cache:
+ self._cached_chunk_position, self._cached_chunk = initial_cache
+
+ def clear_cache(self):
+ self._cached_chunk = b''
+ self._cached_chunk_position = 0
+
+ def _open(self, mode=b'r'):
+ """Return a file object"""
+ return self.opener(self.filename, mode=mode)
+
+ @contextlib.contextmanager
+ def _open_read(self, existing_file_obj=None):
+ """File object suitable for reading data"""
+ # Use explicit file handle, if given.
+ if existing_file_obj is not None:
+ yield existing_file_obj
+
+ # Use a file handle being actively used for writes, if available.
+ # There is some danger to doing this because reads will seek the
+ # file. However, revlog._writeentry performs a SEEK_END before all
+ # writes, so we should be safe.
+ elif self.writing_handle:
+ yield self.writing_handle
+
+ elif self.reading_handle:
+ yield self.reading_handle
+
+ # Otherwise open a new file handle.
+ else:
+ with self._open() as fp:
+ yield fp
+
+ @contextlib.contextmanager
+ def reading(self):
+ """Context manager that keeps the file open for reading"""
+ if (
+ self.reading_handle is None
+ and self.writing_handle is None
+ and self.filename is not None
+ ):
+ with self._open() as fp:
+ self.reading_handle = fp
+ try:
+ yield
+ finally:
+ self.reading_handle = None
+ else:
+ yield
+
+ def read_chunk(self, offset, length, existing_file_obj=None):
+ """Read a chunk of bytes from the file.
+
+ Accepts an absolute offset, length to read, and an optional existing
+ file handle to read from.
+
+ If an existing file handle is passed, it will be seeked and the
+ original seek position will NOT be restored.
+
+ Returns a str or buffer of raw byte data.
+
+ Raises if the requested number of bytes could not be read.
+ """
+ end = offset + length
+ cache_start = self._cached_chunk_position
+ cache_end = cache_start + len(self._cached_chunk)
+ # Is the requested chunk within the cache?
+ if cache_start <= offset and end <= cache_end:
+ if cache_start == offset and end == cache_end:
+ return self._cached_chunk # avoid a copy
+ relative_start = offset - cache_start
+ return util.buffer(self._cached_chunk, relative_start, length)
+
+ return self._read_and_update_cache(offset, length, existing_file_obj)
+
+ def _read_and_update_cache(self, offset, length, existing_file_obj=None):
+ # Cache data both forward and backward around the requested
+ # data, in a fixed size window. This helps speed up operations
+ # involving reading the revlog backwards.
+ real_offset = offset & ~(self.default_cached_chunk_size - 1)
+ real_length = (
+ (offset + length + self.default_cached_chunk_size)
+ & ~(self.default_cached_chunk_size - 1)
+ ) - real_offset
+ with self._open_read(existing_file_obj) as file_obj:
+ file_obj.seek(real_offset)
+ data = file_obj.read(real_length)
+
+ self._add_cached_chunk(real_offset, data)
+
+ relative_offset = offset - real_offset
+ got = len(data) - relative_offset
+ if got < length:
+ message = PARTIAL_READ_MSG % (self.filename, length, offset, got)
+ raise error.RevlogError(message)
+
+ if offset != real_offset or real_length != length:
+ return util.buffer(data, relative_offset, length)
+ return data
+
+ def _add_cached_chunk(self, offset, data):
+ """Add to or replace the cached data chunk.
+
+ Accepts an absolute offset and the data that is at that location.
+ """
+ if (
+ self._cached_chunk_position + len(self._cached_chunk) == offset
+ and len(self._cached_chunk) + len(data) < _MAX_CACHED_CHUNK_SIZE
+ ):
+ # add to existing cache
+ self._cached_chunk += data
+ else:
+ self._cached_chunk = data
+ self._cached_chunk_position = offset
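The bitwise arithmetic in `_read_and_update_cache` aligns every physical read to a window that is a multiple of the (power-of-two) cached chunk size. A small worked example with arbitrary numbers:

    chunk = 65536                        # default_cached_chunk_size, a power of two
    offset, length = 70000, 100          # the caller's request
    real_offset = offset & ~(chunk - 1)                                     # 65536
    real_length = ((offset + length + chunk) & ~(chunk - 1)) - real_offset  # 65536
    # the request [70000, 70100) is fully inside the cached window [65536, 131072)
    assert real_offset <= offset
    assert offset + length <= real_offset + real_length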
diff --git a/mercurial/revlogutils/revlogv0.py b/mercurial/revlogutils/revlogv0.py
new file mode 100644
--- /dev/null
+++ b/mercurial/revlogutils/revlogv0.py
@@ -0,0 +1,147 @@
+# revlogv0 - code related to revlog format "V0"
+#
+# Copyright 2005-2007 Olivia Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+
+from ..node import sha1nodeconstants
+from .constants import (
+ INDEX_ENTRY_V0,
+)
+from ..i18n import _
+
+from .. import (
+ error,
+ node,
+ pycompat,
+ revlogutils,
+ util,
+)
+
+from . import (
+ nodemap as nodemaputil,
+)
+
+
+def getoffset(q):
+ return int(q >> 16)
+
+
+def gettype(q):
+ return int(q & 0xFFFF)
+
+
+class revlogoldindex(list):
+ rust_ext_compat = 0
+ entry_size = INDEX_ENTRY_V0.size
+ null_item = revlogutils.entry(
+ data_offset=0,
+ data_compressed_length=0,
+ data_delta_base=node.nullrev,
+ link_rev=node.nullrev,
+ parent_rev_1=node.nullrev,
+ parent_rev_2=node.nullrev,
+ node_id=sha1nodeconstants.nullid,
+ )
+
+ @property
+ def nodemap(self):
+ msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
+ util.nouideprecwarn(msg, b'5.3', stacklevel=2)
+ return self._nodemap
+
+ @util.propertycache
+ def _nodemap(self):
+ nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: node.nullrev})
+ for r in range(0, len(self)):
+ n = self[r][7]
+ nodemap[n] = r
+ return nodemap
+
+ def has_node(self, node):
+ """return True if the node exist in the index"""
+ return node in self._nodemap
+
+ def rev(self, node):
+ """return a revision for a node
+
+ If the node is unknown, raise a RevlogError"""
+ return self._nodemap[node]
+
+ def get_rev(self, node):
+ """return a revision for a node
+
+ If the node is unknown, return None"""
+ return self._nodemap.get(node)
+
+ def append(self, tup):
+ self._nodemap[tup[7]] = len(self)
+ super(revlogoldindex, self).append(tup)
+
+ def __delitem__(self, i):
+ if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
+ raise ValueError(b"deleting slices only supports a:-1 with step 1")
+ for r in pycompat.xrange(i.start, len(self)):
+ del self._nodemap[self[r][7]]
+ super(revlogoldindex, self).__delitem__(i)
+
+ def clearcaches(self):
+ self.__dict__.pop('_nodemap', None)
+
+ def __getitem__(self, i):
+ if i == -1:
+ return self.null_item
+ return list.__getitem__(self, i)
+
+ def pack_header(self, header):
+ """pack header information in binary"""
+ return b''
+
+ def entry_binary(self, rev):
+ """return the raw binary string representing a revision"""
+ entry = self[rev]
+ if gettype(entry[0]):
+ raise error.RevlogError(
+ _(b'index entry flags need revlog version 1')
+ )
+ e2 = (
+ getoffset(entry[0]),
+ entry[1],
+ entry[3],
+ entry[4],
+ self[entry[5]][7],
+ self[entry[6]][7],
+ entry[7],
+ )
+ return INDEX_ENTRY_V0.pack(*e2)
+
+
+def parse_index_v0(data, inline):
+ s = INDEX_ENTRY_V0.size
+ index = []
+ nodemap = nodemaputil.NodeMap({node.nullid: node.nullrev})
+ n = off = 0
+ l = len(data)
+ while off + s <= l:
+ cur = data[off : off + s]
+ off += s
+ e = INDEX_ENTRY_V0.unpack(cur)
+ # transform to revlogv1 format
+ e2 = revlogutils.entry(
+ data_offset=e[0],
+ data_compressed_length=e[1],
+ data_delta_base=e[2],
+ link_rev=e[3],
+ parent_rev_1=nodemap.get(e[4], node.nullrev),
+ parent_rev_2=nodemap.get(e[5], node.nullrev),
+ node_id=e[6],
+ )
+ index.append(e2)
+ nodemap[e[6]] = n
+ n += 1
+
+ index = revlogoldindex(index)
+ return index, None
diff --git a/mercurial/revlogutils/rewrite.py b/mercurial/revlogutils/rewrite.py
new file mode 100644
--- /dev/null
+++ b/mercurial/revlogutils/rewrite.py
@@ -0,0 +1,474 @@
+# censor code related to censoring revisions
+# coding: utf8
+#
+# Copyright 2021 Pierre-Yves David
+# Copyright 2015 Google, Inc
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import contextlib
+import os
+
+from ..node import (
+ nullrev,
+)
+from .constants import (
+ COMP_MODE_PLAIN,
+ ENTRY_DATA_COMPRESSED_LENGTH,
+ ENTRY_DATA_COMPRESSION_MODE,
+ ENTRY_DATA_OFFSET,
+ ENTRY_DATA_UNCOMPRESSED_LENGTH,
+ ENTRY_DELTA_BASE,
+ ENTRY_LINK_REV,
+ ENTRY_NODE_ID,
+ ENTRY_PARENT_1,
+ ENTRY_PARENT_2,
+ ENTRY_SIDEDATA_COMPRESSED_LENGTH,
+ ENTRY_SIDEDATA_COMPRESSION_MODE,
+ ENTRY_SIDEDATA_OFFSET,
+ REVLOGV0,
+ REVLOGV1,
+)
+from ..i18n import _
+
+from .. import (
+ error,
+ pycompat,
+ revlogutils,
+ util,
+)
+from ..utils import (
+ storageutil,
+)
+from . import (
+ constants,
+ deltas,
+)
+
+
+def v1_censor(rl, tr, censornode, tombstone=b''):
+ """censors a revision in a "version 1" revlog"""
+ assert rl._format_version == constants.REVLOGV1, rl._format_version
+
+ # avoid cycle
+ from .. import revlog
+
+ censorrev = rl.rev(censornode)
+ tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
+
+ # Rewriting the revlog in place is hard. Our strategy for censoring is
+ # to create a new revlog, copy all revisions to it, then replace the
+ # revlogs on transaction close.
+ #
+ # This is a bit dangerous. We could easily have a mismatch of state.
+ newrl = revlog.revlog(
+ rl.opener,
+ target=rl.target,
+ radix=rl.radix,
+ postfix=b'tmpcensored',
+ censorable=True,
+ )
+ newrl._format_version = rl._format_version
+ newrl._format_flags = rl._format_flags
+ newrl._generaldelta = rl._generaldelta
+ newrl._parse_index = rl._parse_index
+
+ for rev in rl.revs():
+ node = rl.node(rev)
+ p1, p2 = rl.parents(node)
+
+ if rev == censorrev:
+ newrl.addrawrevision(
+ tombstone,
+ tr,
+ rl.linkrev(censorrev),
+ p1,
+ p2,
+ censornode,
+ constants.REVIDX_ISCENSORED,
+ )
+
+ if newrl.deltaparent(rev) != nullrev:
+ m = _(b'censored revision stored as delta; cannot censor')
+ h = _(
+ b'censoring of revlogs is not fully implemented;'
+ b' please report this bug'
+ )
+ raise error.Abort(m, hint=h)
+ continue
+
+ if rl.iscensored(rev):
+ if rl.deltaparent(rev) != nullrev:
+ m = _(
+ b'cannot censor due to censored '
+ b'revision having delta stored'
+ )
+ raise error.Abort(m)
+ rawtext = rl._chunk(rev)
+ else:
+ rawtext = rl.rawdata(rev)
+
+ newrl.addrawrevision(
+ rawtext, tr, rl.linkrev(rev), p1, p2, node, rl.flags(rev)
+ )
+
+ tr.addbackup(rl._indexfile, location=b'store')
+ if not rl._inline:
+ tr.addbackup(rl._datafile, location=b'store')
+
+ rl.opener.rename(newrl._indexfile, rl._indexfile)
+ if not rl._inline:
+ rl.opener.rename(newrl._datafile, rl._datafile)
+
+ rl.clearcaches()
+ rl._loadindex()
+
+
+def v2_censor(revlog, tr, censornode, tombstone=b''):
+ """censors a revision in a "version 2" revlog"""
+ assert revlog._format_version != REVLOGV0, revlog._format_version
+ assert revlog._format_version != REVLOGV1, revlog._format_version
+
+ censor_revs = {revlog.rev(censornode)}
+ _rewrite_v2(revlog, tr, censor_revs, tombstone)
+
+
+def _rewrite_v2(revlog, tr, censor_revs, tombstone=b''):
+ """rewrite a revlog to censor some of its content
+
+ General principle
+
+ We create new revlog files (index/data/sidedata) to copy the content of
+ the existing data without the censored data.
+
+ We need to recompute new delta for any revision that used the censored
+ revision as delta base. As the cumulative size of the new delta may be
+ large, we store them in a temporary file until they are stored in their
+ final destination.
+
+ All data before the censored data can be blindly copied. The rest needs
+ to be copied as we go and the associated index entry needs adjustment.
+ """
+ assert revlog._format_version != REVLOGV0, revlog._format_version
+ assert revlog._format_version != REVLOGV1, revlog._format_version
+
+ old_index = revlog.index
+ docket = revlog._docket
+
+ tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
+
+ first_excl_rev = min(censor_revs)
+
+ first_excl_entry = revlog.index[first_excl_rev]
+ index_cutoff = revlog.index.entry_size * first_excl_rev
+ data_cutoff = first_excl_entry[ENTRY_DATA_OFFSET] >> 16
+ sidedata_cutoff = revlog.sidedata_cut_off(first_excl_rev)
+
+ with pycompat.unnamedtempfile(mode=b"w+b") as tmp_storage:
+ # rev → (new_base, data_start, data_end, compression_mode)
+ rewritten_entries = _precompute_rewritten_delta(
+ revlog,
+ old_index,
+ censor_revs,
+ tmp_storage,
+ )
+
+ all_files = _setup_new_files(
+ revlog,
+ index_cutoff,
+ data_cutoff,
+ sidedata_cutoff,
+ )
+
+ # we don't need to open the old index file since its content already
+ # exists in a usable form in `old_index`.
+ with all_files() as open_files:
+ (
+ old_data_file,
+ old_sidedata_file,
+ new_index_file,
+ new_data_file,
+ new_sidedata_file,
+ ) = open_files
+
+ # writing the censored revision
+
+ # Writing all subsequent revisions
+ for rev in range(first_excl_rev, len(old_index)):
+ if rev in censor_revs:
+ _rewrite_censor(
+ revlog,
+ old_index,
+ open_files,
+ rev,
+ tombstone,
+ )
+ else:
+ _rewrite_simple(
+ revlog,
+ old_index,
+ open_files,
+ rev,
+ rewritten_entries,
+ tmp_storage,
+ )
+ docket.write(transaction=None, stripping=True)
+
+
+def _precompute_rewritten_delta(
+ revlog,
+ old_index,
+ excluded_revs,
+ tmp_storage,
+):
+ """Compute new delta for revisions whose delta is based on revision that
+ will not survive as is.
+
+ Return a mapping: {rev → (new_base, data_start, data_end, compression_mode)}
+ """
+ dc = deltas.deltacomputer(revlog)
+ rewritten_entries = {}
+ first_excl_rev = min(excluded_revs)
+ with revlog._segmentfile._open_read() as dfh:
+ for rev in range(first_excl_rev, len(old_index)):
+ if rev in excluded_revs:
+ # this revision is one of the excluded (censored) ones; it will be
+ # rewritten separately, so no replacement delta is needed for it.
+ continue
+ entry = old_index[rev]
+ if entry[ENTRY_DELTA_BASE] not in excluded_revs:
+ continue
+ # This is a revision that uses the censored revision as the base
+ # for its delta. We need to compute a new delta for it.
+ if entry[ENTRY_DATA_UNCOMPRESSED_LENGTH] == 0:
+ # this revision is empty, we can delta against nullrev
+ rewritten_entries[rev] = (nullrev, 0, 0, COMP_MODE_PLAIN)
+ else:
+
+ text = revlog.rawdata(rev, _df=dfh)
+ info = revlogutils.revisioninfo(
+ node=entry[ENTRY_NODE_ID],
+ p1=revlog.node(entry[ENTRY_PARENT_1]),
+ p2=revlog.node(entry[ENTRY_PARENT_2]),
+ btext=[text],
+ textlen=len(text),
+ cachedelta=None,
+ flags=entry[ENTRY_DATA_OFFSET] & 0xFFFF,
+ )
+ d = dc.finddeltainfo(
+ info, dfh, excluded_bases=excluded_revs, target_rev=rev
+ )
+ default_comp = revlog._docket.default_compression_header
+ comp_mode, d = deltas.delta_compression(default_comp, d)
+ # using `tell` is a bit lazy, but we are not here for speed
+ start = tmp_storage.tell()
+ tmp_storage.write(d.data[1])
+ end = tmp_storage.tell()
+ rewritten_entries[rev] = (d.base, start, end, comp_mode)
+ return rewritten_entries
+
+
+def _setup_new_files(
+ revlog,
+ index_cutoff,
+ data_cutoff,
+ sidedata_cutoff,
+):
+ """
+
+ return a context manager to open all the relevant files:
+ - old_data_file,
+ - old_sidedata_file,
+ - new_index_file,
+ - new_data_file,
+ - new_sidedata_file,
+
+ The old_index_file is not here because it is accessed through the
+ `old_index` object in the caller function.
+ """
+ docket = revlog._docket
+ old_index_filepath = revlog.opener.join(docket.index_filepath())
+ old_data_filepath = revlog.opener.join(docket.data_filepath())
+ old_sidedata_filepath = revlog.opener.join(docket.sidedata_filepath())
+
+ new_index_filepath = revlog.opener.join(docket.new_index_file())
+ new_data_filepath = revlog.opener.join(docket.new_data_file())
+ new_sidedata_filepath = revlog.opener.join(docket.new_sidedata_file())
+
+ util.copyfile(old_index_filepath, new_index_filepath, nb_bytes=index_cutoff)
+ util.copyfile(old_data_filepath, new_data_filepath, nb_bytes=data_cutoff)
+ util.copyfile(
+ old_sidedata_filepath,
+ new_sidedata_filepath,
+ nb_bytes=sidedata_cutoff,
+ )
+ revlog.opener.register_file(docket.index_filepath())
+ revlog.opener.register_file(docket.data_filepath())
+ revlog.opener.register_file(docket.sidedata_filepath())
+
+ docket.index_end = index_cutoff
+ docket.data_end = data_cutoff
+ docket.sidedata_end = sidedata_cutoff
+
+ # reload the revlog internal information
+ revlog.clearcaches()
+ revlog._loadindex(docket=docket)
+
+ @contextlib.contextmanager
+ def all_files_opener():
+ # hide the file opening in a helper function to please check-code, black
+ # and various python versions at the same time
+ with open(old_data_filepath, 'rb') as old_data_file:
+ with open(old_sidedata_filepath, 'rb') as old_sidedata_file:
+ with open(new_index_filepath, 'r+b') as new_index_file:
+ with open(new_data_filepath, 'r+b') as new_data_file:
+ with open(
+ new_sidedata_filepath, 'r+b'
+ ) as new_sidedata_file:
+ new_index_file.seek(0, os.SEEK_END)
+ assert new_index_file.tell() == index_cutoff
+ new_data_file.seek(0, os.SEEK_END)
+ assert new_data_file.tell() == data_cutoff
+ new_sidedata_file.seek(0, os.SEEK_END)
+ assert new_sidedata_file.tell() == sidedata_cutoff
+ yield (
+ old_data_file,
+ old_sidedata_file,
+ new_index_file,
+ new_data_file,
+ new_sidedata_file,
+ )
+
+ return all_files_opener
+
+
+def _rewrite_simple(
+ revlog,
+ old_index,
+ all_files,
+ rev,
+ rewritten_entries,
+ tmp_storage,
+):
+ """append a normal revision to the index after the rewritten one(s)"""
+ (
+ old_data_file,
+ old_sidedata_file,
+ new_index_file,
+ new_data_file,
+ new_sidedata_file,
+ ) = all_files
+ entry = old_index[rev]
+ flags = entry[ENTRY_DATA_OFFSET] & 0xFFFF
+ old_data_offset = entry[ENTRY_DATA_OFFSET] >> 16
+
+ if rev not in rewritten_entries:
+ old_data_file.seek(old_data_offset)
+ new_data_size = entry[ENTRY_DATA_COMPRESSED_LENGTH]
+ new_data = old_data_file.read(new_data_size)
+ data_delta_base = entry[ENTRY_DELTA_BASE]
+ d_comp_mode = entry[ENTRY_DATA_COMPRESSION_MODE]
+ else:
+ (
+ data_delta_base,
+ start,
+ end,
+ d_comp_mode,
+ ) = rewritten_entries[rev]
+ new_data_size = end - start
+ tmp_storage.seek(start)
+ new_data = tmp_storage.read(new_data_size)
+
+ # It might be faster to group contiguous read/write operations;
+ # however, this is censor, an operation that is not focused
+ # on stellar performance. So I have not written this
+ # optimisation yet.
+ new_data_offset = new_data_file.tell()
+ new_data_file.write(new_data)
+
+ sidedata_size = entry[ENTRY_SIDEDATA_COMPRESSED_LENGTH]
+ new_sidedata_offset = new_sidedata_file.tell()
+ if 0 < sidedata_size:
+ old_sidedata_offset = entry[ENTRY_SIDEDATA_OFFSET]
+ old_sidedata_file.seek(old_sidedata_offset)
+ new_sidedata = old_sidedata_file.read(sidedata_size)
+ new_sidedata_file.write(new_sidedata)
+
+ data_uncompressed_length = entry[ENTRY_DATA_UNCOMPRESSED_LENGTH]
+ sd_com_mode = entry[ENTRY_SIDEDATA_COMPRESSION_MODE]
+ assert data_delta_base <= rev, (data_delta_base, rev)
+
+ new_entry = revlogutils.entry(
+ flags=flags,
+ data_offset=new_data_offset,
+ data_compressed_length=new_data_size,
+ data_uncompressed_length=data_uncompressed_length,
+ data_delta_base=data_delta_base,
+ link_rev=entry[ENTRY_LINK_REV],
+ parent_rev_1=entry[ENTRY_PARENT_1],
+ parent_rev_2=entry[ENTRY_PARENT_2],
+ node_id=entry[ENTRY_NODE_ID],
+ sidedata_offset=new_sidedata_offset,
+ sidedata_compressed_length=sidedata_size,
+ data_compression_mode=d_comp_mode,
+ sidedata_compression_mode=sd_com_mode,
+ )
+ revlog.index.append(new_entry)
+ entry_bin = revlog.index.entry_binary(rev)
+ new_index_file.write(entry_bin)
+
+ revlog._docket.index_end = new_index_file.tell()
+ revlog._docket.data_end = new_data_file.tell()
+ revlog._docket.sidedata_end = new_sidedata_file.tell()
+
+
+def _rewrite_censor(
+ revlog,
+ old_index,
+ all_files,
+ rev,
+ tombstone,
+):
+ """rewrite and append a censored revision"""
+ (
+ old_data_file,
+ old_sidedata_file,
+ new_index_file,
+ new_data_file,
+ new_sidedata_file,
+ ) = all_files
+ entry = old_index[rev]
+
+ # XXX consider trying the default compression too
+ new_data_size = len(tombstone)
+ new_data_offset = new_data_file.tell()
+ new_data_file.write(tombstone)
+
+ # we are not adding any sidedata as they might leak info about the censored version
+
+ link_rev = entry[ENTRY_LINK_REV]
+
+ p1 = entry[ENTRY_PARENT_1]
+ p2 = entry[ENTRY_PARENT_2]
+
+ new_entry = revlogutils.entry(
+ flags=constants.REVIDX_ISCENSORED,
+ data_offset=new_data_offset,
+ data_compressed_length=new_data_size,
+ data_uncompressed_length=new_data_size,
+ data_delta_base=rev,
+ link_rev=link_rev,
+ parent_rev_1=p1,
+ parent_rev_2=p2,
+ node_id=entry[ENTRY_NODE_ID],
+ sidedata_offset=0,
+ sidedata_compressed_length=0,
+ data_compression_mode=COMP_MODE_PLAIN,
+ sidedata_compression_mode=COMP_MODE_PLAIN,
+ )
+ revlog.index.append(new_entry)
+ entry_bin = revlog.index.entry_binary(rev)
+ new_index_file.write(entry_bin)
+ revlog._docket.index_end = new_index_file.tell()
+ revlog._docket.data_end = new_data_file.tell()
diff --git a/mercurial/revlogutils/sidedata.py b/mercurial/revlogutils/sidedata.py
--- a/mercurial/revlogutils/sidedata.py
+++ b/mercurial/revlogutils/sidedata.py
@@ -32,9 +32,11 @@ the concept.
from __future__ import absolute_import
+import collections
import struct
-from .. import error
+from .. import error, requirements as requirementsmod
+from ..revlogutils import constants, flagutil
from ..utils import hashutil
## sidedata type constant
@@ -91,3 +93,83 @@ def deserialize_sidedata(blob):
sidedata[key] = entrytext
dataoffset = nextdataoffset
return sidedata
+
+
+def get_sidedata_helpers(repo, remote_sd_categories, pull=False):
+ """
+ Returns a dictionary mapping revlog types to tuples of
+ `(repo, computers, removers)`:
+ * `repo` is used as an argument for computers
+ * `computers` is a list of `(category, (keys, computer, flags))` tuples
+ that compute the missing sidedata categories that were asked for:
+ * `category` is the sidedata category
+ * `keys` are the sidedata keys to be affected
+ * `flags` is a bitmask (an integer) of flags to remove when
+ removing the category.
+ * `computer` is the function `(repo, store, rev, sidedata)` that
+ returns a tuple of
+ `(new sidedata dict, (flags to add, flags to remove))`.
+ For example, it will return `({}, (0, 1 << 15))` to return no
+ sidedata, with no flags to add and one flag to remove.
+ * `removers` will remove the keys corresponding to the categories
+ that are present, but not needed.
+ If both `computers` and `removers` are empty, sidedata will simply not
+ be transformed.
+ """
+ # Computers for computing sidedata on-the-fly
+ sd_computers = collections.defaultdict(list)
+ # Computers for categories to remove from sidedata
+ sd_removers = collections.defaultdict(list)
+ to_generate = remote_sd_categories - repo._wanted_sidedata
+ to_remove = repo._wanted_sidedata - remote_sd_categories
+ if pull:
+ to_generate, to_remove = to_remove, to_generate
+
+ for revlog_kind, computers in repo._sidedata_computers.items():
+ for category, computer in computers.items():
+ if category in to_generate:
+ sd_computers[revlog_kind].append(computer)
+ if category in to_remove:
+ sd_removers[revlog_kind].append(computer)
+
+ sidedata_helpers = (repo, sd_computers, sd_removers)
+ return sidedata_helpers
+
+
+def run_sidedata_helpers(store, sidedata_helpers, sidedata, rev):
+ """Returns the sidedata for the given revision after running through
+ the given helpers.
+ - `store`: the revlog this applies to (changelog, manifest, or filelog
+ instance)
+ - `sidedata_helpers`: see `get_sidedata_helpers`
+ - `sidedata`: previous sidedata at the given rev, if any
+ - `rev`: affected rev of `store`
+ """
+ repo, sd_computers, sd_removers = sidedata_helpers
+ kind = store.revlog_kind
+ flags_to_add = 0
+ flags_to_remove = 0
+ for _keys, sd_computer, _flags in sd_computers.get(kind, []):
+ sidedata, flags = sd_computer(repo, store, rev, sidedata)
+ flags_to_add |= flags[0]
+ flags_to_remove |= flags[1]
+ for keys, _computer, flags in sd_removers.get(kind, []):
+ for key in keys:
+ sidedata.pop(key, None)
+ flags_to_remove |= flags
+ return sidedata, (flags_to_add, flags_to_remove)
+
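For illustration only, a hypothetical no-op sidedata computer matching the `(keys, computer, flags)` shape that `run_sidedata_helpers` iterates over; the only computer actually registered in this series is `metadata.copies_sidedata_computer` below:

    def _noop_sidedata_computer(repo, store, rev, sidedata):
        # keep the sidedata unchanged; no flags to add, none to remove
        return sidedata, (0, 0)

    example_entry = ((b'hypothetical-key',), _noop_sidedata_computer, 0)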
+
+def set_sidedata_spec_for_repo(repo):
+ # prevent cycle metadata -> revlogutils.sidedata -> metadata
+ from .. import metadata
+
+ if requirementsmod.COPIESSDC_REQUIREMENT in repo.requirements:
+ repo.register_wanted_sidedata(SD_FILES)
+ repo.register_sidedata_computer(
+ constants.KIND_CHANGELOG,
+ SD_FILES,
+ (SD_FILES,),
+ metadata.copies_sidedata_computer,
+ flagutil.REVIDX_HASCOPIESINFO,
+ )
diff --git a/mercurial/revset.py b/mercurial/revset.py
--- a/mercurial/revset.py
+++ b/mercurial/revset.py
@@ -1724,7 +1724,7 @@ def named(repo, subset, x):
def _node(repo, n):
"""process a node input"""
rn = None
- if len(n) == 40:
+ if len(n) == 2 * repo.nodeconstants.nodelen:
try:
rn = repo.changelog.rev(bin(n))
except error.WdirUnsupported:
@@ -1842,6 +1842,9 @@ def origin(repo, subset, x):
def outgoing(repo, subset, x):
"""Changesets not found in the specified destination repository, or the
default push location.
+
+ If the location resolves to multiple repositories, the union of all
+ outgoing changesets will be used.
"""
# Avoid cycles.
from . import (
@@ -1869,9 +1872,10 @@ def outgoing(repo, subset, x):
revs = [repo.lookup(rev) for rev in revs]
other = hg.peer(repo, {}, dest)
try:
- repo.ui.pushbuffer()
- outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
- repo.ui.popbuffer()
+ with repo.ui.silent():
+ outgoing = discovery.findcommonoutgoing(
+ repo, other, onlyheads=revs
+ )
finally:
other.close()
missing.update(outgoing.missing)
diff --git a/mercurial/rewriteutil.py b/mercurial/rewriteutil.py
--- a/mercurial/rewriteutil.py
+++ b/mercurial/rewriteutil.py
@@ -17,16 +17,38 @@ from .node import (
from . import (
error,
+ node,
obsolete,
obsutil,
revset,
scmutil,
+ util,
)
NODE_RE = re.compile(br'\b[0-9a-f]{6,64}\b')
+def _formatrevs(repo, revs, maxrevs=4):
+ """returns a string summarizing revisions in a decent size
+
+ If there are few enough revisions, we list them all. Otherwise we display a
+ summary of the form:
+
+ 1ea73414a91b and 5 others
+ """
+ tonode = repo.changelog.node
+ numrevs = len(revs)
+ if numrevs < maxrevs:
+ shorts = [node.short(tonode(r)) for r in revs]
+ summary = b', '.join(shorts)
+ else:
+ first = revs.first()
+ summary = _(b'%s and %d others')
+ summary %= (node.short(tonode(first)), numrevs - 1)
+ return summary
+
+
def precheck(repo, revs, action=b'rewrite'):
"""check if revs can be rewritten
action is used to control the error message.
@@ -34,22 +56,75 @@ def precheck(repo, revs, action=b'rewrit
Make sure this function is called after taking the lock.
"""
if nullrev in revs:
- msg = _(b"cannot %s null changeset") % action
+ msg = _(b"cannot %s the null revision") % action
hint = _(b"no changeset checked out")
raise error.InputError(msg, hint=hint)
+ if any(util.safehasattr(r, 'rev') for r in revs):
+ repo.ui.develwarn(b"rewriteutil.precheck called with ctx not revs")
+ revs = (r.rev() for r in revs)
+
if len(repo[None].parents()) > 1:
- raise error.StateError(_(b"cannot %s while merging") % action)
+ raise error.StateError(
+ _(b"cannot %s changesets while merging") % action
+ )
publicrevs = repo.revs(b'%ld and public()', revs)
if publicrevs:
- msg = _(b"cannot %s public changesets") % action
+ summary = _formatrevs(repo, publicrevs)
+ msg = _(b"cannot %s public changesets: %s") % (action, summary)
hint = _(b"see 'hg help phases' for details")
raise error.InputError(msg, hint=hint)
newunstable = disallowednewunstable(repo, revs)
if newunstable:
- raise error.InputError(_(b"cannot %s changeset with children") % action)
+ hint = _(b"see 'hg help evolution.instability'")
+ raise error.InputError(
+ _(b"cannot %s changeset, as that will orphan %d descendants")
+ % (action, len(newunstable)),
+ hint=hint,
+ )
+
+ if not obsolete.isenabled(repo, obsolete.allowdivergenceopt):
+ new_divergence = _find_new_divergence(repo, revs)
+ if new_divergence:
+ local_ctx, other_ctx, base_ctx = new_divergence
+ msg = _(
+ b'cannot %s %s, as that creates content-divergence with %s'
+ ) % (
+ action,
+ local_ctx,
+ other_ctx,
+ )
+ if local_ctx.rev() != base_ctx.rev():
+ msg += _(b', from %s') % base_ctx
+ if repo.ui.verbose:
+ if local_ctx.rev() != base_ctx.rev():
+ msg += _(
+ b'\n changeset %s is a successor of ' b'changeset %s'
+ ) % (local_ctx, base_ctx)
+ msg += _(
+ b'\n changeset %s already has a successor in '
+ b'changeset %s\n'
+ b' rewriting changeset %s would create '
+ b'"content-divergence"\n'
+ b' set experimental.evolution.allowdivergence=True to '
+ b'skip this check'
+ ) % (base_ctx, other_ctx, local_ctx)
+ raise error.InputError(
+ msg,
+ hint=_(
+ b"see 'hg help evolution.instability' for details on content-divergence"
+ ),
+ )
+ else:
+ raise error.InputError(
+ msg,
+ hint=_(
+ b"add --verbose for details or see "
+ b"'hg help evolution.instability'"
+ ),
+ )
def disallowednewunstable(repo, revs):
@@ -65,6 +140,40 @@ def disallowednewunstable(repo, revs):
return repo.revs(b"(%ld::) - %ld", revs, revs)
+def _find_new_divergence(repo, revs):
+ obsrevs = repo.revs(b'%ld and obsolete()', revs)
+ for r in obsrevs:
+ div = find_new_divergence_from(repo, repo[r])
+ if div:
+ return (repo[r], repo[div[0]], repo.unfiltered()[div[1]])
+ return None
+
+
+def find_new_divergence_from(repo, ctx):
+ """return divergent revision if rewriting an obsolete cset (ctx) will
+ create divergence
+
+ Returns (<divergent node>, <common precursor node>) or None
+ """
+ if not ctx.obsolete():
+ return None
+ # We need to check two cases that can cause divergence:
+ # case 1: the rev being rewritten has a non-obsolete successor (easily
+ # detected by successorssets)
+ sset = obsutil.successorssets(repo, ctx.node())
+ if sset:
+ return (sset[0][0], ctx.node())
+ else:
+ # case 2: one of the precursors of the rev being revived has a
+ # non-obsolete successor (we need divergentsets for this)
+ divsets = obsutil.divergentsets(repo, ctx)
+ if divsets:
+ nsuccset = divsets[0][b'divergentnodes']
+ prec = divsets[0][b'commonpredecessor']
+ return (nsuccset[0], prec)
+ return None
+
+
def skip_empty_successor(ui, command):
empty_successor = ui.config(b'rewrite', b'empty-successor')
if empty_successor == b'skip':
diff --git a/mercurial/scmutil.py b/mercurial/scmutil.py
--- a/mercurial/scmutil.py
+++ b/mercurial/scmutil.py
@@ -19,10 +19,8 @@ from .i18n import _
from .node import (
bin,
hex,
- nullid,
nullrev,
short,
- wdirid,
wdirrev,
)
from .pycompat import getattr
@@ -200,34 +198,13 @@ def callcatch(ui, func):
ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
except error.CensoredNodeError as inst:
ui.error(_(b"abort: file censored %s\n") % inst)
- except error.StorageError as inst:
- ui.error(_(b"abort: %s\n") % inst)
- if inst.hint:
- ui.error(_(b"(%s)\n") % inst.hint)
- detailed_exit_code = 50
- except error.InterventionRequired as inst:
- ui.error(b"%s\n" % inst)
- if inst.hint:
- ui.error(_(b"(%s)\n") % inst.hint)
- detailed_exit_code = 240
- coarse_exit_code = 1
except error.WdirUnsupported:
ui.error(_(b"abort: working directory revision cannot be specified\n"))
- except error.Abort as inst:
- if isinstance(inst, (error.InputError, error.ParseError)):
- detailed_exit_code = 10
- elif isinstance(inst, error.StateError):
- detailed_exit_code = 20
- elif isinstance(inst, error.ConfigError):
- detailed_exit_code = 30
- elif isinstance(inst, error.HookAbort):
- detailed_exit_code = 40
- elif isinstance(inst, error.RemoteError):
- detailed_exit_code = 100
- elif isinstance(inst, error.SecurityError):
- detailed_exit_code = 150
- elif isinstance(inst, error.CanceledError):
- detailed_exit_code = 250
+ except error.Error as inst:
+ if inst.detailed_exit_code is not None:
+ detailed_exit_code = inst.detailed_exit_code
+ if inst.coarse_exit_code is not None:
+ coarse_exit_code = inst.coarse_exit_code
ui.error(inst.format())
except error.WorkerError as inst:
# Don't print a message -- the worker already should have
@@ -450,7 +427,7 @@ def binnode(ctx):
"""Return binary node id for a given basectx"""
node = ctx.node()
if node is None:
- return wdirid
+ return ctx.repo().nodeconstants.wdirid
return node
@@ -645,7 +622,7 @@ def revsymbol(repo, symbol):
except (ValueError, OverflowError, IndexError):
pass
- if len(symbol) == 40:
+ if len(symbol) == 2 * repo.nodeconstants.nodelen:
try:
node = bin(symbol)
rev = repo.changelog.rev(node)
@@ -1108,7 +1085,7 @@ def cleanupnodes(
if roots:
newnode = roots[0].node()
else:
- newnode = nullid
+ newnode = repo.nullid
else:
newnode = newnodes[0]
moves[oldnode] = newnode
@@ -1479,7 +1456,7 @@ def dirstatecopy(ui, repo, wctx, src, ds
origsrc = repo.dirstate.copied(src) or src
if dst == origsrc: # copying back a copy?
if repo.dirstate[dst] not in b'mn' and not dryrun:
- repo.dirstate.normallookup(dst)
+ repo.dirstate.set_tracked(dst)
else:
if repo.dirstate[origsrc] == b'a' and origsrc == src:
if not ui.quiet:
@@ -1506,27 +1483,17 @@ def movedirstate(repo, newctx, match=Non
oldctx = repo[b'.']
ds = repo.dirstate
copies = dict(ds.copies())
- ds.setparents(newctx.node(), nullid)
+ ds.setparents(newctx.node(), repo.nullid)
s = newctx.status(oldctx, match=match)
+
for f in s.modified:
- if ds[f] == b'r':
- # modified + removed -> removed
- continue
- ds.normallookup(f)
+ ds.update_file_p1(f, p1_tracked=True)
for f in s.added:
- if ds[f] == b'r':
- # added + removed -> unknown
- ds.drop(f)
- elif ds[f] != b'a':
- ds.add(f)
+ ds.update_file_p1(f, p1_tracked=False)
for f in s.removed:
- if ds[f] == b'a':
- # removed + added -> normal
- ds.normallookup(f)
- elif ds[f] != b'r':
- ds.remove(f)
+ ds.update_file_p1(f, p1_tracked=True)
# Merge old parent and old working dir copies
oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
diff --git a/mercurial/setdiscovery.py b/mercurial/setdiscovery.py
--- a/mercurial/setdiscovery.py
+++ b/mercurial/setdiscovery.py
@@ -46,10 +46,7 @@ import collections
import random
from .i18n import _
-from .node import (
- nullid,
- nullrev,
-)
+from .node import nullrev
from . import (
error,
policy,
@@ -277,6 +274,8 @@ class partialdiscovery(object):
return sample
+pure_partialdiscovery = partialdiscovery
+
partialdiscovery = policy.importrust(
'discovery', member='PartialDiscovery', default=partialdiscovery
)
@@ -391,9 +390,9 @@ def findcommonheads(
audit[b'total-roundtrips'] = 1
if cl.tiprev() == nullrev:
- if srvheadhashes != [nullid]:
- return [nullid], True, srvheadhashes
- return [nullid], False, []
+ if srvheadhashes != [cl.nullid]:
+ return [cl.nullid], True, srvheadhashes
+ return [cl.nullid], False, []
else:
# we still need the remote head for the function return
with remote.commandexecutor() as e:
@@ -406,7 +405,7 @@ def findcommonheads(
knownsrvheads = [] # revnos of remote heads that are known locally
for node in srvheadhashes:
- if node == nullid:
+ if node == cl.nullid:
continue
try:
@@ -437,9 +436,11 @@ def findcommonheads(
hard_limit_sample = not (dynamic_sample or remote.limitedarguments)
randomize = ui.configbool(b'devel', b'discovery.randomize')
- disco = partialdiscovery(
- local, ownheads, hard_limit_sample, randomize=randomize
- )
+ if cl.index.rust_ext_compat:
+ pd = partialdiscovery
+ else:
+ pd = pure_partialdiscovery
+ disco = pd(local, ownheads, hard_limit_sample, randomize=randomize)
if initial_head_exchange:
# treat remote heads (and maybe own heads) as a first implicit sample
# response
@@ -503,17 +504,17 @@ def findcommonheads(
if audit is not None:
audit[b'total-roundtrips'] = roundtrips
- if not result and srvheadhashes != [nullid]:
+ if not result and srvheadhashes != [cl.nullid]:
if abortwhenunrelated:
raise error.Abort(_(b"repository is unrelated"))
else:
ui.warn(_(b"warning: repository is unrelated\n"))
return (
- {nullid},
+ {cl.nullid},
True,
srvheadhashes,
)
- anyincoming = srvheadhashes != [nullid]
+ anyincoming = srvheadhashes != [cl.nullid]
result = {clnode(r) for r in result}
return result, anyincoming, srvheadhashes
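The pure-Python class stays importable as pure_partialdiscovery, and the compiled PartialDiscovery is only used when the changelog index advertises rust_ext_compat. A rough sketch of that selection pattern; the module name hypothetical_rust_ext and the loader are placeholders for policy.importrust():

class PurePartialDiscovery(object):
    """Always-available pure-Python sampling state (illustrative)."""

    def __init__(self, repo, ownheads, hard_limit_sample, randomize=True):
        self.repo = repo
        self.ownheads = set(ownheads)

def _import_compiled_discovery():
    # Stand-in for policy.importrust(); returns None when unavailable.
    try:
        from hypothetical_rust_ext import PartialDiscovery
        return PartialDiscovery
    except ImportError:
        return None

def pick_discovery_class(index):
    """Use the compiled class only when the index layout supports it."""
    compiled = _import_compiled_discovery()
    if compiled is not None and getattr(index, 'rust_ext_compat', False):
        return compiled
    return PurePartialDiscovery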
diff --git a/mercurial/shelve.py b/mercurial/shelve.py
--- a/mercurial/shelve.py
+++ b/mercurial/shelve.py
@@ -31,7 +31,6 @@ from .i18n import _
from .node import (
bin,
hex,
- nullid,
nullrev,
)
from . import (
@@ -782,9 +781,7 @@ def mergefiles(ui, repo, wctx, shelvectx
dirstate."""
with ui.configoverride({(b'ui', b'quiet'): True}):
hg.update(repo, wctx.node())
- ui.pushbuffer(True)
cmdutil.revert(ui, repo, shelvectx)
- ui.popbuffer()
def restorebranch(ui, repo, branchtorestore):
@@ -822,7 +819,7 @@ def unshelvecontinue(ui, repo, state, op
pendingctx = state.pendingctx
with repo.dirstate.parentchange():
- repo.setparents(state.pendingctx.node(), nullid)
+ repo.setparents(state.pendingctx.node(), repo.nullid)
repo.dirstate.write(repo.currenttransaction())
targetphase = phases.internal
@@ -831,7 +828,7 @@ def unshelvecontinue(ui, repo, state, op
overrides = {(b'phases', b'new-commit'): targetphase}
with repo.ui.configoverride(overrides, b'unshelve'):
with repo.dirstate.parentchange():
- repo.setparents(state.parents[0], nullid)
+ repo.setparents(state.parents[0], repo.nullid)
newnode, ispartialunshelve = _createunshelvectx(
ui, repo, shelvectx, basename, interactive, opts
)
@@ -1027,7 +1024,7 @@ def _rebaserestoredcommit(
raise error.ConflictResolutionRequired(b'unshelve')
with repo.dirstate.parentchange():
- repo.setparents(tmpwctx.node(), nullid)
+ repo.setparents(tmpwctx.node(), repo.nullid)
newnode, ispartialunshelve = _createunshelvectx(
ui, repo, shelvectx, basename, interactive, opts
)
diff --git a/mercurial/sparse.py b/mercurial/sparse.py
--- a/mercurial/sparse.py
+++ b/mercurial/sparse.py
@@ -10,10 +10,7 @@ from __future__ import absolute_import
import os
from .i18n import _
-from .node import (
- hex,
- nullid,
-)
+from .node import hex
from . import (
error,
match as matchmod,
@@ -177,7 +174,7 @@ def activeconfig(repo):
revs = [
repo.changelog.rev(node)
for node in repo.dirstate.parents()
- if node != nullid
+ if node != repo.nullid
]
allincludes = set()
@@ -286,7 +283,7 @@ def prunetemporaryincludes(repo):
# Fix dirstate
for file in dropped:
- dirstate.drop(file)
+ dirstate.update_file(file, p1_tracked=False, wc_tracked=False)
repo.vfs.unlink(b'tempsparse')
repo._sparsesignaturecache.clear()
@@ -321,7 +318,7 @@ def matcher(repo, revs=None, includetemp
revs = [
repo.changelog.rev(node)
for node in repo.dirstate.parents()
- if node != nullid
+ if node != repo.nullid
]
signature = configsignature(repo, includetemp=includetemp)
@@ -442,13 +439,21 @@ def filterupdatesactions(repo, wctx, mct
message,
)
- mergemod.applyupdates(
- repo, tmresult, repo[None], repo[b'.'], False, wantfiledata=False
- )
+ with repo.dirstate.parentchange():
+ mergemod.applyupdates(
+ repo,
+ tmresult,
+ repo[None],
+ repo[b'.'],
+ False,
+ wantfiledata=False,
+ )
- dirstate = repo.dirstate
- for file, flags, msg in tmresult.getactions([mergestatemod.ACTION_GET]):
- dirstate.normal(file)
+ dirstate = repo.dirstate
+ for file, flags, msg in tmresult.getactions(
+ [mergestatemod.ACTION_GET]
+ ):
+ dirstate.update_file(file, p1_tracked=True, wc_tracked=True)
profiles = activeconfig(repo)[2]
changedprofiles = profiles & files
@@ -560,14 +565,16 @@ def refreshwdir(repo, origstatus, origsp
# Fix dirstate
for file in added:
- dirstate.normal(file)
+ dirstate.update_file(file, p1_tracked=True, wc_tracked=True)
for file in dropped:
- dirstate.drop(file)
+ dirstate.update_file(file, p1_tracked=False, wc_tracked=False)
for file in lookup:
# File exists on disk, and we're bringing it back in an unknown state.
- dirstate.normallookup(file)
+ dirstate.update_file(
+ file, p1_tracked=True, wc_tracked=True, possibly_dirty=True
+ )
return added, dropped, lookup
@@ -633,7 +640,7 @@ def clearrules(repo, force=False):
The remaining sparse config only has profiles, if defined. The working
directory is refreshed, as needed.
"""
- with repo.wlock():
+ with repo.wlock(), repo.dirstate.parentchange():
raw = repo.vfs.tryread(b'sparse')
includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
@@ -649,7 +656,7 @@ def importfromfiles(repo, opts, paths, f
The updated sparse config is written out and the working directory
is refreshed, as needed.
"""
- with repo.wlock():
+ with repo.wlock(), repo.dirstate.parentchange():
# read current configuration
raw = repo.vfs.tryread(b'sparse')
includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
@@ -711,7 +718,7 @@ def updateconfig(
The new config is written out and a working directory refresh is performed.
"""
- with repo.wlock():
+ with repo.wlock(), repo.dirstate.parentchange():
raw = repo.vfs.tryread(b'sparse')
oldinclude, oldexclude, oldprofiles = parseconfig(
repo.ui, raw, b'sparse'
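The sparse changes above replace the old per-state dirstate calls with the explicit update_file() API and run the working-directory refresh inside dirstate.parentchange(). A minimal sketch of the old-to-new mapping, assuming a dirstate object with the same update_file() and parentchange() as used in this patch:

def refresh_sparse_dirstate(dirstate, added, dropped, lookup):
    # normal(f)       -> update_file(f, p1_tracked=True,  wc_tracked=True)
    # drop(f)         -> update_file(f, p1_tracked=False, wc_tracked=False)
    # normallookup(f) -> update_file(f, p1_tracked=True,  wc_tracked=True,
    #                                possibly_dirty=True)
    with dirstate.parentchange():
        for f in added:
            dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
        for f in dropped:
            dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
        for f in lookup:
            dirstate.update_file(
                f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
            )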
diff --git a/mercurial/statichttprepo.py b/mercurial/statichttprepo.py
--- a/mercurial/statichttprepo.py
+++ b/mercurial/statichttprepo.py
@@ -177,6 +177,7 @@ class statichttprepository(
self.filtername = None
self._extrafilterid = None
self._wanted_sidedata = set()
+ self.features = set()
try:
requirements = set(self.vfs.read(b'requires').splitlines())
diff --git a/mercurial/store.py b/mercurial/store.py
--- a/mercurial/store.py
+++ b/mercurial/store.py
@@ -389,7 +389,15 @@ def _calcmode(vfs):
]
REVLOG_FILES_MAIN_EXT = (b'.i', b'i.tmpcensored')
-REVLOG_FILES_OTHER_EXT = (b'.d', b'.n', b'.nd', b'd.tmpcensored')
+REVLOG_FILES_OTHER_EXT = (
+ b'.idx',
+ b'.d',
+ b'.dat',
+ b'.n',
+ b'.nd',
+ b'.sda',
+ b'd.tmpcensored',
+)
# files that are "volatile" and might change between listing and streaming
#
# note: the ".nd" file are nodemap data and won't "change" but they might be
@@ -397,7 +405,9 @@ REVLOG_FILES_OTHER_EXT = (b'.d', b'.n',
REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd')
# some exception to the above matching
-EXCLUDED = re.compile(b'.*undo\.[^/]+\.nd?$')
+#
+# XXX This is currently not in use because of issue6542
+EXCLUDED = re.compile(b'.*undo\.[^/]+\.(nd?|i)$')
def is_revlog(f, kind, st):
@@ -407,13 +417,17 @@ def is_revlog(f, kind, st):
def revlog_type(f):
+    # XXX we need to filter out the `undo.*` files created by the transaction
+    # here, however being naive about it also filters out revlogs for files
+    # named `undo.*`, leading to issue6542. So we no longer use EXCLUDED.
if f.endswith(REVLOG_FILES_MAIN_EXT):
return FILEFLAGS_REVLOG_MAIN
- elif f.endswith(REVLOG_FILES_OTHER_EXT) and EXCLUDED.match(f) is None:
+ elif f.endswith(REVLOG_FILES_OTHER_EXT):
t = FILETYPE_FILELOG_OTHER
if f.endswith(REVLOG_FILES_VOLATILE_EXT):
t |= FILEFLAGS_VOLATILE
return t
+ return None
# the file is part of changelog data
@@ -706,7 +720,7 @@ class _fncachevfs(vfsmod.proxyvfs):
# do not trigger a fncache load when adding a file that already is
# known to exist.
notload = self.fncache.entries is None and self.vfs.exists(encoded)
- if notload and b'a' in mode and not self.vfs.stat(encoded).st_size:
+ if notload and b'r+' in mode and not self.vfs.stat(encoded).st_size:
# when appending to an existing file, if the file has size zero,
# it should be considered as missing. Such zero-size files are
# the result of truncation when a transaction is aborted.
@@ -721,6 +735,11 @@ class _fncachevfs(vfsmod.proxyvfs):
else:
return self.vfs.join(path)
+ def register_file(self, path):
+        """generic hook point letting fncache register the files it must track"""
+ if path.startswith(b'data/') or path.startswith(b'meta/'):
+ self.fncache.add(path)
+
class fncachestore(basicstore):
def __init__(self, path, vfstype, dotencode):
@@ -753,6 +772,7 @@ class fncachestore(basicstore):
ef = self.encode(f)
try:
t = revlog_type(f)
+ assert t is not None, f
t |= FILEFLAGS_FILELOG
yield t, f, ef, self.getsize(ef)
except OSError as err:
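With EXCLUDED disabled, revlog_type() is now a pure suffix classifier that returns None for anything it does not recognize, and the fncache walker asserts on that instead of silently skipping entries. A self-contained sketch of the classification; the flag values are illustrative, the real constants live in mercurial.store:

FILEFLAGS_REVLOG_MAIN = 1 << 0   # illustrative values only
FILETYPE_FILELOG_OTHER = 1 << 1
FILEFLAGS_VOLATILE = 1 << 2

MAIN_EXT = (b'.i', b'i.tmpcensored')
OTHER_EXT = (b'.idx', b'.d', b'.dat', b'.n', b'.nd', b'.sda', b'd.tmpcensored')
VOLATILE_EXT = (b'.n', b'.nd')

def classify(name):
    """Return revlog flags for a store file name, or None if unrecognized."""
    if name.endswith(MAIN_EXT):
        return FILEFLAGS_REVLOG_MAIN
    if name.endswith(OTHER_EXT):
        t = FILETYPE_FILELOG_OTHER
        if name.endswith(VOLATILE_EXT):
            t |= FILEFLAGS_VOLATILE
        return t
    return None

assert classify(b'data/foo.i') == FILEFLAGS_REVLOG_MAIN
assert classify(b'data/foo.nd') == FILETYPE_FILELOG_OTHER | FILEFLAGS_VOLATILE
assert classify(b'fncache') is None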
diff --git a/mercurial/streamclone.py b/mercurial/streamclone.py
--- a/mercurial/streamclone.py
+++ b/mercurial/streamclone.py
@@ -8,6 +8,7 @@
from __future__ import absolute_import
import contextlib
+import errno
import os
import struct
@@ -15,6 +16,7 @@ from .i18n import _
from .pycompat import open
from .interfaces import repository
from . import (
+ bookmarks,
cacheutil,
error,
narrowspec,
@@ -25,6 +27,9 @@ from . import (
store,
util,
)
+from .utils import (
+ stringutil,
+)
def canperformstreamclone(pullop, bundle2=False):
@@ -613,6 +618,47 @@ def _test_sync_point_walk_2(repo):
"""a function for synchronisation during tests"""
+def _v2_walk(repo, includes, excludes, includeobsmarkers):
+    """emit a series of file information entries useful to clone a repo
+
+ return (entries, totalfilesize)
+
+    entries is a list of tuples (vfs-key, file-path, file-type, size)
+
+    - `vfs-key`: key identifying the right vfs to write the file (see _makemap)
+    - `file-path`: path of the file to copy (to be fed to the vfs)
+    - `file-type`: does this file need to be copied with the source lock?
+ - `size`: the size of the file (or None)
+ """
+ assert repo._currentlock(repo._lockref) is not None
+ entries = []
+ totalfilesize = 0
+
+ matcher = None
+ if includes or excludes:
+ matcher = narrowspec.match(repo.root, includes, excludes)
+
+ for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
+ if size:
+ ft = _fileappend
+ if rl_type & store.FILEFLAGS_VOLATILE:
+ ft = _filefull
+ entries.append((_srcstore, name, ft, size))
+ totalfilesize += size
+ for name in _walkstreamfullstorefiles(repo):
+ if repo.svfs.exists(name):
+ totalfilesize += repo.svfs.lstat(name).st_size
+ entries.append((_srcstore, name, _filefull, None))
+ if includeobsmarkers and repo.svfs.exists(b'obsstore'):
+ totalfilesize += repo.svfs.lstat(b'obsstore').st_size
+ entries.append((_srcstore, b'obsstore', _filefull, None))
+ for name in cacheutil.cachetocopy(repo):
+ if repo.cachevfs.exists(name):
+ totalfilesize += repo.cachevfs.lstat(name).st_size
+ entries.append((_srccache, name, _filefull, None))
+ return entries, totalfilesize
+
+
def generatev2(repo, includes, excludes, includeobsmarkers):
"""Emit content for version 2 of a streaming clone.
@@ -628,32 +674,14 @@ def generatev2(repo, includes, excludes,
with repo.lock():
- entries = []
- totalfilesize = 0
-
- matcher = None
- if includes or excludes:
- matcher = narrowspec.match(repo.root, includes, excludes)
+ repo.ui.debug(b'scanning\n')
- repo.ui.debug(b'scanning\n')
- for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
- if size:
- ft = _fileappend
- if rl_type & store.FILEFLAGS_VOLATILE:
- ft = _filefull
- entries.append((_srcstore, name, ft, size))
- totalfilesize += size
- for name in _walkstreamfullstorefiles(repo):
- if repo.svfs.exists(name):
- totalfilesize += repo.svfs.lstat(name).st_size
- entries.append((_srcstore, name, _filefull, None))
- if includeobsmarkers and repo.svfs.exists(b'obsstore'):
- totalfilesize += repo.svfs.lstat(b'obsstore').st_size
- entries.append((_srcstore, b'obsstore', _filefull, None))
- for name in cacheutil.cachetocopy(repo):
- if repo.cachevfs.exists(name):
- totalfilesize += repo.cachevfs.lstat(name).st_size
- entries.append((_srccache, name, _filefull, None))
+ entries, totalfilesize = _v2_walk(
+ repo,
+ includes=includes,
+ excludes=excludes,
+ includeobsmarkers=includeobsmarkers,
+ )
chunks = _emit2(repo, entries, totalfilesize)
first = next(chunks)
@@ -767,3 +795,112 @@ def applybundlev2(repo, fp, filecount, f
repo.ui, repo.requirements, repo.features
)
scmutil.writereporequirements(repo)
+
+
+def _copy_files(src_vfs_map, dst_vfs_map, entries, progress):
+ hardlink = [True]
+
+ def copy_used():
+ hardlink[0] = False
+ progress.topic = _(b'copying')
+
+ for k, path, size in entries:
+ src_vfs = src_vfs_map[k]
+ dst_vfs = dst_vfs_map[k]
+ src_path = src_vfs.join(path)
+ dst_path = dst_vfs.join(path)
+ dirname = dst_vfs.dirname(path)
+ if not dst_vfs.exists(dirname):
+ dst_vfs.makedirs(dirname)
+ dst_vfs.register_file(path)
+        # XXX we could use the `nb_bytes` argument.
+ util.copyfile(
+ src_path,
+ dst_path,
+ hardlink=hardlink[0],
+ no_hardlink_cb=copy_used,
+ check_fs_hardlink=False,
+ )
+ progress.increment()
+ return hardlink[0]
+
+
+def local_copy(src_repo, dest_repo):
+ """copy all content from one local repository to another
+
+    This is useful for local clones."""
+ src_store_requirements = {
+ r
+ for r in src_repo.requirements
+ if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
+ }
+ dest_store_requirements = {
+ r
+ for r in dest_repo.requirements
+ if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
+ }
+ assert src_store_requirements == dest_store_requirements
+
+ with dest_repo.lock():
+ with src_repo.lock():
+
+            # bookmarks are not integrated into the streaming as they might
+            # use `repo.vfs`, and there is too much sensitive data accessible
+            # through `repo.vfs` to expose it to a streaming clone.
+ src_book_vfs = bookmarks.bookmarksvfs(src_repo)
+ srcbookmarks = src_book_vfs.join(b'bookmarks')
+ bm_count = 0
+ if os.path.exists(srcbookmarks):
+ bm_count = 1
+
+ entries, totalfilesize = _v2_walk(
+ src_repo,
+ includes=None,
+ excludes=None,
+ includeobsmarkers=True,
+ )
+ src_vfs_map = _makemap(src_repo)
+ dest_vfs_map = _makemap(dest_repo)
+ progress = src_repo.ui.makeprogress(
+ topic=_(b'linking'),
+ total=len(entries) + bm_count,
+ unit=_(b'files'),
+ )
+ # copy files
+ #
+ # We could copy the full file while the source repository is locked
+ # and the other one without the lock. However, in the linking case,
+            # this would also require checks that nobody is appending any data
+ # to the files while we do the clone, so this is not done yet. We
+ # could do this blindly when copying files.
+ files = ((k, path, size) for k, path, ftype, size in entries)
+ hardlink = _copy_files(src_vfs_map, dest_vfs_map, files, progress)
+
+ # copy bookmarks over
+ if bm_count:
+ dst_book_vfs = bookmarks.bookmarksvfs(dest_repo)
+ dstbookmarks = dst_book_vfs.join(b'bookmarks')
+ util.copyfile(srcbookmarks, dstbookmarks)
+ progress.complete()
+ if hardlink:
+ msg = b'linked %d files\n'
+ else:
+ msg = b'copied %d files\n'
+ src_repo.ui.debug(msg % (len(entries) + bm_count))
+
+ with dest_repo.transaction(b"localclone") as tr:
+ dest_repo.store.write(tr)
+
+        # clean up transaction files as they do not make sense here
+ undo_files = [(dest_repo.svfs, b'undo.backupfiles')]
+ undo_files.extend(dest_repo.undofiles())
+ for undovfs, undofile in undo_files:
+ try:
+ undovfs.unlink(undofile)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ msg = _(b'error removing %s: %s\n')
+ path = undovfs.join(undofile)
+ e_msg = stringutil.forcebytestr(e)
+ msg %= (path, e_msg)
+ dest_repo.ui.warn(msg)
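_copy_files() starts out hardlinking and, the first time util.copyfile reports that a hardlink was not possible, flips the whole run to plain copies and relabels the progress topic. A stand-alone sketch of the same fallback using os.link and shutil directly instead of util.copyfile (the function and argument names here are illustrative):

import os
import shutil

def copy_entries(entries, on_fallback=None):
    """Hardlink each (src, dst) pair; switch to copying once a link fails.

    Returns True when every file could be hardlinked."""
    hardlink = True
    for src, dst in entries:
        parent = os.path.dirname(dst)
        if parent:
            os.makedirs(parent, exist_ok=True)
        if hardlink:
            try:
                os.link(src, dst)
                continue
            except OSError:
                # cross-device link, filesystem without hardlinks, ...
                hardlink = False
                if on_fallback is not None:
                    on_fallback()
        shutil.copy2(src, dst)
    return hardlink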
diff --git a/mercurial/strip.py b/mercurial/strip.py
--- a/mercurial/strip.py
+++ b/mercurial/strip.py
@@ -2,7 +2,6 @@ from __future__ import absolute_import
from .i18n import _
from .pycompat import getattr
-from .node import nullid
from . import (
bookmarks as bookmarksmod,
cmdutil,
@@ -39,7 +38,7 @@ def _findupdatetarget(repo, nodes):
if (
util.safehasattr(repo, b'mq')
- and p2 != nullid
+ and p2 != repo.nullid
and p2 in [x.node for x in repo.mq.applied]
):
unode = p2
@@ -218,7 +217,7 @@ def debugstrip(ui, repo, *revs, **opts):
# if one of the wdir parent is stripped we'll need
# to update away to an earlier revision
update = any(
- p != nullid and cl.rev(p) in strippedrevs
+ p != repo.nullid and cl.rev(p) in strippedrevs
for p in repo.dirstate.parents()
)
diff --git a/mercurial/subrepo.py b/mercurial/subrepo.py
--- a/mercurial/subrepo.py
+++ b/mercurial/subrepo.py
@@ -21,7 +21,6 @@ from .i18n import _
from .node import (
bin,
hex,
- nullid,
short,
)
from . import (
@@ -61,7 +60,7 @@ def _expandedabspath(path):
expandedpath = urlutil.urllocalpath(util.expandpath(path))
u = urlutil.url(expandedpath)
if not u.scheme:
- path = util.normpath(os.path.abspath(u.path))
+ path = util.normpath(util.abspath(u.path))
return path
@@ -686,7 +685,7 @@ class hgsubrepo(abstractsubrepo):
# we can't fully delete the repository as it may contain
# local-only history
self.ui.note(_(b'removing subrepo %s\n') % subrelpath(self))
- hg.clean(self._repo, nullid, False)
+ hg.clean(self._repo, self._repo.nullid, False)
def _get(self, state):
source, revision, kind = state
diff --git a/mercurial/subrepoutil.py b/mercurial/subrepoutil.py
--- a/mercurial/subrepoutil.py
+++ b/mercurial/subrepoutil.py
@@ -458,7 +458,7 @@ def _abssource(repo, push=False, abort=T
# C:\some\path\relative
if urlutil.hasdriveletter(path):
if len(path) == 2 or path[2:3] not in br'\/':
- path = os.path.abspath(path)
+ path = util.abspath(path)
return path
if abort:
diff --git a/mercurial/tagmerge.py b/mercurial/tagmerge.py
--- a/mercurial/tagmerge.py
+++ b/mercurial/tagmerge.py
@@ -74,9 +74,6 @@
from __future__ import absolute_import
from .i18n import _
-from .node import (
- nullhex,
-)
from . import (
tags as tagsmod,
util,
@@ -243,8 +240,8 @@ def merge(repo, fcd, fco, fca):
pnlosttagset = basetagset - pntagset
for t in pnlosttagset:
pntags[t] = basetags[t]
- if pntags[t][-1][0] != nullhex:
- pntags[t].append([nullhex, None])
+ if pntags[t][-1][0] != repo.nodeconstants.nullhex:
+ pntags[t].append([repo.nodeconstants.nullhex, None])
conflictedtags = [] # for reporting purposes
mergedtags = util.sortdict(p1tags)
diff --git a/mercurial/tags.py b/mercurial/tags.py
--- a/mercurial/tags.py
+++ b/mercurial/tags.py
@@ -18,7 +18,6 @@ import io
from .node import (
bin,
hex,
- nullid,
nullrev,
short,
)
@@ -96,12 +95,12 @@ def fnoderevs(ui, repo, revs):
return fnodes
-def _nulltonone(value):
+def _nulltonone(repo, value):
"""convert nullid to None
For tag value, nullid means "deleted". This small utility function helps
translating that to None."""
- if value == nullid:
+ if value == repo.nullid:
return None
return value
@@ -123,14 +122,14 @@ def difftags(ui, repo, oldfnodes, newfno
# list of (tag, old, new): None means missing
entries = []
for tag, (new, __) in newtags.items():
- new = _nulltonone(new)
+ new = _nulltonone(repo, new)
old, __ = oldtags.pop(tag, (None, None))
- old = _nulltonone(old)
+ old = _nulltonone(repo, old)
if old != new:
entries.append((tag, old, new))
# handle deleted tags
for tag, (old, __) in oldtags.items():
- old = _nulltonone(old)
+ old = _nulltonone(repo, old)
if old is not None:
entries.append((tag, old, None))
entries.sort()
@@ -452,7 +451,7 @@ def _readtagcache(ui, repo):
repoheads = repo.heads()
# Case 2 (uncommon): empty repo; get out quickly and don't bother
# writing an empty cache.
- if repoheads == [nullid]:
+ if repoheads == [repo.nullid]:
return ([], {}, valid, {}, False)
# Case 3 (uncommon): cache file missing or empty.
@@ -499,7 +498,7 @@ def _getfnodes(ui, repo, nodes):
for node in nodes:
fnode = fnodescache.getfnode(node)
flog = repo.file(b'.hgtags')
- if fnode != nullid:
+ if fnode != repo.nullid:
if fnode not in validated_fnodes:
if flog.hasnode(fnode):
validated_fnodes.add(fnode)
@@ -510,7 +509,7 @@ def _getfnodes(ui, repo, nodes):
if unknown_entries:
fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
for node, fnode in pycompat.iteritems(fixed_nodemap):
- if fnode != nullid:
+ if fnode != repo.nullid:
cachefnode[node] = fnode
fnodescache.write()
@@ -632,7 +631,7 @@ def _tag(
m = name
if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
- old = repo.tags().get(name, nullid)
+ old = repo.tags().get(name, repo.nullid)
fp.write(b'%s %s\n' % (hex(old), m))
fp.write(b'%s %s\n' % (hex(node), m))
fp.close()
@@ -762,8 +761,8 @@ class hgtagsfnodescache(object):
If an .hgtags does not exist at the specified revision, nullid is
returned.
"""
- if node == nullid:
- return nullid
+ if node == self._repo.nullid:
+ return node
ctx = self._repo[node]
rev = ctx.rev()
@@ -826,7 +825,7 @@ class hgtagsfnodescache(object):
fnode = ctx.filenode(b'.hgtags')
except error.LookupError:
# No .hgtags file on this revision.
- fnode = nullid
+ fnode = self._repo.nullid
return fnode
def setfnode(self, node, fnode):
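Throughout tags.py the null node is now read from the repository (repo.nullid) instead of the removed module-level constant, which is why small helpers such as _nulltonone() gained a repo argument. A tiny sketch of that helper with a stand-in repository object (FakeRepo is illustrative only):

class FakeRepo(object):
    nullid = b'\0' * 20  # real repos expose this via their nodeconstants

def nulltonone(repo, value):
    """Translate the repo's null node ("deleted tag") into None."""
    if value == repo.nullid:
        return None
    return value

assert nulltonone(FakeRepo(), b'\0' * 20) is None
assert nulltonone(FakeRepo(), b'\x11' * 20) == b'\x11' * 20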
diff --git a/mercurial/templatefuncs.py b/mercurial/templatefuncs.py
--- a/mercurial/templatefuncs.py
+++ b/mercurial/templatefuncs.py
@@ -10,10 +10,7 @@ from __future__ import absolute_import
import re
from .i18n import _
-from .node import (
- bin,
- wdirid,
-)
+from .node import bin
from . import (
color,
dagop,
@@ -767,9 +764,10 @@ def shortest(context, mapping, args):
)
repo = context.resource(mapping, b'repo')
- if len(hexnode) > 40:
+ hexnodelen = 2 * repo.nodeconstants.nodelen
+ if len(hexnode) > hexnodelen:
return hexnode
- elif len(hexnode) == 40:
+ elif len(hexnode) == hexnodelen:
try:
node = bin(hexnode)
except TypeError:
@@ -778,7 +776,7 @@ def shortest(context, mapping, args):
try:
node = scmutil.resolvehexnodeidprefix(repo, hexnode)
except error.WdirUnsupported:
- node = wdirid
+ node = repo.nodeconstants.wdirid
except error.LookupError:
return hexnode
if not node:
diff --git a/mercurial/templatekw.py b/mercurial/templatekw.py
--- a/mercurial/templatekw.py
+++ b/mercurial/templatekw.py
@@ -10,8 +10,6 @@ from __future__ import absolute_import
from .i18n import _
from .node import (
hex,
- nullid,
- wdirid,
wdirrev,
)
@@ -29,7 +27,10 @@ from . import (
templateutil,
util,
)
-from .utils import stringutil
+from .utils import (
+ stringutil,
+ urlutil,
+)
_hybrid = templateutil.hybrid
hybriddict = templateutil.hybriddict
@@ -412,7 +413,7 @@ def getgraphnode(repo, ctx, cache):
def getgraphnodecurrent(repo, ctx, cache):
wpnodes = repo.dirstate.parents()
- if wpnodes[1] == nullid:
+ if wpnodes[1] == repo.nullid:
wpnodes = wpnodes[:1]
if ctx.node() in wpnodes:
return b'@'
@@ -525,11 +526,12 @@ def showmanifest(context, mapping):
ctx = context.resource(mapping, b'ctx')
mnode = ctx.manifestnode()
if mnode is None:
- mnode = wdirid
+ mnode = repo.nodeconstants.wdirid
mrev = wdirrev
+ mhex = repo.nodeconstants.wdirhex
else:
mrev = repo.manifestlog.rev(mnode)
-    mhex = hex(mnode)
+        mhex = hex(mnode)
mapping = context.overlaymap(mapping, {b'rev': mrev, b'node': mhex})
f = context.process(b'manifest', mapping)
return templateutil.hybriditem(
@@ -661,17 +663,29 @@ def showpeerurls(context, mapping):
repo = context.resource(mapping, b'repo')
# see commands.paths() for naming of dictionary keys
paths = repo.ui.paths
- urls = util.sortdict(
- (k, p.rawloc) for k, p in sorted(pycompat.iteritems(paths))
- )
+ all_paths = urlutil.list_paths(repo.ui)
+ urls = util.sortdict((k, p.rawloc) for k, p in all_paths)
def makemap(k):
- p = paths[k]
- d = {b'name': k, b'url': p.rawloc}
- d.update((o, v) for o, v in sorted(pycompat.iteritems(p.suboptions)))
+ ps = paths[k]
+ d = {b'name': k}
+ if len(ps) == 1:
+ d[b'url'] = ps[0].rawloc
+ sub_opts = pycompat.iteritems(ps[0].suboptions)
+ sub_opts = util.sortdict(sorted(sub_opts))
+ d.update(sub_opts)
+ path_dict = util.sortdict()
+ for p in ps:
+ sub_opts = util.sortdict(sorted(pycompat.iteritems(p.suboptions)))
+ path_dict[b'url'] = p.rawloc
+ path_dict.update(sub_opts)
+ d[b'urls'] = [path_dict]
return d
- return _hybrid(None, urls, makemap, lambda k: b'%s=%s' % (k, urls[k]))
+ def format_one(k):
+ return b'%s=%s' % (k, urls[k])
+
+ return _hybrid(None, urls, makemap, format_one)
@templatekeyword(b"predecessors", requires={b'repo', b'ctx'})
diff --git a/mercurial/testing/__init__.py b/mercurial/testing/__init__.py
--- a/mercurial/testing/__init__.py
+++ b/mercurial/testing/__init__.py
@@ -16,8 +16,10 @@ environ = getattr(os, 'environ')
def _timeout_factor():
"""return the current modification to timeout"""
- default = int(environ.get('HGTEST_TIMEOUT_DEFAULT', 1))
+ default = int(environ.get('HGTEST_TIMEOUT_DEFAULT', 360))
current = int(environ.get('HGTEST_TIMEOUT', default))
+ if current == 0:
+ return 1
return current / float(default)
@@ -25,7 +27,7 @@ def wait_file(path, timeout=10):
timeout *= _timeout_factor()
start = time.time()
while not os.path.exists(path):
- if time.time() - start > timeout:
+ if timeout and time.time() - start > timeout:
raise RuntimeError(b"timed out waiting for file: %s" % path)
time.sleep(0.01)
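_timeout_factor() now scales waits against a 360-second default and treats HGTEST_TIMEOUT=0 as "no timeout" (factor 1, with wait_file skipping its deadline check when the scaled timeout is falsy). A tiny worked example of the arithmetic, assuming the same environment-variable semantics:

import os

def timeout_factor(environ=None):
    environ = os.environ if environ is None else environ
    default = int(environ.get('HGTEST_TIMEOUT_DEFAULT', 360))
    current = int(environ.get('HGTEST_TIMEOUT', default))
    if current == 0:
        return 1
    return current / float(default)

# HGTEST_TIMEOUT=720 doubles every per-wait timeout (720 / 360 == 2.0).
assert timeout_factor({'HGTEST_TIMEOUT': '720'}) == 2.0
# 0 keeps the factor at 1; callers then treat the timeout as disabled.
assert timeout_factor({'HGTEST_TIMEOUT': '0'}) == 1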
diff --git a/mercurial/testing/storage.py b/mercurial/testing/storage.py
--- a/mercurial/testing/storage.py
+++ b/mercurial/testing/storage.py
@@ -11,7 +11,6 @@ import unittest
from ..node import (
hex,
- nullid,
nullrev,
)
from ..pycompat import getattr
@@ -51,7 +50,7 @@ class ifileindextests(basetestcase):
self.assertFalse(f.hasnode(None))
self.assertFalse(f.hasnode(0))
self.assertFalse(f.hasnode(nullrev))
- self.assertFalse(f.hasnode(nullid))
+ self.assertFalse(f.hasnode(f.nullid))
self.assertFalse(f.hasnode(b'0'))
self.assertFalse(f.hasnode(b'a' * 20))
@@ -64,8 +63,8 @@ class ifileindextests(basetestcase):
self.assertEqual(list(f.revs(start=20)), [])
- # parents() and parentrevs() work with nullid/nullrev.
- self.assertEqual(f.parents(nullid), (nullid, nullid))
+ # parents() and parentrevs() work with f.nullid/nullrev.
+ self.assertEqual(f.parents(f.nullid), (f.nullid, f.nullid))
self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
with self.assertRaises(error.LookupError):
@@ -78,9 +77,9 @@ class ifileindextests(basetestcase):
with self.assertRaises(IndexError):
f.parentrevs(i)
- # nullid/nullrev lookup always works.
- self.assertEqual(f.rev(nullid), nullrev)
- self.assertEqual(f.node(nullrev), nullid)
+ # f.nullid/nullrev lookup always works.
+ self.assertEqual(f.rev(f.nullid), nullrev)
+ self.assertEqual(f.node(nullrev), f.nullid)
with self.assertRaises(error.LookupError):
f.rev(b'\x01' * 20)
@@ -92,16 +91,16 @@ class ifileindextests(basetestcase):
with self.assertRaises(IndexError):
f.node(i)
- self.assertEqual(f.lookup(nullid), nullid)
- self.assertEqual(f.lookup(nullrev), nullid)
- self.assertEqual(f.lookup(hex(nullid)), nullid)
- self.assertEqual(f.lookup(b'%d' % nullrev), nullid)
+ self.assertEqual(f.lookup(f.nullid), f.nullid)
+ self.assertEqual(f.lookup(nullrev), f.nullid)
+ self.assertEqual(f.lookup(hex(f.nullid)), f.nullid)
+ self.assertEqual(f.lookup(b'%d' % nullrev), f.nullid)
with self.assertRaises(error.LookupError):
f.lookup(b'badvalue')
with self.assertRaises(error.LookupError):
- f.lookup(hex(nullid)[0:12])
+ f.lookup(hex(f.nullid)[0:12])
with self.assertRaises(error.LookupError):
f.lookup(b'-2')
@@ -140,19 +139,19 @@ class ifileindextests(basetestcase):
with self.assertRaises(IndexError):
f.iscensored(i)
- self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
+ self.assertEqual(list(f.commonancestorsheads(f.nullid, f.nullid)), [])
with self.assertRaises(ValueError):
self.assertEqual(list(f.descendants([])), [])
self.assertEqual(list(f.descendants([nullrev])), [])
- self.assertEqual(f.heads(), [nullid])
- self.assertEqual(f.heads(nullid), [nullid])
- self.assertEqual(f.heads(None, [nullid]), [nullid])
- self.assertEqual(f.heads(nullid, [nullid]), [nullid])
+ self.assertEqual(f.heads(), [f.nullid])
+ self.assertEqual(f.heads(f.nullid), [f.nullid])
+ self.assertEqual(f.heads(None, [f.nullid]), [f.nullid])
+ self.assertEqual(f.heads(f.nullid, [f.nullid]), [f.nullid])
- self.assertEqual(f.children(nullid), [])
+ self.assertEqual(f.children(f.nullid), [])
with self.assertRaises(error.LookupError):
f.children(b'\x01' * 20)
@@ -160,7 +159,7 @@ class ifileindextests(basetestcase):
def testsinglerevision(self):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node = f.add(b'initial', None, tr, 0, nullid, nullid)
+ node = f.add(b'initial', None, tr, 0, f.nullid, f.nullid)
self.assertEqual(len(f), 1)
self.assertEqual(list(f), [0])
@@ -174,7 +173,7 @@ class ifileindextests(basetestcase):
self.assertTrue(f.hasnode(node))
self.assertFalse(f.hasnode(hex(node)))
self.assertFalse(f.hasnode(nullrev))
- self.assertFalse(f.hasnode(nullid))
+ self.assertFalse(f.hasnode(f.nullid))
self.assertFalse(f.hasnode(node[0:12]))
self.assertFalse(f.hasnode(hex(node)[0:20]))
@@ -188,7 +187,7 @@ class ifileindextests(basetestcase):
self.assertEqual(list(f.revs(1, 0)), [1, 0])
self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
- self.assertEqual(f.parents(node), (nullid, nullid))
+ self.assertEqual(f.parents(node), (f.nullid, f.nullid))
self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
with self.assertRaises(error.LookupError):
@@ -209,7 +208,7 @@ class ifileindextests(basetestcase):
self.assertEqual(f.lookup(node), node)
self.assertEqual(f.lookup(0), node)
- self.assertEqual(f.lookup(-1), nullid)
+ self.assertEqual(f.lookup(-1), f.nullid)
self.assertEqual(f.lookup(b'0'), node)
self.assertEqual(f.lookup(hex(node)), node)
@@ -256,9 +255,9 @@ class ifileindextests(basetestcase):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
- node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
- node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(fulltext1, None, tr, 1, node0, f.nullid)
+ node2 = f.add(fulltext2, None, tr, 3, node1, f.nullid)
self.assertEqual(len(f), 3)
self.assertEqual(list(f), [0, 1, 2])
@@ -284,9 +283,9 @@ class ifileindextests(basetestcase):
# TODO this is wrong
self.assertEqual(list(f.revs(3, 2)), [3, 2])
- self.assertEqual(f.parents(node0), (nullid, nullid))
- self.assertEqual(f.parents(node1), (node0, nullid))
- self.assertEqual(f.parents(node2), (node1, nullid))
+ self.assertEqual(f.parents(node0), (f.nullid, f.nullid))
+ self.assertEqual(f.parents(node1), (node0, f.nullid))
+ self.assertEqual(f.parents(node2), (node1, f.nullid))
self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
self.assertEqual(f.parentrevs(1), (0, nullrev))
@@ -330,7 +329,7 @@ class ifileindextests(basetestcase):
with self.assertRaises(IndexError):
f.iscensored(3)
- self.assertEqual(f.commonancestorsheads(node1, nullid), [])
+ self.assertEqual(f.commonancestorsheads(node1, f.nullid), [])
self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
@@ -364,12 +363,12 @@ class ifileindextests(basetestcase):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'0', None, tr, 0, nullid, nullid)
- node1 = f.add(b'1', None, tr, 1, node0, nullid)
- node2 = f.add(b'2', None, tr, 2, node1, nullid)
- node3 = f.add(b'3', None, tr, 3, node0, nullid)
- node4 = f.add(b'4', None, tr, 4, node3, nullid)
- node5 = f.add(b'5', None, tr, 5, node0, nullid)
+ node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'1', None, tr, 1, node0, f.nullid)
+ node2 = f.add(b'2', None, tr, 2, node1, f.nullid)
+ node3 = f.add(b'3', None, tr, 3, node0, f.nullid)
+ node4 = f.add(b'4', None, tr, 4, node3, f.nullid)
+ node5 = f.add(b'5', None, tr, 5, node0, f.nullid)
self.assertEqual(len(f), 6)
@@ -427,24 +426,24 @@ class ifiledatatests(basetestcase):
with self.assertRaises(IndexError):
f.size(i)
- self.assertEqual(f.revision(nullid), b'')
- self.assertEqual(f.rawdata(nullid), b'')
+ self.assertEqual(f.revision(f.nullid), b'')
+ self.assertEqual(f.rawdata(f.nullid), b'')
with self.assertRaises(error.LookupError):
f.revision(b'\x01' * 20)
- self.assertEqual(f.read(nullid), b'')
+ self.assertEqual(f.read(f.nullid), b'')
with self.assertRaises(error.LookupError):
f.read(b'\x01' * 20)
- self.assertFalse(f.renamed(nullid))
+ self.assertFalse(f.renamed(f.nullid))
with self.assertRaises(error.LookupError):
f.read(b'\x01' * 20)
- self.assertTrue(f.cmp(nullid, b''))
- self.assertTrue(f.cmp(nullid, b'foo'))
+ self.assertTrue(f.cmp(f.nullid, b''))
+ self.assertTrue(f.cmp(f.nullid, b'foo'))
with self.assertRaises(error.LookupError):
f.cmp(b'\x01' * 20, b'irrelevant')
@@ -455,7 +454,7 @@ class ifiledatatests(basetestcase):
next(gen)
# Emitting null node yields nothing.
- gen = f.emitrevisions([nullid])
+ gen = f.emitrevisions([f.nullid])
with self.assertRaises(StopIteration):
next(gen)
@@ -468,7 +467,7 @@ class ifiledatatests(basetestcase):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node = f.add(fulltext, None, tr, 0, nullid, nullid)
+ node = f.add(fulltext, None, tr, 0, f.nullid, f.nullid)
self.assertEqual(f.storageinfo(), {})
self.assertEqual(
@@ -496,10 +495,10 @@ class ifiledatatests(basetestcase):
rev = next(gen)
self.assertEqual(rev.node, node)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
self.assertIsNone(rev.delta)
@@ -512,10 +511,10 @@ class ifiledatatests(basetestcase):
rev = next(gen)
self.assertEqual(rev.node, node)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext)
self.assertIsNone(rev.delta)
@@ -534,9 +533,9 @@ class ifiledatatests(basetestcase):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
- node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
- node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(fulltext1, None, tr, 1, node0, f.nullid)
+ node2 = f.add(fulltext2, None, tr, 3, node1, f.nullid)
self.assertEqual(f.storageinfo(), {})
self.assertEqual(
@@ -596,10 +595,10 @@ class ifiledatatests(basetestcase):
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext0)
self.assertIsNone(rev.delta)
@@ -608,7 +607,7 @@ class ifiledatatests(basetestcase):
self.assertEqual(rev.node, node1)
self.assertEqual(rev.p1node, node0)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
self.assertEqual(rev.basenode, node0)
self.assertIsNone(rev.baserevisionsize)
@@ -622,7 +621,7 @@ class ifiledatatests(basetestcase):
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
self.assertEqual(rev.basenode, node1)
self.assertIsNone(rev.baserevisionsize)
@@ -641,10 +640,10 @@ class ifiledatatests(basetestcase):
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext0)
self.assertIsNone(rev.delta)
@@ -653,7 +652,7 @@ class ifiledatatests(basetestcase):
self.assertEqual(rev.node, node1)
self.assertEqual(rev.p1node, node0)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
self.assertEqual(rev.basenode, node0)
self.assertIsNone(rev.baserevisionsize)
@@ -667,7 +666,7 @@ class ifiledatatests(basetestcase):
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
self.assertEqual(rev.basenode, node1)
self.assertIsNone(rev.baserevisionsize)
@@ -700,16 +699,16 @@ class ifiledatatests(basetestcase):
rev = next(gen)
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext2)
self.assertIsNone(rev.delta)
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
# Delta behavior is storage dependent, so we can't easily test it.
with self.assertRaises(StopIteration):
@@ -722,8 +721,8 @@ class ifiledatatests(basetestcase):
rev = next(gen)
self.assertEqual(rev.node, node1)
self.assertEqual(rev.p1node, node0)
- self.assertEqual(rev.p2node, nullid)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext1)
self.assertIsNone(rev.delta)
@@ -731,7 +730,7 @@ class ifiledatatests(basetestcase):
rev = next(gen)
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertEqual(rev.basenode, node1)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
@@ -751,7 +750,7 @@ class ifiledatatests(basetestcase):
rev = next(gen)
self.assertEqual(rev.node, node1)
self.assertEqual(rev.p1node, node0)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertEqual(rev.basenode, node0)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
@@ -768,9 +767,9 @@ class ifiledatatests(basetestcase):
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
self.assertEqual(
@@ -789,9 +788,9 @@ class ifiledatatests(basetestcase):
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
self.assertEqual(
@@ -802,7 +801,7 @@ class ifiledatatests(basetestcase):
rev = next(gen)
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertEqual(rev.basenode, node0)
with self.assertRaises(StopIteration):
@@ -841,11 +840,11 @@ class ifiledatatests(basetestcase):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
- node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
- node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(fulltext1, meta1, tr, 1, node0, f.nullid)
+ node2 = f.add(fulltext2, meta2, tr, 2, f.nullid, f.nullid)
- # Metadata header isn't recognized when parent isn't nullid.
+ # Metadata header isn't recognized when parent isn't f.nullid.
self.assertEqual(f.size(1), len(stored1))
self.assertEqual(f.size(2), len(fulltext2))
@@ -886,8 +885,8 @@ class ifiledatatests(basetestcase):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
- node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
+ node0 = f.add(fulltext0, {}, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(fulltext1, meta1, tr, 1, f.nullid, f.nullid)
# TODO this is buggy.
self.assertEqual(f.size(0), len(fulltext0) + 4)
@@ -916,15 +915,15 @@ class ifiledatatests(basetestcase):
fulltext1 = fulltext0 + b'bar\n'
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
node1 = b'\xaa' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+ f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
)
self.assertEqual(len(f), 2)
- self.assertEqual(f.parents(node1), (node0, nullid))
+ self.assertEqual(f.parents(node1), (node0, f.nullid))
# revision() raises since it performs hash verification.
with self.assertRaises(error.StorageError):
@@ -951,11 +950,11 @@ class ifiledatatests(basetestcase):
fulltext1 = fulltext0 + b'bar\n'
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
node1 = b'\xaa' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+ f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
)
with self.assertRaises(error.StorageError):
@@ -973,11 +972,11 @@ class ifiledatatests(basetestcase):
fulltext1 = fulltext0 + b'bar\n'
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
node1 = b'\xaa' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+ f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
)
with self.assertRaises(error.StorageError):
@@ -994,22 +993,22 @@ class ifiledatatests(basetestcase):
fulltext2 = fulltext1 + b'baz\n'
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
node1 = b'\xaa' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+ f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
)
with self.assertRaises(error.StorageError):
f.read(node1)
- node2 = storageutil.hashrevisionsha1(fulltext2, node1, nullid)
+ node2 = storageutil.hashrevisionsha1(fulltext2, node1, f.nullid)
with self._maketransactionfn() as tr:
delta = mdiff.textdiff(fulltext1, fulltext2)
self._addrawrevisionfn(
- f, tr, node2, node1, nullid, 2, delta=(1, delta)
+ f, tr, node2, node1, f.nullid, 2, delta=(1, delta)
)
self.assertEqual(len(f), 3)
@@ -1029,13 +1028,13 @@ class ifiledatatests(basetestcase):
)
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
+ node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
# The node value doesn't matter since we can't verify it.
node1 = b'\xbb' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, stored1, censored=True
+ f, tr, node1, node0, f.nullid, 1, stored1, censored=True
)
self.assertTrue(f.iscensored(1))
@@ -1063,13 +1062,13 @@ class ifiledatatests(basetestcase):
)
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
+ node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
# The node value doesn't matter since we can't verify it.
node1 = b'\xbb' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, stored1, censored=True
+ f, tr, node1, node0, f.nullid, 1, stored1, censored=True
)
with self.assertRaises(error.CensoredNodeError):
@@ -1088,10 +1087,10 @@ class ifilemutationtests(basetestcase):
def testaddnoop(self):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
- node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
+ node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
# Varying by linkrev shouldn't impact hash.
- node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
+ node2 = f.add(b'foo', None, tr, 1, f.nullid, f.nullid)
self.assertEqual(node1, node0)
self.assertEqual(node2, node0)
@@ -1102,7 +1101,9 @@ class ifilemutationtests(basetestcase):
with self._maketransactionfn() as tr:
# Adding a revision with bad node value fails.
with self.assertRaises(error.StorageError):
- f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
+ f.addrevision(
+ b'foo', tr, 0, f.nullid, f.nullid, node=b'\x01' * 20
+ )
def testaddrevisionunknownflag(self):
f = self._makefilefn()
@@ -1113,7 +1114,7 @@ class ifilemutationtests(basetestcase):
break
with self.assertRaises(error.StorageError):
- f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
+ f.addrevision(b'foo', tr, 0, f.nullid, f.nullid, flags=flags)
def testaddgroupsimple(self):
f = self._makefilefn()
@@ -1153,12 +1154,12 @@ class ifilemutationtests(basetestcase):
delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
f = self._makefilefn()
deltas = [
- (node0, nullid, nullid, nullid, nullid, delta0, 0, {}),
+ (node0, f.nullid, f.nullid, f.nullid, f.nullid, delta0, 0, {}),
]
with self._maketransactionfn() as tr:
@@ -1207,7 +1208,7 @@ class ifilemutationtests(basetestcase):
nodes = []
with self._maketransactionfn() as tr:
for fulltext in fulltexts:
- nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
+ nodes.append(f.add(fulltext, None, tr, 0, f.nullid, f.nullid))
f = self._makefilefn()
deltas = []
@@ -1215,7 +1216,7 @@ class ifilemutationtests(basetestcase):
delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
deltas.append(
- (nodes[i], nullid, nullid, nullid, nullid, delta, 0, {})
+ (nodes[i], f.nullid, f.nullid, f.nullid, f.nullid, delta, 0, {})
)
with self._maketransactionfn() as tr:
@@ -1254,18 +1255,18 @@ class ifilemutationtests(basetestcase):
)
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
+ node0 = f.add(b'foo\n' * 30, None, tr, 0, f.nullid, f.nullid)
# The node value doesn't matter since we can't verify it.
node1 = b'\xbb' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, stored1, censored=True
+ f, tr, node1, node0, f.nullid, 1, stored1, censored=True
)
delta = mdiff.textdiff(b'bar\n' * 30, (b'bar\n' * 30) + b'baz\n')
deltas = [
- (b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0, {})
+ (b'\xcc' * 20, node1, f.nullid, b'\x01' * 20, node1, delta, 0, {})
]
with self._maketransactionfn() as tr:
@@ -1276,9 +1277,9 @@ class ifilemutationtests(basetestcase):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
- node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, nullid)
- node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, nullid)
+ node0 = f.add(b'foo\n' * 30, None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, f.nullid)
+ node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, f.nullid)
with self._maketransactionfn() as tr:
f.censorrevision(tr, node1)
@@ -1298,7 +1299,7 @@ class ifilemutationtests(basetestcase):
with self._maketransactionfn() as tr:
for rev in range(10):
- f.add(b'%d' % rev, None, tr, rev, nullid, nullid)
+ f.add(b'%d' % rev, None, tr, rev, f.nullid, f.nullid)
for rev in range(10):
self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1308,10 +1309,10 @@ class ifilemutationtests(basetestcase):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- p1 = nullid
+ p1 = f.nullid
for rev in range(10):
- f.add(b'%d' % rev, None, tr, rev, p1, nullid)
+ f.add(b'%d' % rev, None, tr, rev, p1, f.nullid)
for rev in range(10):
self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1320,11 +1321,11 @@ class ifilemutationtests(basetestcase):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'0', None, tr, 0, nullid, nullid)
- node1 = f.add(b'1', None, tr, 1, node0, nullid)
- f.add(b'2', None, tr, 2, node1, nullid)
- f.add(b'3', None, tr, 3, node0, nullid)
- f.add(b'4', None, tr, 4, node0, nullid)
+ node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'1', None, tr, 1, node0, f.nullid)
+ f.add(b'2', None, tr, 2, node1, f.nullid)
+ f.add(b'3', None, tr, 3, node0, f.nullid)
+ f.add(b'4', None, tr, 4, node0, f.nullid)
for rev in range(5):
self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1333,9 +1334,9 @@ class ifilemutationtests(basetestcase):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'0', None, tr, 0, nullid, nullid)
- f.add(b'1', None, tr, 10, node0, nullid)
- f.add(b'2', None, tr, 5, node0, nullid)
+ node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+ f.add(b'1', None, tr, 10, node0, f.nullid)
+ f.add(b'2', None, tr, 5, node0, f.nullid)
self.assertEqual(f.getstrippoint(0), (0, set()))
self.assertEqual(f.getstrippoint(1), (1, set()))
@@ -1362,9 +1363,9 @@ class ifilemutationtests(basetestcase):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- p1 = nullid
+ p1 = f.nullid
for rev in range(10):
- p1 = f.add(b'%d' % rev, None, tr, rev, p1, nullid)
+ p1 = f.add(b'%d' % rev, None, tr, rev, p1, f.nullid)
self.assertEqual(len(f), 10)
@@ -1377,9 +1378,9 @@ class ifilemutationtests(basetestcase):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- f.add(b'0', None, tr, 0, nullid, nullid)
- node1 = f.add(b'1', None, tr, 5, nullid, nullid)
- node2 = f.add(b'2', None, tr, 10, nullid, nullid)
+ f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'1', None, tr, 5, f.nullid, f.nullid)
+ node2 = f.add(b'2', None, tr, 10, f.nullid, f.nullid)
self.assertEqual(len(f), 3)
diff --git a/mercurial/transaction.py b/mercurial/transaction.py
--- a/mercurial/transaction.py
+++ b/mercurial/transaction.py
@@ -56,7 +56,7 @@ def _playback(
unlink=True,
checkambigfiles=None,
):
- for f, o in entries:
+ for f, o in sorted(dict(entries).items()):
if o or not unlink:
checkambig = checkambigfiles and (f, b'') in checkambigfiles
try:
@@ -94,8 +94,9 @@ def _playback(
try:
util.copyfile(backuppath, filepath, checkambig=checkambig)
backupfiles.append(b)
- except IOError:
- report(_(b"failed to recover %s\n") % f)
+ except IOError as exc:
+ e_msg = stringutil.forcebytestr(exc)
+ report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
else:
target = f or b
try:
@@ -632,9 +633,9 @@ class transaction(util.transactional):
"""write transaction data for possible future undo call"""
if self._undoname is None:
return
- undobackupfile = self._opener.open(
- b"%s.backupfiles" % self._undoname, b'w'
- )
+
+ undo_backup_path = b"%s.backupfiles" % self._undoname
+ undobackupfile = self._opener.open(undo_backup_path, b'w')
undobackupfile.write(b'%d\n' % version)
for l, f, b, c in self._backupentries:
if not f: # temporary file
@@ -701,6 +702,11 @@ class transaction(util.transactional):
self._releasefn = None # Help prevent cycles.
+BAD_VERSION_MSG = _(
+ b"journal was created by a different version of Mercurial\n"
+)
+
+
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
"""Rolls back the transaction contained in the given file
@@ -720,9 +726,8 @@ def rollback(opener, vfsmap, file, repor
entries = []
backupentries = []
- fp = opener.open(file)
- lines = fp.readlines()
- fp.close()
+ with opener.open(file) as fp:
+ lines = fp.readlines()
for l in lines:
try:
f, o = l.split(b'\0')
@@ -738,20 +743,15 @@ def rollback(opener, vfsmap, file, repor
lines = fp.readlines()
if lines:
ver = lines[0][:-1]
- if ver == (b'%d' % version):
+ if ver != (b'%d' % version):
+ report(BAD_VERSION_MSG)
+ else:
for line in lines[1:]:
if line:
# Shave off the trailing newline
line = line[:-1]
l, f, b, c = line.split(b'\0')
backupentries.append((l, f, b, bool(c)))
- else:
- report(
- _(
- b"journal was created by a different version of "
- b"Mercurial\n"
- )
- )
_playback(
file,
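The journal being replayed here is a simple text format: one `path\0offset` line per touched file, with the undo/backup variant adding a version header line. A minimal sketch of parsing such entries, assuming only that layout:

def parse_journal(data):
    """Parse b'path\\0offset\\n' journal lines into (path, offset) tuples."""
    entries = []
    for line in data.splitlines():
        if not line:
            continue
        f, o = line.split(b'\0')
        entries.append((f, int(o)))
    return entries

assert parse_journal(b'data/foo.i\x00123\n00changelog.i\x000\n') == [
    (b'data/foo.i', 123),
    (b'00changelog.i', 0),
]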
diff --git a/mercurial/treediscovery.py b/mercurial/treediscovery.py
--- a/mercurial/treediscovery.py
+++ b/mercurial/treediscovery.py
@@ -10,10 +10,7 @@ from __future__ import absolute_import
import collections
from .i18n import _
-from .node import (
- nullid,
- short,
-)
+from .node import short
from . import (
error,
pycompat,
@@ -44,11 +41,11 @@ def findcommonincoming(repo, remote, hea
if audit is not None:
audit[b'total-roundtrips'] = 1
- if repo.changelog.tip() == nullid:
- base.add(nullid)
- if heads != [nullid]:
- return [nullid], [nullid], list(heads)
- return [nullid], [], heads
+ if repo.changelog.tip() == repo.nullid:
+ base.add(repo.nullid)
+ if heads != [repo.nullid]:
+ return [repo.nullid], [repo.nullid], list(heads)
+ return [repo.nullid], [], heads
# assume we're closer to the tip than the root
# and start by examining the heads
@@ -84,7 +81,7 @@ def findcommonincoming(repo, remote, hea
continue
repo.ui.debug(b"examining %s:%s\n" % (short(n[0]), short(n[1])))
- if n[0] == nullid: # found the end of the branch
+ if n[0] == repo.nullid: # found the end of the branch
pass
elif n in seenbranch:
repo.ui.debug(b"branch already found\n")
@@ -170,7 +167,7 @@ def findcommonincoming(repo, remote, hea
raise error.RepoError(_(b"already have changeset ") + short(f[:4]))
base = list(base)
- if base == [nullid]:
+ if base == [repo.nullid]:
if force:
repo.ui.warn(_(b"warning: repository is unrelated\n"))
else:
diff --git a/mercurial/ui.py b/mercurial/ui.py
--- a/mercurial/ui.py
+++ b/mercurial/ui.py
@@ -233,6 +233,8 @@ class ui(object):
self._trustusers = set()
self._trustgroups = set()
self.callhooks = True
+ # hold the root to use for each [paths] entry
+ self._path_to_root = {}
# Insecure server connections requested.
self.insecureconnections = False
# Blocked time
@@ -264,6 +266,7 @@ class ui(object):
self._trustgroups = src._trustgroups.copy()
self.environ = src.environ
self.callhooks = src.callhooks
+ self._path_to_root = src._path_to_root
self.insecureconnections = src.insecureconnections
self._colormode = src._colormode
self._terminfoparams = src._terminfoparams.copy()
@@ -545,22 +548,26 @@ class ui(object):
root = root or encoding.getcwd()
for c in self._tcfg, self._ucfg, self._ocfg:
for n, p in c.items(b'paths'):
+ old_p = p
+ s = self.configsource(b'paths', n) or b'none'
+ root_key = (n, p, s)
+ if root_key not in self._path_to_root:
+ self._path_to_root[root_key] = root
# Ignore sub-options.
if b':' in n:
continue
if not p:
continue
if b'%%' in p:
- s = self.configsource(b'paths', n) or b'none'
+ if s is None:
+                        s = b'none'
self.warn(
_(b"(deprecated '%%' in path %s=%s from %s)\n")
% (n, p, s)
)
p = p.replace(b'%%', b'%')
- p = util.expandpath(p)
- if not urlutil.hasscheme(p) and not os.path.isabs(p):
- p = os.path.normpath(os.path.join(root, p))
- c.alter(b"paths", n, p)
+ if p != old_p:
+ c.alter(b"paths", n, p)
if section in (None, b'ui'):
# update ui options
@@ -886,10 +893,10 @@ class ui(object):
"""
# default is not always a list
v = self.configwith(
- config.parselist, section, name, default, b'list', untrusted
+ stringutil.parselist, section, name, default, b'list', untrusted
)
if isinstance(v, bytes):
- return config.parselist(v)
+ return stringutil.parselist(v)
elif v is None:
return []
return v
@@ -941,7 +948,48 @@ class ui(object):
)
return items
- def walkconfig(self, untrusted=False):
+ def walkconfig(self, untrusted=False, all_known=False):
+ defined = self._walk_config(untrusted)
+ if not all_known:
+ for d in defined:
+ yield d
+ return
+ known = self._walk_known()
+ current_defined = next(defined, None)
+ current_known = next(known, None)
+ while current_defined is not None or current_known is not None:
+ if current_defined is None:
+ yield current_known
+ current_known = next(known, None)
+ elif current_known is None:
+ yield current_defined
+ current_defined = next(defined, None)
+ elif current_known[0:2] == current_defined[0:2]:
+ yield current_defined
+ current_defined = next(defined, None)
+ current_known = next(known, None)
+ elif current_known[0:2] < current_defined[0:2]:
+ yield current_known
+ current_known = next(known, None)
+ else:
+ yield current_defined
+ current_defined = next(defined, None)
+
+ def _walk_known(self):
+ for section, items in sorted(self._knownconfig.items()):
+ for k, i in sorted(items.items()):
+ # We don't have a way to display generic config items well, so skip them
+ if i.generic:
+ continue
+ if callable(i.default):
+ default = i.default()
+ elif i.default is configitems.dynamicdefault:
+ default = b''
+ else:
+ default = i.default
+ yield section, i.name, default
+
+ def _walk_config(self, untrusted):
cfg = self._data(untrusted)
for section in cfg.sections():
for name, value in self.configitems(section, untrusted):
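
Note on the walkconfig(all_known=True) change above: both _walk_config and _walk_known yield (section, name, value) tuples sorted by (section, name), and the explicitly-set value wins on a tie. A standalone sketch of the same merge pattern (hypothetical helper name and data, not part of the patch):

    def merge_sorted(defined, known):
        # merge two iterators sorted by (section, name); prefer `defined` on ties
        d = next(defined, None)
        k = next(known, None)
        while d is not None or k is not None:
            if k is None or (d is not None and d[0:2] <= k[0:2]):
                if k is not None and k[0:2] == d[0:2]:
                    k = next(known, None)
                yield d
                d = next(defined, None)
            else:
                yield k
                k = next(known, None)

    defined = iter([(b'ui', b'username', b'alice')])
    known = iter([(b'ui', b'username', b''), (b'ui', b'verbose', False)])
    assert list(merge_sorted(defined, known)) == [
        (b'ui', b'username', b'alice'),
        (b'ui', b'verbose', False),
    ]
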
@@ -1057,6 +1105,8 @@ class ui(object):
This method exist as `getpath` need a ui for potential warning message.
"""
+ msg = b'ui.getpath is deprecated, use `get_*` functions from urlutil'
+ self.deprecwarn(msg, b'6.0')
return self.paths.getpath(self, *args, **kwargs)
@property
@@ -1096,6 +1146,14 @@ class ui(object):
self._fmsg = f
self._fmsgout, self._fmsgerr = _selectmsgdests(self)
+ @contextlib.contextmanager
+ def silent(self, error=False, subproc=False, labeled=False):
+ self.pushbuffer(error=error, subproc=subproc, labeled=labeled)
+ try:
+ yield
+ finally:
+ self.popbuffer()
+
def pushbuffer(self, error=False, subproc=False, labeled=False):
"""install a buffer to capture standard output of the ui object
diff --git a/mercurial/unionrepo.py b/mercurial/unionrepo.py
--- a/mercurial/unionrepo.py
+++ b/mercurial/unionrepo.py
@@ -31,9 +31,13 @@ from . import (
vfs as vfsmod,
)
+from .revlogutils import (
+ constants as revlog_constants,
+)
+
class unionrevlog(revlog.revlog):
- def __init__(self, opener, indexfile, revlog2, linkmapper):
+ def __init__(self, opener, radix, revlog2, linkmapper):
# How it works:
# To retrieve a revision, we just need to know the node id so we can
# look it up in revlog2.
@@ -41,7 +45,11 @@ class unionrevlog(revlog.revlog):
# To differentiate a rev in the second revlog from a rev in the revlog,
# we check revision against repotiprev.
opener = vfsmod.readonlyvfs(opener)
- revlog.revlog.__init__(self, opener, indexfile)
+ target = getattr(revlog2, 'target', None)
+ if target is None:
+ # a revlog wrapper, e.g. the manifestlog, which is not an actual revlog
+ target = revlog2._revlog.target
+ revlog.revlog.__init__(self, opener, target=target, radix=radix)
self.revlog2 = revlog2
n = len(self)
@@ -50,7 +58,20 @@ class unionrevlog(revlog.revlog):
for rev2 in self.revlog2:
rev = self.revlog2.index[rev2]
# rev numbers - in revlog2, very different from self.rev
- _start, _csize, rsize, base, linkrev, p1rev, p2rev, node = rev
+ (
+ _start,
+ _csize,
+ rsize,
+ base,
+ linkrev,
+ p1rev,
+ p2rev,
+ node,
+ _sdo,
+ _sds,
+ _dcm,
+ _sdcm,
+ ) = rev
flags = _start & 0xFFFF
if linkmapper is None: # link is to same revlog
@@ -82,6 +103,10 @@ class unionrevlog(revlog.revlog):
self.rev(p1node),
self.rev(p2node),
node,
+ 0, # sidedata offset
+ 0, # sidedata size
+ revlog_constants.COMP_MODE_INLINE,
+ revlog_constants.COMP_MODE_INLINE,
)
self.index.append(e)
self.bundlerevs.add(n)
@@ -147,9 +172,7 @@ class unionchangelog(unionrevlog, change
changelog.changelog.__init__(self, opener)
linkmapper = None
changelog2 = changelog.changelog(opener2)
- unionrevlog.__init__(
- self, opener, self.indexfile, changelog2, linkmapper
- )
+ unionrevlog.__init__(self, opener, self.radix, changelog2, linkmapper)
class unionmanifest(unionrevlog, manifest.manifestrevlog):
@@ -157,7 +180,7 @@ class unionmanifest(unionrevlog, manifes
manifest.manifestrevlog.__init__(self, nodeconstants, opener)
manifest2 = manifest.manifestrevlog(nodeconstants, opener2)
unionrevlog.__init__(
- self, opener, self.indexfile, manifest2, linkmapper
+ self, opener, self._revlog.radix, manifest2, linkmapper
)
@@ -166,7 +189,7 @@ class unionfilelog(filelog.filelog):
filelog.filelog.__init__(self, opener, path)
filelog2 = filelog.filelog(opener2, path)
self._revlog = unionrevlog(
- opener, self.indexfile, filelog2._revlog, linkmapper
+ opener, self._revlog.radix, filelog2._revlog, linkmapper
)
self._repo = repo
self.repotiprev = self._revlog.repotiprev
diff --git a/mercurial/upgrade_utils/actions.py b/mercurial/upgrade_utils/actions.py
--- a/mercurial/upgrade_utils/actions.py
+++ b/mercurial/upgrade_utils/actions.py
@@ -30,6 +30,8 @@ if pycompat.TYPE_CHECKING:
RECLONES_REQUIREMENTS = {
requirements.GENERALDELTA_REQUIREMENT,
requirements.SPARSEREVLOG_REQUIREMENT,
+ requirements.REVLOGV2_REQUIREMENT,
+ requirements.CHANGELOGV2_REQUIREMENT,
}
@@ -42,92 +44,16 @@ OPTIMISATION = b'optimization'
class improvement(object):
- """Represents an improvement that can be made as part of an upgrade.
-
- The following attributes are defined on each instance:
-
- name
- Machine-readable string uniquely identifying this improvement. It
- will be mapped to an action later in the upgrade process.
-
- type
- Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
- A format variant is where we change the storage format. Not all format
- variant changes are an obvious problem.
- An optimization is an action (sometimes optional) that
- can be taken to further improve the state of the repository.
-
- description
- Message intended for humans explaining the improvement in more detail,
- including the implications of it. For ``FORMAT_VARIANT`` types, should be
- worded in the present tense. For ``OPTIMISATION`` types, should be
- worded in the future tense.
+ """Represents an improvement that can be made as part of an upgrade."""
- upgrademessage
- Message intended for humans explaining what an upgrade addressing this
- issue will do. Should be worded in the future tense.
-
- postupgrademessage
- Message intended for humans which will be shown post an upgrade
- operation when the improvement will be added
-
- postdowngrademessage
- Message intended for humans which will be shown post an upgrade
- operation in which this improvement was removed
-
- touches_filelogs (bool)
- Whether this improvement touches filelogs
-
- touches_manifests (bool)
- Whether this improvement touches manifests
-
- touches_changelog (bool)
- Whether this improvement touches changelog
+ ### The following attributes should be defined for each subclass:
- touches_requirements (bool)
- Whether this improvement changes repository requirements
- """
-
- def __init__(self, name, type, description, upgrademessage):
- self.name = name
- self.type = type
- self.description = description
- self.upgrademessage = upgrademessage
- self.postupgrademessage = None
- self.postdowngrademessage = None
- # By default for now, we assume every improvement touches
- # all the things
- self.touches_filelogs = True
- self.touches_manifests = True
- self.touches_changelog = True
- self.touches_requirements = True
-
- def __eq__(self, other):
- if not isinstance(other, improvement):
- # This is what python tell use to do
- return NotImplemented
- return self.name == other.name
-
- def __ne__(self, other):
- return not (self == other)
-
- def __hash__(self):
- return hash(self.name)
-
-
-allformatvariant = [] # type: List[Type['formatvariant']]
-
-
-def registerformatvariant(cls):
- allformatvariant.append(cls)
- return cls
-
-
-class formatvariant(improvement):
- """an improvement subclass dedicated to repository format"""
-
- type = FORMAT_VARIANT
- ### The following attributes should be defined for each class:
+ # Either ``FORMAT_VARIANT`` or ``OPTIMISATION``.
+ # A format variant is where we change the storage format. Not all format
+ # variant changes are an obvious problem.
+ # An optimization is an action (sometimes optional) that
+ # can be taken to further improve the state of the repository.
+ type = None
# machine-readable string uniquely identifying this improvement. it will be
# mapped to an action later in the upgrade process.
@@ -154,14 +80,36 @@ class formatvariant(improvement):
# operation in which this improvement was removed
postdowngrademessage = None
- # By default for now, we assume every improvement touches all the things
+ # By default we assume that every improvement touches requirements and all revlogs
+
+ # Whether this improvement touches filelogs
touches_filelogs = True
+
+ # Whether this improvement touches manifests
touches_manifests = True
+
+ # Whether this improvement touches changelog
touches_changelog = True
+
+ # Whether this improvement changes repository requirements
touches_requirements = True
- def __init__(self):
- raise NotImplementedError()
+ # Whether this improvement touches the dirstate
+ touches_dirstate = False
+
+
+allformatvariant = [] # type: List[Type['formatvariant']]
+
+
+def registerformatvariant(cls):
+ allformatvariant.append(cls)
+ return cls
+
+
+class formatvariant(improvement):
+ """an improvement subclass dedicated to repository format"""
+
+ type = FORMAT_VARIANT
@staticmethod
def fromrepo(repo):
@@ -222,6 +170,27 @@ class fncache(requirementformatvariant):
@registerformatvariant
+class dirstatev2(requirementformatvariant):
+ name = b'dirstate-v2'
+ _requirement = requirements.DIRSTATE_V2_REQUIREMENT
+
+ default = False
+
+ description = _(
+ b'version 1 of the dirstate file format requires '
+ b'reading and parsing it all at once.'
+ )
+
+ upgrademessage = _(b'"hg status" will be faster')
+
+ touches_filelogs = False
+ touches_manifests = False
+ touches_changelog = False
+ touches_requirements = True
+ touches_dirstate = True
+
+
+@registerformatvariant
class dotencode(requirementformatvariant):
name = b'dotencode'
@@ -372,6 +341,15 @@ class revlogv2(requirementformatvariant)
@registerformatvariant
+class changelogv2(requirementformatvariant):
+ name = b'changelog-v2'
+ _requirement = requirements.CHANGELOGV2_REQUIREMENT
+ default = False
+ description = _(b'An iteration of the revlog focussed on changelog needs.')
+ upgrademessage = _(b'quite experimental')
+
+
+@registerformatvariant
class removecldeltachain(formatvariant):
name = b'plain-cl-delta'
@@ -534,87 +512,100 @@ def register_optimization(obj):
return obj
-register_optimization(
- improvement(
- name=b're-delta-parent',
- type=OPTIMISATION,
- description=_(
- b'deltas within internal storage will be recalculated to '
- b'choose an optimal base revision where this was not '
- b'already done; the size of the repository may shrink and '
- b'various operations may become faster; the first time '
- b'this optimization is performed could slow down upgrade '
- b'execution considerably; subsequent invocations should '
- b'not run noticeably slower'
- ),
- upgrademessage=_(
- b'deltas within internal storage will choose a new '
- b'base revision if needed'
- ),
+class optimization(improvement):
+ """an improvement subclass dedicated to optimizations"""
+
+ type = OPTIMISATION
+
+
+@register_optimization
+class redeltaparents(optimization):
+ name = b're-delta-parent'
+
+ type = OPTIMISATION
+
+ description = _(
+ b'deltas within internal storage will be recalculated to '
+ b'choose an optimal base revision where this was not '
+ b'already done; the size of the repository may shrink and '
+ b'various operations may become faster; the first time '
+ b'this optimization is performed could slow down upgrade '
+ b'execution considerably; subsequent invocations should '
+ b'not run noticeably slower'
)
-)
+
+ upgrademessage = _(
+ b'deltas within internal storage will choose a new '
+ b'base revision if needed'
+ )
+
+
+@register_optimization
+class redeltamultibase(optimization):
+ name = b're-delta-multibase'
+
+ type = OPTIMISATION
+
+ description = _(
+ b'deltas within internal storage will be recalculated '
+ b'against multiple base revision and the smallest '
+ b'difference will be used; the size of the repository may '
+ b'shrink significantly when there are many merges; this '
+ b'optimization will slow down execution in proportion to '
+ b'the number of merges in the repository and the amount '
+ b'of files in the repository; this slow down should not '
+ b'be significant unless there are tens of thousands of '
+ b'files and thousands of merges'
+ )
-register_optimization(
- improvement(
- name=b're-delta-multibase',
- type=OPTIMISATION,
- description=_(
- b'deltas within internal storage will be recalculated '
- b'against multiple base revision and the smallest '
- b'difference will be used; the size of the repository may '
- b'shrink significantly when there are many merges; this '
- b'optimization will slow down execution in proportion to '
- b'the number of merges in the repository and the amount '
- b'of files in the repository; this slow down should not '
- b'be significant unless there are tens of thousands of '
- b'files and thousands of merges'
- ),
- upgrademessage=_(
- b'deltas within internal storage will choose an '
- b'optimal delta by computing deltas against multiple '
- b'parents; may slow down execution time '
- b'significantly'
- ),
+ upgrademessage = _(
+ b'deltas within internal storage will choose an '
+ b'optimal delta by computing deltas against multiple '
+ b'parents; may slow down execution time '
+ b'significantly'
)
-)
+
+
+@register_optimization
+class redeltaall(optimization):
+ name = b're-delta-all'
+
+ type = OPTIMISATION
+
+ description = _(
+ b'deltas within internal storage will always be '
+ b'recalculated without reusing prior deltas; this will '
+ b'likely make execution run several times slower; this '
+ b'optimization is typically not needed'
+ )
-register_optimization(
- improvement(
- name=b're-delta-all',
- type=OPTIMISATION,
- description=_(
- b'deltas within internal storage will always be '
- b'recalculated without reusing prior deltas; this will '
- b'likely make execution run several times slower; this '
- b'optimization is typically not needed'
- ),
- upgrademessage=_(
- b'deltas within internal storage will be fully '
- b'recomputed; this will likely drastically slow down '
- b'execution time'
- ),
+ upgrademessage = _(
+ b'deltas within internal storage will be fully '
+ b'recomputed; this will likely drastically slow down '
+ b'execution time'
)
-)
+
+
+@register_optimization
+class redeltafulladd(optimization):
+ name = b're-delta-fulladd'
+
+ type = OPTIMISATION
-register_optimization(
- improvement(
- name=b're-delta-fulladd',
- type=OPTIMISATION,
- description=_(
- b'every revision will be re-added as if it was new '
- b'content. It will go through the full storage '
- b'mechanism giving extensions a chance to process it '
- b'(eg. lfs). This is similar to "re-delta-all" but even '
- b'slower since more logic is involved.'
- ),
- upgrademessage=_(
- b'each revision will be added as new content to the '
- b'internal storage; this will likely drastically slow '
- b'down execution time, but some extensions might need '
- b'it'
- ),
+ description = _(
+ b'every revision will be re-added as if it was new '
+ b'content. It will go through the full storage '
+ b'mechanism giving extensions a chance to process it '
+ b'(eg. lfs). This is similar to "re-delta-all" but even '
+ b'slower since more logic is involved.'
)
-)
+
+ upgrademessage = _(
+ b'each revision will be added as new content to the '
+ b'internal storage; this will likely drastically slow '
+ b'down execution time, but some extensions might need '
+ b'it'
+ )
def findoptimizations(repo):
@@ -642,7 +633,10 @@ def determine_upgrade_actions(
newactions = []
for d in format_upgrades:
- name = d._requirement
+ if util.safehasattr(d, '_requirement'):
+ name = d._requirement
+ else:
+ name = None
# If the action is a requirement that doesn't show up in the
# destination requirements, prune the action.
@@ -677,7 +671,6 @@ class UpgradeOperation(object):
self.current_requirements = current_requirements
# list of upgrade actions the operation will perform
self.upgrade_actions = upgrade_actions
- self._upgrade_actions_names = set([a.name for a in upgrade_actions])
self.removed_actions = removed_actions
self.revlogs_to_process = revlogs_to_process
# requirements which will be added by the operation
@@ -700,41 +693,42 @@ class UpgradeOperation(object):
]
# delta reuse mode of this upgrade operation
+ upgrade_actions_names = self.upgrade_actions_names
self.delta_reuse_mode = revlog.revlog.DELTAREUSEALWAYS
- if b're-delta-all' in self._upgrade_actions_names:
+ if b're-delta-all' in upgrade_actions_names:
self.delta_reuse_mode = revlog.revlog.DELTAREUSENEVER
- elif b're-delta-parent' in self._upgrade_actions_names:
+ elif b're-delta-parent' in upgrade_actions_names:
self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
- elif b're-delta-multibase' in self._upgrade_actions_names:
+ elif b're-delta-multibase' in upgrade_actions_names:
self.delta_reuse_mode = revlog.revlog.DELTAREUSESAMEREVS
- elif b're-delta-fulladd' in self._upgrade_actions_names:
+ elif b're-delta-fulladd' in upgrade_actions_names:
self.delta_reuse_mode = revlog.revlog.DELTAREUSEFULLADD
# should this operation force re-delta of both parents
self.force_re_delta_both_parents = (
- b're-delta-multibase' in self._upgrade_actions_names
+ b're-delta-multibase' in upgrade_actions_names
)
# should this operation create a backup of the store
self.backup_store = backup_store
- # whether the operation touches different revlogs at all or not
- self.touches_filelogs = self._touches_filelogs()
- self.touches_manifests = self._touches_manifests()
- self.touches_changelog = self._touches_changelog()
- # whether the operation touches requirements file or not
- self.touches_requirements = self._touches_requirements()
- self.touches_store = (
- self.touches_filelogs
- or self.touches_manifests
- or self.touches_changelog
- )
+ @property
+ def upgrade_actions_names(self):
+ return set([a.name for a in self.upgrade_actions])
+
+ @property
+ def requirements_only(self):
# does the operation only touches repository requirement
- self.requirements_only = (
- self.touches_requirements and not self.touches_store
+ return (
+ self.touches_requirements
+ and not self.touches_filelogs
+ and not self.touches_manifests
+ and not self.touches_changelog
+ and not self.touches_dirstate
)
- def _touches_filelogs(self):
+ @property
+ def touches_filelogs(self):
for a in self.upgrade_actions:
# in optimisations, we re-process the revlogs again
if a.type == OPTIMISATION:
@@ -746,7 +740,8 @@ class UpgradeOperation(object):
return True
return False
- def _touches_manifests(self):
+ @property
+ def touches_manifests(self):
for a in self.upgrade_actions:
# in optimisations, we re-process the revlogs again
if a.type == OPTIMISATION:
@@ -758,7 +753,8 @@ class UpgradeOperation(object):
return True
return False
- def _touches_changelog(self):
+ @property
+ def touches_changelog(self):
for a in self.upgrade_actions:
# in optimisations, we re-process the revlogs again
if a.type == OPTIMISATION:
@@ -770,7 +766,8 @@ class UpgradeOperation(object):
return True
return False
- def _touches_requirements(self):
+ @property
+ def touches_requirements(self):
for a in self.upgrade_actions:
# optimisations are used to re-process revlogs and does not result
# in a requirement being added or removed
@@ -782,6 +779,18 @@ class UpgradeOperation(object):
if a.touches_requirements:
return True
+ @property
+ def touches_dirstate(self):
+ for a in self.upgrade_actions:
+ # revlog optimisations do not affect the dirstate
+ if a.type == OPTIMISATION:
+ pass
+ elif a.touches_dirstate:
+ return True
+ for a in self.removed_actions:
+ if a.touches_dirstate:
+ return True
+
return False
def _write_labeled(self, l, label):
@@ -935,12 +944,13 @@ def supportremovedrequirements(repo):
"""
supported = {
requirements.SPARSEREVLOG_REQUIREMENT,
- requirements.SIDEDATA_REQUIREMENT,
requirements.COPIESSDC_REQUIREMENT,
requirements.NODEMAP_REQUIREMENT,
requirements.SHARESAFE_REQUIREMENT,
requirements.REVLOGV2_REQUIREMENT,
+ requirements.CHANGELOGV2_REQUIREMENT,
requirements.REVLOGV1_REQUIREMENT,
+ requirements.DIRSTATE_V2_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -966,11 +976,12 @@ def supporteddestrequirements(repo):
requirements.REVLOGV1_REQUIREMENT, # allowed in case of downgrade
requirements.STORE_REQUIREMENT,
requirements.SPARSEREVLOG_REQUIREMENT,
- requirements.SIDEDATA_REQUIREMENT,
requirements.COPIESSDC_REQUIREMENT,
requirements.NODEMAP_REQUIREMENT,
requirements.SHARESAFE_REQUIREMENT,
requirements.REVLOGV2_REQUIREMENT,
+ requirements.CHANGELOGV2_REQUIREMENT,
+ requirements.DIRSTATE_V2_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -996,12 +1007,13 @@ def allowednewrequirements(repo):
requirements.FNCACHE_REQUIREMENT,
requirements.GENERALDELTA_REQUIREMENT,
requirements.SPARSEREVLOG_REQUIREMENT,
- requirements.SIDEDATA_REQUIREMENT,
requirements.COPIESSDC_REQUIREMENT,
requirements.NODEMAP_REQUIREMENT,
requirements.SHARESAFE_REQUIREMENT,
requirements.REVLOGV1_REQUIREMENT,
requirements.REVLOGV2_REQUIREMENT,
+ requirements.CHANGELOGV2_REQUIREMENT,
+ requirements.DIRSTATE_V2_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
diff --git a/mercurial/upgrade_utils/engine.py b/mercurial/upgrade_utils/engine.py
--- a/mercurial/upgrade_utils/engine.py
+++ b/mercurial/upgrade_utils/engine.py
@@ -19,13 +19,33 @@ from .. import (
metadata,
pycompat,
requirements,
- revlog,
scmutil,
store,
util,
vfs as vfsmod,
)
-from ..revlogutils import nodemap
+from ..revlogutils import (
+ constants as revlogconst,
+ flagutil,
+ nodemap,
+ sidedata as sidedatamod,
+)
+from . import actions as upgrade_actions
+
+
+def get_sidedata_helpers(srcrepo, dstrepo):
+ use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
+ sequential = pycompat.iswindows or not use_w
+ if not sequential:
+ srcrepo.register_sidedata_computer(
+ revlogconst.KIND_CHANGELOG,
+ sidedatamod.SD_FILES,
+ (sidedatamod.SD_FILES,),
+ metadata._get_worker_sidedata_adder(srcrepo, dstrepo),
+ flagutil.REVIDX_HASCOPIESINFO,
+ replace=True,
+ )
+ return sidedatamod.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata)
def _revlogfrompath(repo, rl_type, path):
@@ -44,7 +64,12 @@ def _revlogfrompath(repo, rl_type, path)
)
else:
# drop the extension and the `data/` prefix
- path = path.rsplit(b'.', 1)[0].split(b'/', 1)[1]
+ path_part = path.rsplit(b'.', 1)[0].split(b'/', 1)
+ if len(path_part) < 2:
+ msg = _(b'cannot recognize revlog from filename: %s')
+ msg %= path
+ raise error.Abort(msg)
+ path = path_part[1]
return filelog.filelog(repo.svfs, path)
@@ -61,16 +86,16 @@ def _copyrevlog(tr, destrepo, oldrl, rl_
oldvfs = oldrl.opener
newvfs = newrl.opener
- oldindex = oldvfs.join(oldrl.indexfile)
- newindex = newvfs.join(newrl.indexfile)
- olddata = oldvfs.join(oldrl.datafile)
- newdata = newvfs.join(newrl.datafile)
+ oldindex = oldvfs.join(oldrl._indexfile)
+ newindex = newvfs.join(newrl._indexfile)
+ olddata = oldvfs.join(oldrl._datafile)
+ newdata = newvfs.join(newrl._datafile)
- with newvfs(newrl.indexfile, b'w'):
+ with newvfs(newrl._indexfile, b'w'):
pass # create all the directories
util.copyfile(oldindex, newindex)
- copydata = oldrl.opener.exists(oldrl.datafile)
+ copydata = oldrl.opener.exists(oldrl._datafile)
if copydata:
util.copyfile(olddata, newdata)
@@ -89,25 +114,6 @@ UPGRADE_ALL_REVLOGS = frozenset(
)
-def getsidedatacompanion(srcrepo, dstrepo):
- sidedatacompanion = None
- removedreqs = srcrepo.requirements - dstrepo.requirements
- addedreqs = dstrepo.requirements - srcrepo.requirements
- if requirements.SIDEDATA_REQUIREMENT in removedreqs:
-
- def sidedatacompanion(rl, rev):
- rl = getattr(rl, '_revlog', rl)
- if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
- return True, (), {}, 0, 0
- return False, (), {}, 0, 0
-
- elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
- sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
- elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
- sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
- return sidedatacompanion
-
-
def matchrevlog(revlogfilter, rl_type):
"""check if a revlog is selected for cloning.
@@ -131,7 +137,7 @@ def _perform_clone(
rl_type,
unencoded,
upgrade_op,
- sidedatacompanion,
+ sidedata_helpers,
oncopiedrevision,
):
"""returns the new revlog object created"""
@@ -147,7 +153,7 @@ def _perform_clone(
addrevisioncb=oncopiedrevision,
deltareuse=upgrade_op.delta_reuse_mode,
forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
- sidedatacompanion=sidedatacompanion,
+ sidedata_helpers=sidedata_helpers,
)
else:
msg = _(b'blindly copying %s containing %i revisions\n')
@@ -199,6 +205,17 @@ def _clonerevlogs(
if not rl_type & store.FILEFLAGS_REVLOG_MAIN:
continue
+ # the store.walk function will wrongly pick up transaction backups and
+ # get confused. As a quick fix for the 5.9 release, we ignore those.
+ # (this is not a module constant because it seems better to keep the
+ # hack together)
+ skip_undo = (
+ b'undo.backup.00changelog.i',
+ b'undo.backup.00manifest.i',
+ )
+ if unencoded in skip_undo:
+ continue
+
rl = _revlogfrompath(srcrepo, rl_type, unencoded)
info = rl.storageinfo(
@@ -257,7 +274,7 @@ def _clonerevlogs(
def oncopiedrevision(rl, rev, node):
progress.increment()
- sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
+ sidedata_helpers = get_sidedata_helpers(srcrepo, dstrepo)
# Migrating filelogs
ui.status(
@@ -282,7 +299,7 @@ def _clonerevlogs(
rl_type,
unencoded,
upgrade_op,
- sidedatacompanion,
+ sidedata_helpers,
oncopiedrevision,
)
info = newrl.storageinfo(storedsize=True)
@@ -322,7 +339,7 @@ def _clonerevlogs(
rl_type,
unencoded,
upgrade_op,
- sidedatacompanion,
+ sidedata_helpers,
oncopiedrevision,
)
info = newrl.storageinfo(storedsize=True)
@@ -361,7 +378,7 @@ def _clonerevlogs(
rl_type,
unencoded,
upgrade_op,
- sidedatacompanion,
+ sidedata_helpers,
oncopiedrevision,
)
info = newrl.storageinfo(storedsize=True)
@@ -458,6 +475,19 @@ def upgrade(ui, srcrepo, dstrepo, upgrad
)
)
+ if upgrade_actions.dirstatev2 in upgrade_op.upgrade_actions:
+ ui.status(_(b'upgrading to dirstate-v2 from v1\n'))
+ upgrade_dirstate(ui, srcrepo, upgrade_op, b'v1', b'v2')
+ upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatev2)
+
+ if upgrade_actions.dirstatev2 in upgrade_op.removed_actions:
+ ui.status(_(b'downgrading from dirstate-v2 to v1\n'))
+ upgrade_dirstate(ui, srcrepo, upgrade_op, b'v2', b'v1')
+ upgrade_op.removed_actions.remove(upgrade_actions.dirstatev2)
+
+ if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions):
+ return
+
if upgrade_op.requirements_only:
ui.status(_(b'upgrading repository requirements\n'))
scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
@@ -466,7 +496,7 @@ def upgrade(ui, srcrepo, dstrepo, upgrad
# through the whole cloning process
elif (
len(upgrade_op.upgrade_actions) == 1
- and b'persistent-nodemap' in upgrade_op._upgrade_actions_names
+ and b'persistent-nodemap' in upgrade_op.upgrade_actions_names
and not upgrade_op.removed_actions
):
ui.status(
@@ -591,3 +621,29 @@ def upgrade(ui, srcrepo, dstrepo, upgrad
backupvfs.unlink(b'store/lock')
return backuppath
+
+
+def upgrade_dirstate(ui, srcrepo, upgrade_op, old, new):
+ if upgrade_op.backup_store:
+ backuppath = pycompat.mkdtemp(
+ prefix=b'upgradebackup.', dir=srcrepo.path
+ )
+ ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
+ backupvfs = vfsmod.vfs(backuppath)
+ util.copyfile(
+ srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
+ )
+ util.copyfile(
+ srcrepo.vfs.join(b'dirstate'), backupvfs.join(b'dirstate')
+ )
+
+ assert srcrepo.dirstate._use_dirstate_v2 == (old == b'v2')
+ srcrepo.dirstate._map._use_dirstate_tree = True
+ srcrepo.dirstate._map.preload()
+ srcrepo.dirstate._use_dirstate_v2 = new == b'v2'
+ srcrepo.dirstate._map._use_dirstate_v2 = srcrepo.dirstate._use_dirstate_v2
+ srcrepo.dirstate._dirty = True
+ srcrepo.vfs.unlink(b'dirstate')
+ srcrepo.dirstate.write(None)
+
+ scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
diff --git a/mercurial/url.py b/mercurial/url.py
--- a/mercurial/url.py
+++ b/mercurial/url.py
@@ -10,7 +10,6 @@
from __future__ import absolute_import
import base64
-import os
import socket
import sys
@@ -685,7 +684,7 @@ def open(ui, url_, data=None, sendaccept
u.scheme = u.scheme.lower()
url_, authinfo = u.authinfo()
else:
- path = util.normpath(os.path.abspath(url_))
+ path = util.normpath(util.abspath(url_))
url_ = b'file://' + pycompat.bytesurl(
urlreq.pathname2url(pycompat.fsdecode(path))
)
diff --git a/mercurial/util.py b/mercurial/util.py
--- a/mercurial/util.py
+++ b/mercurial/util.py
@@ -34,6 +34,7 @@ import time
import traceback
import warnings
+from .node import hex
from .thirdparty import attr
from .pycompat import (
delattr,
@@ -98,6 +99,7 @@ else:
_ = i18n._
+abspath = platform.abspath
bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
@@ -1908,7 +1910,16 @@ def checksignature(func, depth=1):
}
-def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
+def copyfile(
+ src,
+ dest,
+ hardlink=False,
+ copystat=False,
+ checkambig=False,
+ nb_bytes=None,
+ no_hardlink_cb=None,
+ check_fs_hardlink=True,
+):
"""copy a file, preserving mode and optionally other stat info like
atime/mtime
@@ -1917,6 +1928,8 @@ def copyfile(src, dest, hardlink=False,
repo.wlock).
copystat and checkambig should be exclusive.
+
+ nb_bytes: if set only copy the first `nb_bytes` of the source file.
"""
assert not (copystat and checkambig)
oldstat = None
@@ -1924,7 +1937,7 @@ def copyfile(src, dest, hardlink=False,
if checkambig:
oldstat = checkambig and filestat.frompath(dest)
unlink(dest)
- if hardlink:
+ if hardlink and check_fs_hardlink:
# Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
# unless we are confident that dest is on a whitelisted filesystem.
try:
@@ -1932,17 +1945,27 @@ def copyfile(src, dest, hardlink=False,
except OSError:
fstype = None
if fstype not in _hardlinkfswhitelist:
+ if no_hardlink_cb is not None:
+ no_hardlink_cb()
hardlink = False
if hardlink:
try:
oslink(src, dest)
+ if nb_bytes is not None:
+ m = "the `nb_bytes` argument is incompatible with `hardlink`"
+ raise error.ProgrammingError(m)
return
- except (IOError, OSError):
- pass # fall back to normal copy
+ except (IOError, OSError) as exc:
+ if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
+ no_hardlink_cb()
+ # fall back to normal copy
if os.path.islink(src):
os.symlink(os.readlink(src), dest)
# copytime is ignored for symlinks, but in general copytime isn't needed
# for them anyway
+ if nb_bytes is not None:
+ m = "cannot use `nb_bytes` on a symlink"
+ raise error.ProgrammingError(m)
else:
try:
shutil.copyfile(src, dest)
@@ -1959,6 +1982,10 @@ def copyfile(src, dest, hardlink=False,
oldstat.stat[stat.ST_MTIME] + 1
) & 0x7FFFFFFF
os.utime(dest, (advanced, advanced))
+ # We could do something smarter using a `copy_file_range` call or similar
+ if nb_bytes is not None:
+ with open(dest, mode='r+') as f:
+ f.truncate(nb_bytes)
except shutil.Error as inst:
raise error.Abort(stringutil.forcebytestr(inst))
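
Note on the copyfile() changes above: nb_bytes keeps only the leading bytes of the copy (the file is copied in full, then the destination is truncated), and no_hardlink_cb/check_fs_hardlink let callers observe or skip the hardlink fallback. A usage sketch with hypothetical paths; as shown above, nb_bytes is rejected for hardlinks and symlinks:

    from mercurial import util

    # copy src and keep only its first 1024 bytes in dest (hypothetical paths)
    util.copyfile(b'/tmp/src.bin', b'/tmp/dest.bin', nb_bytes=1024)

    # get notified when a requested hardlink degrades to a plain copy
    def _warn_no_hardlink():
        print('hardlink not possible, falling back to a plain copy')

    util.copyfile(
        b'/tmp/src.bin',
        b'/tmp/dest2.bin',
        hardlink=True,
        no_hardlink_cb=_warn_no_hardlink,
    )
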
@@ -1994,8 +2021,10 @@ def copyfiles(src, dst, hardlink=None, p
if hardlink:
try:
oslink(src, dst)
- except (IOError, OSError):
- hardlink = False
+ except (IOError, OSError) as exc:
+ if exc.errno != errno.EEXIST:
+ hardlink = False
+ # XXX maybe try to relink if the file exists?
shutil.copy(src, dst)
else:
shutil.copy(src, dst)
@@ -2604,7 +2633,7 @@ def makedirs(name, mode=None, notindexed
return
if err.errno != errno.ENOENT or not name:
raise
- parent = os.path.dirname(os.path.abspath(name))
+ parent = os.path.dirname(abspath(name))
if parent == name:
raise
makedirs(parent, mode, notindexed)
diff --git a/mercurial/utils/storageutil.py b/mercurial/utils/storageutil.py
--- a/mercurial/utils/storageutil.py
+++ b/mercurial/utils/storageutil.py
@@ -13,8 +13,8 @@ import struct
from ..i18n import _
from ..node import (
bin,
- nullid,
nullrev,
+ sha1nodeconstants,
)
from .. import (
dagop,
@@ -26,7 +26,11 @@ from ..interfaces import repository
from ..revlogutils import sidedata as sidedatamod
from ..utils import hashutil
-_nullhash = hashutil.sha1(nullid)
+_nullhash = hashutil.sha1(sha1nodeconstants.nullid)
+
+# revision data contains extra metadata not part of the official digest
+# Only used in changegroup >= v4.
+CG_FLAG_SIDEDATA = 1
def hashrevisionsha1(text, p1, p2):
@@ -37,7 +41,7 @@ def hashrevisionsha1(text, p1, p2):
content in the revision graph.
"""
# As of now, if one of the parent node is null, p2 is null
- if p2 == nullid:
+ if p2 == sha1nodeconstants.nullid:
# deep copy of a hash is faster than creating one
s = _nullhash.copy()
s.update(p1)
@@ -107,7 +111,7 @@ def filerevisioncopied(store, node):
Returns ``False`` if the file has no copy metadata. Otherwise a
2-tuple of the source filename and node.
"""
- if store.parents(node)[0] != nullid:
+ if store.parents(node)[0] != sha1nodeconstants.nullid:
return False
meta = parsemeta(store.revision(node))[0]
@@ -360,19 +364,7 @@ def emitrevisions(
``assumehaveparentrevisions``
``sidedata_helpers`` (optional)
If not None, means that sidedata should be included.
- A dictionary of revlog type to tuples of `(repo, computers, removers)`:
- * `repo` is used as an argument for computers
- * `computers` is a list of `(category, (keys, computer)` that
- compute the missing sidedata categories that were asked:
- * `category` is the sidedata category
- * `keys` are the sidedata keys to be affected
- * `computer` is the function `(repo, store, rev, sidedata)` that
- returns a new sidedata dict.
- * `removers` will remove the keys corresponding to the categories
- that are present, but not needed.
- If both `computers` and `removers` are empty, sidedata are simply not
- transformed.
- Revlog types are `changelog`, `manifest` or `filelog`.
+ See `revlogutil.sidedata.get_sidedata_helpers`.
"""
fnode = store.node
@@ -486,51 +478,48 @@ def emitrevisions(
available.add(rev)
- sidedata = None
+ serialized_sidedata = None
+ sidedata_flags = (0, 0)
if sidedata_helpers:
- sidedata = store.sidedata(rev)
- sidedata = run_sidedata_helpers(
- store=store,
- sidedata_helpers=sidedata_helpers,
- sidedata=sidedata,
- rev=rev,
- )
- sidedata = sidedatamod.serialize_sidedata(sidedata)
+ try:
+ old_sidedata = store.sidedata(rev)
+ except error.CensoredNodeError:
+ # skip any potential sidedata of the censored revision
+ sidedata = {}
+ else:
+ sidedata, sidedata_flags = sidedatamod.run_sidedata_helpers(
+ store=store,
+ sidedata_helpers=sidedata_helpers,
+ sidedata=old_sidedata,
+ rev=rev,
+ )
+ if sidedata:
+ serialized_sidedata = sidedatamod.serialize_sidedata(sidedata)
+
+ flags = flagsfn(rev) if flagsfn else 0
+ protocol_flags = 0
+ if serialized_sidedata:
+ # Advertise that sidedata exists to the other side
+ protocol_flags |= CG_FLAG_SIDEDATA
+ # Computers and removers can return flags to add and/or remove
+ flags = flags | sidedata_flags[0] & ~sidedata_flags[1]
yield resultcls(
node=node,
p1node=fnode(p1rev),
p2node=fnode(p2rev),
basenode=fnode(baserev),
- flags=flagsfn(rev) if flagsfn else 0,
+ flags=flags,
baserevisionsize=baserevisionsize,
revision=revision,
delta=delta,
- sidedata=sidedata,
+ sidedata=serialized_sidedata,
+ protocol_flags=protocol_flags,
)
prevrev = rev
-def run_sidedata_helpers(store, sidedata_helpers, sidedata, rev):
- """Returns the sidedata for the given revision after running through
- the given helpers.
- - `store`: the revlog this applies to (changelog, manifest, or filelog
- instance)
- - `sidedata_helpers`: see `storageutil.emitrevisions`
- - `sidedata`: previous sidedata at the given rev, if any
- - `rev`: affected rev of `store`
- """
- repo, sd_computers, sd_removers = sidedata_helpers
- kind = store.revlog_kind
- for _keys, sd_computer in sd_computers.get(kind, []):
- sidedata = sd_computer(repo, store, rev, sidedata)
- for keys, _computer in sd_removers.get(kind, []):
- for key in keys:
- sidedata.pop(key, None)
- return sidedata
-
-
def deltaiscensored(delta, baserev, baselenfn):
"""Determine if a delta represents censored revision data.
diff --git a/mercurial/utils/stringutil.py b/mercurial/utils/stringutil.py
--- a/mercurial/utils/stringutil.py
+++ b/mercurial/utils/stringutil.py
@@ -868,6 +868,96 @@ def parsebool(s):
return _booleans.get(s.lower(), None)
+def parselist(value):
+ """parse a configuration value as a list of comma/space separated strings
+
+ >>> parselist(b'this,is "a small" ,test')
+ ['this', 'is', 'a small', 'test']
+ """
+
+ def _parse_plain(parts, s, offset):
+ whitespace = False
+ while offset < len(s) and (
+ s[offset : offset + 1].isspace() or s[offset : offset + 1] == b','
+ ):
+ whitespace = True
+ offset += 1
+ if offset >= len(s):
+ return None, parts, offset
+ if whitespace:
+ parts.append(b'')
+ if s[offset : offset + 1] == b'"' and not parts[-1]:
+ return _parse_quote, parts, offset + 1
+ elif s[offset : offset + 1] == b'"' and parts[-1][-1:] == b'\\':
+ parts[-1] = parts[-1][:-1] + s[offset : offset + 1]
+ return _parse_plain, parts, offset + 1
+ parts[-1] += s[offset : offset + 1]
+ return _parse_plain, parts, offset + 1
+
+ def _parse_quote(parts, s, offset):
+ if offset < len(s) and s[offset : offset + 1] == b'"': # ""
+ parts.append(b'')
+ offset += 1
+ while offset < len(s) and (
+ s[offset : offset + 1].isspace()
+ or s[offset : offset + 1] == b','
+ ):
+ offset += 1
+ return _parse_plain, parts, offset
+
+ while offset < len(s) and s[offset : offset + 1] != b'"':
+ if (
+ s[offset : offset + 1] == b'\\'
+ and offset + 1 < len(s)
+ and s[offset + 1 : offset + 2] == b'"'
+ ):
+ offset += 1
+ parts[-1] += b'"'
+ else:
+ parts[-1] += s[offset : offset + 1]
+ offset += 1
+
+ if offset >= len(s):
+ real_parts = _configlist(parts[-1])
+ if not real_parts:
+ parts[-1] = b'"'
+ else:
+ real_parts[0] = b'"' + real_parts[0]
+ parts = parts[:-1]
+ parts.extend(real_parts)
+ return None, parts, offset
+
+ offset += 1
+ while offset < len(s) and s[offset : offset + 1] in [b' ', b',']:
+ offset += 1
+
+ if offset < len(s):
+ if offset + 1 == len(s) and s[offset : offset + 1] == b'"':
+ parts[-1] += b'"'
+ offset += 1
+ else:
+ parts.append(b'')
+ else:
+ return None, parts, offset
+
+ return _parse_plain, parts, offset
+
+ def _configlist(s):
+ s = s.rstrip(b' ,')
+ if not s:
+ return []
+ parser, parts, offset = _parse_plain, [b''], 0
+ while parser:
+ parser, parts, offset = parser(parts, s, offset)
+ return parts
+
+ if value is not None and isinstance(value, bytes):
+ result = _configlist(value.lstrip(b' ,\n'))
+ else:
+ result = value
+ return result or []
+
+
def evalpythonliteral(s):
"""Evaluate a string containing a Python literal expression"""
# We could backport our tokenizer hack to rewrite '' to u'' if we want
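
Note on the new stringutil.parselist above (used to replace config.parselist in ui.configlist and to split multi-urls values later in this series): values are split on commas and whitespace, and double quotes group segments containing separators. A few illustrative calls beyond the doctest, shown only as expected behaviour:

    from mercurial.utils import stringutil

    assert stringutil.parselist(b'foo bar,baz') == [b'foo', b'bar', b'baz']
    # quoted segments keep embedded commas and spaces together
    assert stringutil.parselist(b'"a, b" c') == [b'a, b', b'c']
    # None and empty values both come back as an empty list
    assert stringutil.parselist(None) == []
    assert stringutil.parselist(b'') == []
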
diff --git a/mercurial/utils/urlutil.py b/mercurial/utils/urlutil.py
--- a/mercurial/utils/urlutil.py
+++ b/mercurial/utils/urlutil.py
@@ -20,6 +20,10 @@ from .. import (
urllibcompat,
)
+from . import (
+ stringutil,
+)
+
if pycompat.TYPE_CHECKING:
from typing import (
@@ -445,13 +449,41 @@ def removeauth(u):
return bytes(u)
+def list_paths(ui, target_path=None):
+ """list all the (name, paths) in the passed ui"""
+ result = []
+ if target_path is None:
+ for name, paths in sorted(pycompat.iteritems(ui.paths)):
+ for p in paths:
+ result.append((name, p))
+
+ else:
+ for path in ui.paths.get(target_path, []):
+ result.append((target_path, path))
+ return result
+
+
+def try_path(ui, url):
+ """try to build a path from a url
+
+ Return None if no path could be built.
+ """
+ try:
+ # we pass the ui instance as warnings might need to be issued
+ return path(ui, None, rawloc=url)
+ except ValueError:
+ return None
+
+
def get_push_paths(repo, ui, dests):
"""yields all the `path` selected as push destination by `dests`"""
if not dests:
if b'default-push' in ui.paths:
- yield ui.paths[b'default-push']
+ for p in ui.paths[b'default-push']:
+ yield p
elif b'default' in ui.paths:
- yield ui.paths[b'default']
+ for p in ui.paths[b'default']:
+ yield p
else:
raise error.ConfigError(
_(b'default repository not configured!'),
@@ -459,7 +491,16 @@ def get_push_paths(repo, ui, dests):
)
else:
for dest in dests:
- yield ui.getpath(dest)
+ if dest in ui.paths:
+ for p in ui.paths[dest]:
+ yield p
+ else:
+ path = try_path(ui, dest)
+ if path is None:
+ msg = _(b'repository %s does not exist')
+ msg %= dest
+ raise error.RepoError(msg)
+ yield path
def get_pull_paths(repo, ui, sources, default_branches=()):
@@ -468,15 +509,16 @@ def get_pull_paths(repo, ui, sources, de
sources = [b'default']
for source in sources:
if source in ui.paths:
- url = ui.paths[source].rawloc
+ for p in ui.paths[source]:
+ yield parseurl(p.rawloc, default_branches)
else:
# Try to resolve as a local path or URI.
- try:
- # we pass the ui instance are warning might need to be issued
- url = path(ui, None, rawloc=source).rawloc
- except ValueError:
+ path = try_path(ui, source)
+ if path is not None:
+ url = path.rawloc
+ else:
url = source
- yield parseurl(url, default_branches)
+ yield parseurl(url, default_branches)
def get_unique_push_path(action, repo, ui, dest=None):
@@ -494,7 +536,16 @@ def get_unique_push_path(action, repo, u
else:
dests = [dest]
dests = list(get_push_paths(repo, ui, dests))
- assert len(dests) == 1
+ if len(dests) != 1:
+ if dest is None:
+ msg = _(
+ b"default path points to %d urls while %s only supports one"
+ )
+ msg %= (len(dests), action)
+ else:
+ msg = _(b"path points to %d urls while %s only supports one: %s")
+ msg %= (len(dests), action, dest)
+ raise error.Abort(msg)
return dests[0]
@@ -508,45 +559,68 @@ def get_unique_pull_path(action, repo, u
The `action` parameter will be used for the error message.
"""
+ urls = []
if source is None:
if b'default' in ui.paths:
- url = ui.paths[b'default'].rawloc
+ urls.extend(p.rawloc for p in ui.paths[b'default'])
else:
# XXX this is the historical default behavior, but that is not
# great, consider breaking BC on this.
- url = b'default'
+ urls.append(b'default')
else:
if source in ui.paths:
- url = ui.paths[source].rawloc
+ urls.extend(p.rawloc for p in ui.paths[source])
else:
# Try to resolve as a local path or URI.
- try:
- # we pass the ui instance are warning might need to be issued
- url = path(ui, None, rawloc=source).rawloc
- except ValueError:
- url = source
- return parseurl(url, default_branches)
+ path = try_path(ui, source)
+ if path is not None:
+ urls.append(path.rawloc)
+ else:
+ urls.append(source)
+ if len(urls) != 1:
+ if source is None:
+ msg = _(
+ b"default path points to %d urls while %s only supports one"
+ )
+ msg %= (len(urls), action)
+ else:
+ msg = _(b"path points to %d urls while %s only supports one: %s")
+ msg %= (len(urls), action, source)
+ raise error.Abort(msg)
+ return parseurl(urls[0], default_branches)
def get_clone_path(ui, source, default_branches=()):
"""return the `(origsource, path, branch)` selected as clone source"""
+ urls = []
if source is None:
if b'default' in ui.paths:
- url = ui.paths[b'default'].rawloc
+ urls.extend(p.rawloc for p in ui.paths[b'default'])
else:
# XXX this is the historical default behavior, but that is not
# great, consider breaking BC on this.
- url = b'default'
+ urls.append(b'default')
else:
if source in ui.paths:
- url = ui.paths[source].rawloc
+ urls.extend(p.rawloc for p in ui.paths[source])
else:
# Try to resolve as a local path or URI.
- try:
- # we pass the ui instance are warning might need to be issued
- url = path(ui, None, rawloc=source).rawloc
- except ValueError:
- url = source
+ path = try_path(ui, source)
+ if path is not None:
+ urls.append(path.rawloc)
+ else:
+ urls.append(source)
+ if len(urls) != 1:
+ if source is None:
+ msg = _(
+ b"default path points to %d urls while only one is supported"
+ )
+ msg %= len(urls)
+ else:
+ msg = _(b"path points to %d urls while only one is supported: %s")
+ msg %= (len(urls), source)
+ raise error.Abort(msg)
+ url = urls[0]
clone_path, branch = parseurl(url, default_branches)
return url, clone_path, branch
@@ -571,15 +645,38 @@ class paths(dict):
def __init__(self, ui):
dict.__init__(self)
- for name, loc in ui.configitems(b'paths', ignoresub=True):
+ home_path = os.path.expanduser(b'~')
+
+ for name, value in ui.configitems(b'paths', ignoresub=True):
# No location is the same as not existing.
- if not loc:
+ if not value:
continue
- loc, sub_opts = ui.configsuboptions(b'paths', name)
- self[name] = path(ui, name, rawloc=loc, suboptions=sub_opts)
+ _value, sub_opts = ui.configsuboptions(b'paths', name)
+ s = ui.configsource(b'paths', name)
+ root_key = (name, value, s)
+ root = ui._path_to_root.get(root_key, home_path)
+
+ multi_url = sub_opts.get(b'multi-urls')
+ if multi_url is not None and stringutil.parsebool(multi_url):
+ base_locs = stringutil.parselist(value)
+ else:
+ base_locs = [value]
- for name, p in sorted(self.items()):
- p.chain_path(ui, self)
+ paths = []
+ for loc in base_locs:
+ loc = os.path.expandvars(loc)
+ loc = os.path.expanduser(loc)
+ if not hasscheme(loc) and not os.path.isabs(loc):
+ loc = os.path.normpath(os.path.join(root, loc))
+ p = path(ui, name, rawloc=loc, suboptions=sub_opts)
+ paths.append(p)
+ self[name] = paths
+
+ for name, old_paths in sorted(self.items()):
+ new_paths = []
+ for p in old_paths:
+ new_paths.extend(_chain_path(p, ui, self))
+ self[name] = new_paths
def getpath(self, ui, name, default=None):
"""Return a ``path`` from a string, falling back to default.
@@ -590,6 +687,8 @@ class paths(dict):
Returns None if ``name`` is not a registered path, a URI, or a local
path to a repo.
"""
+ msg = b'getpath is deprecated, use `get_*` functions from urlutil'
+ ui.deprecwarn(msg, b'6.0')
# Only fall back to default if no path was requested.
if name is None:
if not default:
@@ -598,7 +697,7 @@ class paths(dict):
default = (default,)
for k in default:
try:
- return self[k]
+ return self[k][0]
except KeyError:
continue
return None
@@ -607,16 +706,14 @@ class paths(dict):
# This may need to raise in the future.
if not name:
return None
-
- try:
- return self[name]
- except KeyError:
+ if name in self:
+ return self[name][0]
+ else:
# Try to resolve as a local path or URI.
- try:
- # we pass the ui instance are warning might need to be issued
- return path(ui, None, rawloc=name)
- except ValueError:
+ path = try_path(ui, name)
+ if path is None:
raise error.RepoError(_(b'repository %s does not exist') % name)
+ return path.rawloc
_pathsuboptions = {}
@@ -649,7 +746,9 @@ def pushurlpathoption(ui, path, value):
u = url(value)
# Actually require a URL.
if not u.scheme:
- ui.warn(_(b'(paths.%s:pushurl not a URL; ignoring)\n') % path.name)
+ msg = _(b'(paths.%s:pushurl not a URL; ignoring: "%s")\n')
+ msg %= (path.name, value)
+ ui.warn(msg)
return None
# Don't support the #foo syntax in the push URL to declare branch to
@@ -672,10 +771,54 @@ def pushrevpathoption(ui, path, value):
return value
+@pathsuboption(b'multi-urls', b'multi_urls')
+def multiurls_pathoption(ui, path, value):
+ res = stringutil.parsebool(value)
+ if res is None:
+ ui.warn(
+ _(b'(paths.%s:multi-urls not a boolean; ignoring)\n') % path.name
+ )
+ res = False
+ return res
+
+
+def _chain_path(base_path, ui, paths):
+ """return the result of "path://" logic applied on a given path"""
+ new_paths = []
+ if base_path.url.scheme != b'path':
+ new_paths.append(base_path)
+ else:
+ assert base_path.url.path is None
+ sub_paths = paths.get(base_path.url.host)
+ if sub_paths is None:
+ m = _(b'cannot use `%s`, "%s" is not a known path')
+ m %= (base_path.rawloc, base_path.url.host)
+ raise error.Abort(m)
+ for subpath in sub_paths:
+ path = base_path.copy()
+ if subpath.raw_url.scheme == b'path':
+ m = _(b'cannot use `%s`, "%s" is also defined as a `path://`')
+ m %= (path.rawloc, path.url.host)
+ raise error.Abort(m)
+ path.url = subpath.url
+ path.rawloc = subpath.rawloc
+ path.loc = subpath.loc
+ if path.branch is None:
+ path.branch = subpath.branch
+ else:
+ base = path.rawloc.rsplit(b'#', 1)[0]
+ path.rawloc = b'%s#%s' % (base, path.branch)
+ suboptions = subpath._all_sub_opts.copy()
+ suboptions.update(path._own_sub_opts)
+ path._apply_suboptions(ui, suboptions)
+ new_paths.append(path)
+ return new_paths
+
+
class path(object):
"""Represents an individual path and its configuration."""
- def __init__(self, ui, name, rawloc=None, suboptions=None):
+ def __init__(self, ui=None, name=None, rawloc=None, suboptions=None):
"""Construct a path from its config options.
``ui`` is the ``ui`` instance the path is coming from.
@@ -687,6 +830,13 @@ class path(object):
filesystem path with a .hg directory or b) a URL. If not,
``ValueError`` is raised.
"""
+ if ui is None:
+ # used in copy
+ assert name is None
+ assert rawloc is None
+ assert suboptions is None
+ return
+
if not rawloc:
raise ValueError(b'rawloc must be defined')
@@ -717,30 +867,15 @@ class path(object):
self._apply_suboptions(ui, sub_opts)
- def chain_path(self, ui, paths):
- if self.url.scheme == b'path':
- assert self.url.path is None
- try:
- subpath = paths[self.url.host]
- except KeyError:
- m = _(b'cannot use `%s`, "%s" is not a known path')
- m %= (self.rawloc, self.url.host)
- raise error.Abort(m)
- if subpath.raw_url.scheme == b'path':
- m = _(b'cannot use `%s`, "%s" is also defined as a `path://`')
- m %= (self.rawloc, self.url.host)
- raise error.Abort(m)
- self.url = subpath.url
- self.rawloc = subpath.rawloc
- self.loc = subpath.loc
- if self.branch is None:
- self.branch = subpath.branch
- else:
- base = self.rawloc.rsplit(b'#', 1)[0]
- self.rawloc = b'%s#%s' % (base, self.branch)
- suboptions = subpath._all_sub_opts.copy()
- suboptions.update(self._own_sub_opts)
- self._apply_suboptions(ui, suboptions)
+ def copy(self):
+ """make a copy of this path object"""
+ new = self.__class__()
+ for k, v in self.__dict__.items():
+ new_copy = getattr(v, 'copy', None)
+ if new_copy is not None:
+ v = new_copy()
+ new.__dict__[k] = v
+ return new
def _validate_path(self):
# When given a raw location but not a symbolic name, validate the
diff --git a/mercurial/verify.py b/mercurial/verify.py
--- a/mercurial/verify.py
+++ b/mercurial/verify.py
@@ -10,13 +10,8 @@ from __future__ import absolute_import
import os
from .i18n import _
-from .node import (
- nullid,
- short,
-)
-from .utils import (
- stringutil,
-)
+from .node import short
+from .utils import stringutil
from . import (
error,
@@ -43,6 +38,23 @@ def _normpath(f):
return f
+HINT_FNCACHE = _(
+ b'hint: run "hg debugrebuildfncache" to recover from corrupt fncache\n'
+)
+
+WARN_PARENT_DIR_UNKNOWN_REV = _(
+ b"parent-directory manifest refers to unknown revision %s"
+)
+
+WARN_UNKNOWN_COPY_SOURCE = _(
+ b"warning: copy source of '%s' not in parents of %s"
+)
+
+WARN_NULLID_COPY_SOURCE = _(
+ b"warning: %s@%s: copy source revision is nullid %s:%s\n"
+)
+
+
class verifier(object):
def __init__(self, repo, level=None):
self.repo = repo.unfiltered()
@@ -56,7 +68,7 @@ class verifier(object):
self.warnings = 0
self.havecl = len(repo.changelog) > 0
self.havemf = len(repo.manifestlog.getstorage(b'')) > 0
- self.revlogv1 = repo.changelog.version != revlog.REVLOGV0
+ self.revlogv1 = repo.changelog._format_version != revlog.REVLOGV0
self.lrugetctx = util.lrucachefunc(repo.unfiltered().__getitem__)
self.refersmf = False
self.fncachewarned = False
@@ -107,7 +119,7 @@ class verifier(object):
if d[1]:
self._err(None, _(b"index contains %d extra bytes") % d[1], name)
- if obj.version != revlog.REVLOGV0:
+ if obj._format_version != revlog.REVLOGV0:
if not self.revlogv1:
self._warn(_(b"warning: `%s' uses revlog format 1") % name)
elif self.revlogv1:
@@ -119,7 +131,7 @@ class verifier(object):
arguments are:
- obj: the source revlog
- i: the revision number
- - node: the revision node id
+ - node: the revision node id
- seen: nodes previously seen for this revlog
- linkrevs: [changelog-revisions] introducing "node"
- f: string label ("changelog", "manifest", or filename)
@@ -144,33 +156,25 @@ class verifier(object):
if f and len(linkrevs) > 1:
try:
# attempt to filter down to real linkrevs
- linkrevs = [
- l
- for l in linkrevs
- if self.lrugetctx(l)[f].filenode() == node
- ]
+ linkrevs = []
+ for lr in linkrevs:
+ if self.lrugetctx(lr)[f].filenode() == node:
+ linkrevs.append(lr)
except Exception:
pass
- self._warn(
- _(b" (expected %s)")
- % b" ".join(map(pycompat.bytestr, linkrevs))
- )
+ msg = _(b" (expected %s)")
+ msg %= b" ".join(map(pycompat.bytestr, linkrevs))
+ self._warn(msg)
lr = None # can't be trusted
try:
p1, p2 = obj.parents(node)
- if p1 not in seen and p1 != nullid:
- self._err(
- lr,
- _(b"unknown parent 1 %s of %s") % (short(p1), short(node)),
- f,
- )
- if p2 not in seen and p2 != nullid:
- self._err(
- lr,
- _(b"unknown parent 2 %s of %s") % (short(p2), short(node)),
- f,
- )
+ if p1 not in seen and p1 != self.repo.nullid:
+ msg = _(b"unknown parent 1 %s of %s") % (short(p1), short(node))
+ self._err(lr, msg, f)
+ if p2 not in seen and p2 != self.repo.nullid:
+ msg = _(b"unknown parent 2 %s of %s") % (short(p2), short(node))
+ self._err(lr, msg, f)
except Exception as inst:
self._exc(lr, _(b"checking parents of %s") % short(node), inst, f)
@@ -215,19 +219,13 @@ class verifier(object):
if self.warnings:
ui.warn(_(b"%d warnings encountered!\n") % self.warnings)
if self.fncachewarned:
- ui.warn(
- _(
- b'hint: run "hg debugrebuildfncache" to recover from '
- b'corrupt fncache\n'
- )
- )
+ ui.warn(HINT_FNCACHE)
if self.errors:
ui.warn(_(b"%d integrity errors encountered!\n") % self.errors)
if self.badrevs:
- ui.warn(
- _(b"(first damaged changeset appears to be %d)\n")
- % min(self.badrevs)
- )
+ msg = _(b"(first damaged changeset appears to be %d)\n")
+ msg %= min(self.badrevs)
+ ui.warn(msg)
return 1
return 0
@@ -267,7 +265,7 @@ class verifier(object):
try:
changes = cl.read(n)
- if changes[0] != nullid:
+ if changes[0] != self.repo.nullid:
mflinkrevs.setdefault(changes[0], []).append(i)
self.refersmf = True
for f in changes[3]:
@@ -331,7 +329,7 @@ class verifier(object):
if self.refersmf:
# Do not check manifest if there are only changelog entries with
# null manifests.
- self._checkrevlog(mf, label, 0)
+ self._checkrevlog(mf._revlog, label, 0)
progress = ui.makeprogress(
_(b'checking'), unit=_(b'manifests'), total=len(mf)
)
@@ -343,11 +341,8 @@ class verifier(object):
if n in mflinkrevs:
del mflinkrevs[n]
elif dir:
- self._err(
- lr,
- _(b"%s not in parent-directory manifest") % short(n),
- label,
- )
+ msg = _(b"%s not in parent-directory manifest") % short(n)
+ self._err(lr, msg, label)
else:
self._err(lr, _(b"%s not in changesets") % short(n), label)
@@ -362,9 +357,8 @@ class verifier(object):
if fl == b't':
if not match.visitdir(fullpath):
continue
- subdirnodes.setdefault(fullpath + b'/', {}).setdefault(
- fn, []
- ).append(lr)
+ sdn = subdirnodes.setdefault(fullpath + b'/', {})
+ sdn.setdefault(fn, []).append(lr)
else:
if not match(fullpath):
continue
@@ -378,12 +372,8 @@ class verifier(object):
# code (eg: hash verification, filename are ordered, etc.)
mfdelta = mfl.get(dir, n).read()
except Exception as inst:
- self._exc(
- lr,
- _(b"reading full manifest %s") % short(n),
- inst,
- label,
- )
+ msg = _(b"reading full manifest %s") % short(n)
+ self._exc(lr, msg, inst, label)
if not dir:
progress.complete()
@@ -394,22 +384,11 @@ class verifier(object):
changesetpairs = [(c, m) for m in mflinkrevs for c in mflinkrevs[m]]
for c, m in sorted(changesetpairs):
if dir:
- self._err(
- c,
- _(
- b"parent-directory manifest refers to unknown"
- b" revision %s"
- )
- % short(m),
- label,
- )
+ self._err(c, WARN_PARENT_DIR_UNKNOWN_REV % short(m), label)
else:
- self._err(
- c,
- _(b"changeset refers to unknown revision %s")
- % short(m),
- label,
- )
+ msg = _(b"changeset refers to unknown revision %s")
+ msg %= short(m)
+ self._err(c, msg, label)
if not dir and subdirnodes:
self.ui.status(_(b"checking directory manifests\n"))
@@ -488,7 +467,7 @@ class verifier(object):
state = {
# TODO this assumes revlog storage for changelog.
- b'expectedversion': self.repo.changelog.version & 0xFFFF,
+ b'expectedversion': self.repo.changelog._format_version,
b'skipflags': self.skipflags,
# experimental config: censor.policy
b'erroroncensored': ui.config(b'censor', b'policy') == b'abort',
@@ -523,9 +502,8 @@ class verifier(object):
storefiles.remove(ff)
except KeyError:
if self.warnorphanstorefiles:
- self._warn(
- _(b" warning: revlog '%s' not in fncache!") % ff
- )
+ msg = _(b" warning: revlog '%s' not in fncache!")
+ self._warn(msg % ff)
self.fncachewarned = True
if not len(fl) and (self.havecl or self.havemf):
@@ -544,11 +522,8 @@ class verifier(object):
if problem.warning:
self._warn(problem.warning)
elif problem.error:
- self._err(
- linkrev if linkrev is not None else lr,
- problem.error,
- f,
- )
+ linkrev_msg = linkrev if linkrev is not None else lr
+ self._err(linkrev_msg, problem.error, f)
else:
raise error.ProgrammingError(
b'problem instance does not set warning or error '
@@ -580,32 +555,15 @@ class verifier(object):
if lr is not None and ui.verbose:
ctx = lrugetctx(lr)
if not any(rp[0] in pctx for pctx in ctx.parents()):
- self._warn(
- _(
- b"warning: copy source of '%s' not"
- b" in parents of %s"
- )
- % (f, ctx)
- )
+ self._warn(WARN_UNKNOWN_COPY_SOURCE % (f, ctx))
fl2 = repo.file(rp[0])
if not len(fl2):
- self._err(
- lr,
- _(
- b"empty or missing copy source revlog "
- b"%s:%s"
- )
- % (rp[0], short(rp[1])),
- f,
- )
- elif rp[1] == nullid:
- ui.note(
- _(
- b"warning: %s@%s: copy source"
- b" revision is nullid %s:%s\n"
- )
- % (f, lr, rp[0], short(rp[1]))
- )
+ m = _(b"empty or missing copy source revlog %s:%s")
+ self._err(lr, m % (rp[0], short(rp[1])), f)
+ elif rp[1] == self.repo.nullid:
+ msg = WARN_NULLID_COPY_SOURCE
+ msg %= (f, lr, rp[0], short(rp[1]))
+ ui.note(msg)
else:
fl2.rev(rp[1])
except Exception as inst:
@@ -617,12 +575,8 @@ class verifier(object):
if f in filenodes:
fns = [(v, k) for k, v in pycompat.iteritems(filenodes[f])]
for lr, node in sorted(fns):
- self._err(
- lr,
- _(b"manifest refers to unknown revision %s")
- % short(node),
- f,
- )
+ msg = _(b"manifest refers to unknown revision %s")
+ self._err(lr, msg % short(node), f)
progress.complete()
if self.warnorphanstorefiles:
diff --git a/mercurial/vfs.py b/mercurial/vfs.py
--- a/mercurial/vfs.py
+++ b/mercurial/vfs.py
@@ -307,7 +307,7 @@ class abstractvfs(object):
# multiple instances puts us at risk of running out of file descriptors
# only allow to use backgroundfilecloser when in main thread.
if not isinstance(
- threading.currentThread(),
+ threading.current_thread(),
threading._MainThread, # pytype: disable=module-attr
):
yield
@@ -329,6 +329,9 @@ class abstractvfs(object):
None # pytype: disable=attribute-error
)
+ def register_file(self, path):
+ """generic hook point to lets fncache steer its stew"""
+
class vfs(abstractvfs):
"""Operate files relative to a base directory
@@ -483,7 +486,7 @@ class vfs(abstractvfs):
fp = checkambigatclosing(fp)
if backgroundclose and isinstance(
- threading.currentThread(),
+ threading.current_thread(),
threading._MainThread, # pytype: disable=module-attr
):
if (
diff --git a/mercurial/windows.py b/mercurial/windows.py
--- a/mercurial/windows.py
+++ b/mercurial/windows.py
@@ -202,7 +202,7 @@ def get_password():
"""
pw = ""
while True:
- c = msvcrt.getwch()
+ c = msvcrt.getwch() # pytype: disable=module-attr
if c == '\r' or c == '\n':
break
if c == '\003':
@@ -211,8 +211,8 @@ def get_password():
pw = pw[:-1]
else:
pw = pw + c
- msvcrt.putwch('\r')
- msvcrt.putwch('\n')
+ msvcrt.putwch('\r') # pytype: disable=module-attr
+ msvcrt.putwch('\n') # pytype: disable=module-attr
return encoding.strtolocal(pw)
@@ -333,6 +333,25 @@ def normcase(path):
return encoding.upper(path) # NTFS compares via upper()
+DRIVE_RE_B = re.compile(b'^[a-z]:')
+DRIVE_RE_S = re.compile('^[a-z]:')
+
+
+def abspath(path):
+ abs_path = os.path.abspath(path) # re-exports
+ # Python on Windows is inconsistent regarding the capitalization of drive
+ # letters, and this causes issues with various path comparisons along the way.
+ # So we normalize the drive letter to upper case here.
+ #
+ # See https://bugs.python.org/issue40368 for an example of this hell.
+ if isinstance(abs_path, bytes):
+ if DRIVE_RE_B.match(abs_path):
+ abs_path = abs_path[0:1].upper() + abs_path[1:]
+ elif DRIVE_RE_S.match(abs_path):
+ abs_path = abs_path[0:1].upper() + abs_path[1:]
+ return abs_path
+
+
# see posix.py for definitions
normcasespec = encoding.normcasespecs.upper
normcasefallback = encoding.upperfallback
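
The drive-letter normalization above is not specific to Python: any code that compares absolute Windows paths as strings needs the same treatment. A minimal Rust sketch of the idea, purely for illustration (the helper name is invented and is not part of this series):

    /// Uppercase a leading ASCII drive letter ("c:" -> "C:") so that string
    /// comparisons of absolute Windows-style paths are stable. Hypothetical
    /// helper, shown only to illustrate the normalization done by abspath().
    fn normalize_drive_letter(path: &str) -> String {
        let b = path.as_bytes();
        if b.len() >= 2 && b[1] == b':' && b[0].is_ascii_lowercase() {
            let mut out = String::with_capacity(path.len());
            out.push(b[0].to_ascii_uppercase() as char);
            out.push_str(&path[1..]);
            out
        } else {
            path.to_owned()
        }
    }

    fn main() {
        assert_eq!(normalize_drive_letter(r"c:\Users\hg"), r"C:\Users\hg");
        assert_eq!(normalize_drive_letter(r"C:\Users\hg"), r"C:\Users\hg");
    }
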
diff --git a/mercurial/wireprotov1server.py b/mercurial/wireprotov1server.py
--- a/mercurial/wireprotov1server.py
+++ b/mercurial/wireprotov1server.py
@@ -11,10 +11,7 @@ import binascii
import os
from .i18n import _
-from .node import (
- hex,
- nullid,
-)
+from .node import hex
from .pycompat import getattr
from . import (
@@ -470,7 +467,7 @@ def getbundle(repo, proto, others):
clheads = set(repo.changelog.heads())
heads = set(opts.get(b'heads', set()))
common = set(opts.get(b'common', set()))
- common.discard(nullid)
+ common.discard(repo.nullid)
if (
repo.ui.configbool(b'server', b'pullbundle')
and b'partial-pull' in proto.getprotocaps()
diff --git a/mercurial/wireprotov2server.py b/mercurial/wireprotov2server.py
--- a/mercurial/wireprotov2server.py
+++ b/mercurial/wireprotov2server.py
@@ -10,10 +10,7 @@ import collections
import contextlib
from .i18n import _
-from .node import (
- hex,
- nullid,
-)
+from .node import hex
from . import (
discovery,
encoding,
@@ -950,7 +947,7 @@ def resolvenodes(repo, revisions):
if spec[b'roots']:
common = [n for n in spec[b'roots'] if clhasnode(n)]
else:
- common = [nullid]
+ common = [repo.nullid]
for n in discovery.outgoing(repo, common, spec[b'heads']).missing:
if n not in seen:
diff --git a/relnotes/next b/relnotes/next
--- a/relnotes/next
+++ b/relnotes/next
@@ -1,5 +1,8 @@
== New Features ==
-
+
+ * `hg config` now has a `--source` option to show where each
+ configuration value comes from.
+
== Default Format Change ==
@@ -18,4 +21,31 @@ Mercurial 5.8.
== Internal API Changes ==
+The Dirstate API has been updated because the previous functions leaked some
+internal details and did not distinguish between two important cases: "we are
+changing parents and need to adjust the dirstate" and "some command is changing
+which files are tracked". To clarify the situation:
+* the following functions have been deprecated,
+
+ - dirstate.add,
+ - dirstate.normal,
+ - dirstate.normallookup,
+ - dirstate.merge,
+ - dirstate.otherparent,
+ - dirstate.remove,
+ - dirstate.drop,
+
+* these new functions are added for the "adjusting parents" use-case:
+
+ - dirstate.update_file,
+ - dirstate.update_file_p1,
+
+* these new functions are added for the "adjusting wc file" use-case:
+
+ - dirstate.set_tracked,
+ - dirstate.set_untracked,
+ - dirstate.set_clean,
+ - dirstate.set_possibly_dirty,
+
+See inline documentation of the new functions for details.
diff --git a/rust/Cargo.lock b/rust/Cargo.lock
--- a/rust/Cargo.lock
+++ b/rust/Cargo.lock
@@ -57,6 +57,15 @@ dependencies = [
]
[[package]]
+name = "block-buffer"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
name = "byteorder"
version = "1.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -64,9 +73,9 @@ checksum = "08c48aae112d48ed9f069b33538e
[[package]]
name = "bytes-cast"
-version = "0.1.0"
+version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3196ba300c7bc9282a4331e878496cb3e9603a898a8f1446601317163e16ca52"
+checksum = "0d434f9a4ecbe987e7ccfda7274b6f82ea52c9b63742565a65cb5e8ba0f2c452"
dependencies = [
"bytes-cast-derive",
]
@@ -138,10 +147,19 @@ source = "registry+https://github.com/ru
checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826"
[[package]]
+name = "cpufeatures"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed00c67cb5d0a7d64a44f6ad2668db7e7530311dd53ea79bcd4fb022c64911c8"
+dependencies = [
+ "libc",
+]
+
+[[package]]
name = "cpython"
-version = "0.5.2"
+version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f11357af68648b6a227e7e2384d439cec8595de65970f45e3f7f4b2600be472"
+checksum = "8094679a4e9bfc8035572162624bc800eda35b5f9eff2537b9cd9aacc3d9782e"
dependencies = [
"libc",
"num-traits",
@@ -254,6 +272,15 @@ source = "registry+https://github.com/ru
checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
[[package]]
+name = "digest"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
name = "either"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -308,16 +335,14 @@ dependencies = [
]
[[package]]
-name = "fuchsia-cprng"
-version = "0.1.1"
+name = "generic-array"
+version = "0.14.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
-
-[[package]]
-name = "gcc"
-version = "0.3.55"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
+checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817"
+dependencies = [
+ "typenum",
+ "version_check",
+]
[[package]]
name = "getrandom"
@@ -358,18 +383,19 @@ dependencies = [
"format-bytes",
"home",
"im-rc",
+ "itertools",
"lazy_static",
"log",
"memmap",
"micro-timer",
"pretty_assertions",
- "rand 0.7.3",
+ "rand",
"rand_distr",
"rand_pcg",
"rayon",
"regex",
- "rust-crypto",
"same-file",
+ "sha-1",
"tempfile",
"twox-hash",
"zstd",
@@ -412,7 +438,7 @@ source = "registry+https://github.com/ru
checksum = "3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f"
dependencies = [
"bitmaps",
- "rand_core 0.5.1",
+ "rand_core",
"rand_xoshiro",
"sized-chunks",
"typenum",
@@ -562,6 +588,12 @@ dependencies = [
]
[[package]]
+name = "opaque-debug"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
+
+[[package]]
name = "output_vt100"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -572,22 +604,9 @@ dependencies = [
[[package]]
name = "paste"
-version = "0.1.18"
+version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
-dependencies = [
- "paste-impl",
- "proc-macro-hack",
-]
-
-[[package]]
-name = "paste-impl"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
-dependencies = [
- "proc-macro-hack",
-]
+checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58"
[[package]]
name = "pkg-config"
@@ -630,9 +649,9 @@ dependencies = [
[[package]]
name = "python27-sys"
-version = "0.5.2"
+version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f485897ed7048f5032317c4e427800ef9f2053355516524d73952b8b07032054"
+checksum = "5826ddbc5366eb0b0492040fdc25bf50bb49092c192bd45e80fb7a24dc6832ab"
dependencies = [
"libc",
"regex",
@@ -640,9 +659,9 @@ dependencies = [
[[package]]
name = "python3-sys"
-version = "0.5.2"
+version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b29b99c6868eb02beb3bf6ed025c8bcdf02efc149b8e80347d3e5d059a806db"
+checksum = "b78af21b29594951a47fc3dac9b9eff0a3f077dec2f780ee943ae16a668f3b6a"
dependencies = [
"libc",
"regex",
@@ -665,29 +684,6 @@ dependencies = [
[[package]]
name = "rand"
-version = "0.3.23"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c"
-dependencies = [
- "libc",
- "rand 0.4.6",
-]
-
-[[package]]
-name = "rand"
-version = "0.4.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
-dependencies = [
- "fuchsia-cprng",
- "libc",
- "rand_core 0.3.1",
- "rdrand",
- "winapi",
-]
-
-[[package]]
-name = "rand"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
@@ -695,7 +691,7 @@ dependencies = [
"getrandom",
"libc",
"rand_chacha",
- "rand_core 0.5.1",
+ "rand_core",
"rand_hc",
]
@@ -706,26 +702,11 @@ source = "registry+https://github.com/ru
checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
dependencies = [
"ppv-lite86",
- "rand_core 0.5.1",
+ "rand_core",
]
[[package]]
name = "rand_core"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
-dependencies = [
- "rand_core 0.4.2",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
-
-[[package]]
-name = "rand_core"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
@@ -739,7 +720,7 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2"
dependencies = [
- "rand 0.7.3",
+ "rand",
]
[[package]]
@@ -748,7 +729,7 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
dependencies = [
- "rand_core 0.5.1",
+ "rand_core",
]
[[package]]
@@ -757,7 +738,7 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429"
dependencies = [
- "rand_core 0.5.1",
+ "rand_core",
]
[[package]]
@@ -766,7 +747,7 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004"
dependencies = [
- "rand_core 0.5.1",
+ "rand_core",
]
[[package]]
@@ -795,15 +776,6 @@ dependencies = [
]
[[package]]
-name = "rdrand"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
-dependencies = [
- "rand_core 0.3.1",
-]
-
-[[package]]
name = "redox_syscall"
version = "0.1.57"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -846,6 +818,7 @@ dependencies = [
"env_logger",
"format-bytes",
"hg-core",
+ "home",
"lazy_static",
"log",
"micro-timer",
@@ -854,25 +827,6 @@ dependencies = [
]
[[package]]
-name = "rust-crypto"
-version = "0.2.36"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a"
-dependencies = [
- "gcc",
- "libc",
- "rand 0.3.23",
- "rustc-serialize",
- "time",
-]
-
-[[package]]
-name = "rustc-serialize"
-version = "0.3.24"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
-
-[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -888,6 +842,19 @@ source = "registry+https://github.com/ru
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
+name = "sha-1"
+version = "0.9.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c4cfa741c5832d0ef7fab46cabed29c2aae926db0b11bb2069edd8db5e64e16"
+dependencies = [
+ "block-buffer",
+ "cfg-if 1.0.0",
+ "cpufeatures",
+ "digest",
+ "opaque-debug",
+]
+
+[[package]]
name = "sized-chunks"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -928,7 +895,7 @@ checksum = "7a6e24d9338a0a5be79593e2fa15
dependencies = [
"cfg-if 0.1.10",
"libc",
- "rand 0.7.3",
+ "rand",
"redox_syscall",
"remove_dir_all",
"winapi",
@@ -979,7 +946,7 @@ source = "registry+https://github.com/ru
checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59"
dependencies = [
"cfg-if 0.1.10",
- "rand 0.7.3",
+ "rand",
"static_assertions",
]
diff --git a/rust/hg-core/Cargo.toml b/rust/hg-core/Cargo.toml
--- a/rust/hg-core/Cargo.toml
+++ b/rust/hg-core/Cargo.toml
@@ -9,25 +9,27 @@ edition = "2018"
name = "hg"
[dependencies]
-bytes-cast = "0.1"
+bytes-cast = "0.2"
byteorder = "1.3.4"
derive_more = "0.99"
home = "0.5"
im-rc = "15.0.*"
+itertools = "0.9"
lazy_static = "1.4.0"
rand = "0.7.3"
rand_pcg = "0.2.1"
rand_distr = "0.2.2"
rayon = "1.3.0"
regex = "1.3.9"
+sha-1 = "0.9.6"
twox-hash = "1.5.0"
same-file = "1.0.6"
+tempfile = "3.1.0"
crossbeam-channel = "0.4"
micro-timer = "0.3.0"
log = "0.4.8"
memmap = "0.7.0"
zstd = "0.5.3"
-rust-crypto = "0.2.36"
format-bytes = "0.2.2"
# We don't use the `miniz-oxide` backend to not change rhg benchmarks and until
@@ -40,4 +42,3 @@ default-features = false
[dev-dependencies]
clap = "*"
pretty_assertions = "0.6.1"
-tempfile = "3.1.0"
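
The `rust-crypto` dependency is dropped in favour of the RustCrypto `sha-1` crate pinned above. A small, self-contained sketch of that crate's digest API (illustrative usage only, not code from this series; the `sha-1` package exposes the `sha1` library name):

    use sha1::{Digest, Sha1};

    fn sha1_hex(data: &[u8]) -> String {
        let mut hasher = Sha1::new();
        hasher.update(data);
        // finalize() yields the 20-byte digest; render it as lowercase hex.
        hasher
            .finalize()
            .iter()
            .map(|byte| format!("{:02x}", byte))
            .collect()
    }

    fn main() {
        assert_eq!(sha1_hex(b"hello"), "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d");
    }
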
diff --git a/rust/hg-core/src/config.rs b/rust/hg-core/src/config.rs
--- a/rust/hg-core/src/config.rs
+++ b/rust/hg-core/src/config.rs
@@ -12,5 +12,5 @@
mod config;
mod layer;
mod values;
-pub use config::{Config, ConfigValueParseError};
+pub use config::{Config, ConfigSource, ConfigValueParseError};
pub use layer::{ConfigError, ConfigParseError};
diff --git a/rust/hg-core/src/config/config.rs b/rust/hg-core/src/config/config.rs
--- a/rust/hg-core/src/config/config.rs
+++ b/rust/hg-core/src/config/config.rs
@@ -88,9 +88,7 @@ impl Config {
/// Load system and user configuration from various files.
///
/// This is also affected by some environment variables.
- pub fn load(
- cli_config_args: impl IntoIterator<Item = Vec<u8>>,
- ) -> Result<Self, ConfigError> {
+ pub fn load_non_repo() -> Result<Self, ConfigError> {
let mut config = Self { layers: Vec::new() };
let opt_rc_path = env::var_os("HGRCPATH");
// HGRCPATH replaces system config
@@ -133,10 +131,17 @@ impl Config {
}
}
}
+ Ok(config)
+ }
+
+ pub fn load_cli_args_config(
+ &mut self,
+ cli_config_args: impl IntoIterator<Item = Vec<u8>>,
+ ) -> Result<(), ConfigError> {
if let Some(layer) = ConfigLayer::parse_cli_args(cli_config_args)? {
- config.layers.push(layer)
+ self.layers.push(layer)
}
- Ok(config)
+ Ok(())
}
fn add_trusted_dir(&mut self, path: &Path) -> Result<(), ConfigError> {
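
With the split above, callers build the non-repository configuration first and then layer any command-line `--config` pairs on top of it. A hedged usage sketch of the two new methods (the wrapper function and its error plumbing are invented for illustration):

    use hg::config::{Config, ConfigError};

    // Build the effective config: system/user files first, then any
    // `--config section.name=value` overrides from the command line on top.
    fn effective_config(
        cli_overrides: Vec<Vec<u8>>,
    ) -> Result<Config, ConfigError> {
        let mut config = Config::load_non_repo()?;
        config.load_cli_args_config(cli_overrides)?;
        Ok(config)
    }
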
@@ -361,10 +366,11 @@ impl Config {
///
/// This is appropriate for new configuration keys. The value syntax is
/// **not** the same as most existing list-valued config, which has Python
- /// parsing implemented in `parselist()` in `mercurial/config.py`.
- /// Faithfully porting that parsing algorithm to Rust (including behavior
- /// that are arguably bugs) turned out to be non-trivial and hasn’t been
- /// completed as of this writing.
+ /// parsing implemented in `parselist()` in
+ /// `mercurial/utils/stringutil.py`. Faithfully porting that parsing
+ /// algorithm to Rust (including behaviors that are arguably bugs)
+ /// turned out to be non-trivial and hasn’t been completed as of this
+ /// writing.
///
/// Instead, the "simple" syntax is: split on comma, then trim leading and
/// trailing whitespace of each component. Quotes or backslashes are not
diff --git a/rust/hg-core/src/config/layer.rs b/rust/hg-core/src/config/layer.rs
--- a/rust/hg-core/src/config/layer.rs
+++ b/rust/hg-core/src/config/layer.rs
@@ -8,6 +8,7 @@
// GNU General Public License version 2 or any later version.
use crate::errors::HgError;
+use crate::exit_codes::CONFIG_PARSE_ERROR_ABORT;
use crate::utils::files::{get_bytes_from_path, get_path_from_bytes};
use format_bytes::{format_bytes, write_bytes, DisplayBytes};
use lazy_static::lazy_static;
@@ -73,11 +74,14 @@ impl ConfigLayer {
if let Some((section, item, value)) = parse_one(arg) {
layer.add(section, item, value, None);
} else {
- Err(HgError::abort(format!(
- "abort: malformed --config option: '{}' \
+ Err(HgError::abort(
+ format!(
+ "abort: malformed --config option: '{}' \
(use --config section.name=value)",
- String::from_utf8_lossy(arg),
- )))?
+ String::from_utf8_lossy(arg),
+ ),
+ CONFIG_PARSE_ERROR_ABORT,
+ ))?
}
}
if layer.sections.is_empty() {
diff --git a/rust/hg-core/src/copy_tracing/tests_support.rs b/rust/hg-core/src/copy_tracing/tests_support.rs
--- a/rust/hg-core/src/copy_tracing/tests_support.rs
+++ b/rust/hg-core/src/copy_tracing/tests_support.rs
@@ -123,7 +123,10 @@ macro_rules! merge_copies_dict {
),
)
})
- .collect::>()
+ .collect::, OrdSet)
+ >>()
}
};
}
diff --git a/rust/hg-core/src/dirstate.rs b/rust/hg-core/src/dirstate.rs
--- a/rust/hg-core/src/dirstate.rs
+++ b/rust/hg-core/src/dirstate.rs
@@ -5,11 +5,13 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
+use crate::dirstate_tree::on_disk::DirstateV2ParseError;
use crate::errors::HgError;
+use crate::revlog::node::NULL_NODE;
use crate::revlog::Node;
-use crate::{utils::hg_path::HgPathBuf, FastHashMap};
+use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::FastHashMap;
use bytes_cast::{unaligned, BytesCast};
-use std::collections::hash_map;
use std::convert::TryFrom;
pub mod dirs_multiset;
@@ -24,6 +26,13 @@ pub struct DirstateParents {
pub p2: Node,
}
+impl DirstateParents {
+ pub const NULL: Self = Self {
+ p1: NULL_NODE,
+ p2: NULL_NODE,
+ };
+}
+
/// The C implementation uses all signed types. This will be an issue
/// either when 4GB+ source files are commonplace or in 2038, whichever
/// comes first.
@@ -35,6 +44,35 @@ pub struct DirstateEntry {
pub size: i32,
}
+impl DirstateEntry {
+ pub fn is_non_normal(&self) -> bool {
+ self.state != EntryState::Normal || self.mtime == MTIME_UNSET
+ }
+
+ pub fn is_from_other_parent(&self) -> bool {
+ self.state == EntryState::Normal && self.size == SIZE_FROM_OTHER_PARENT
+ }
+
+ // TODO: other platforms
+ #[cfg(unix)]
+ pub fn mode_changed(
+ &self,
+ filesystem_metadata: &std::fs::Metadata,
+ ) -> bool {
+ use std::os::unix::fs::MetadataExt;
+ const EXEC_BIT_MASK: u32 = 0o100;
+ let dirstate_exec_bit = (self.mode as u32) & EXEC_BIT_MASK;
+ let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
+ dirstate_exec_bit != fs_exec_bit
+ }
+
+ /// Returns a `(state, mode, size, mtime)` tuple as for
+ /// `DirstateMapMethods::debug_iter`.
+ pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
+ (self.state.into(), self.mode, self.size, self.mtime)
+ }
+}
+
#[derive(BytesCast)]
#[repr(C)]
struct RawEntry {
@@ -45,16 +83,32 @@ struct RawEntry {
length: unaligned::I32Be,
}
+pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
+
+pub const MTIME_UNSET: i32 = -1;
+
/// A `DirstateEntry` with a size of `-2` means that it was merged from the
/// other parent. This allows revert to pick the right status back during a
/// merge.
pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
+/// A special value used for internal representation of special case in
+/// dirstate v1 format.
+pub const SIZE_NON_NORMAL: i32 = -1;
pub type StateMap = FastHashMap<HgPathBuf, DirstateEntry>;
-pub type StateMapIter<'a> = hash_map::Iter<'a, HgPathBuf, DirstateEntry>;
+pub type StateMapIter<'a> = Box<
+ dyn Iterator<
+ Item = Result<(&'a HgPath, DirstateEntry), DirstateV2ParseError>,
+ > + Send
+ + 'a,
+>;
pub type CopyMap = FastHashMap<HgPathBuf, HgPathBuf>;
-pub type CopyMapIter<'a> = hash_map::Iter<'a, HgPathBuf, HgPathBuf>;
+pub type CopyMapIter<'a> = Box<
+ dyn Iterator<
+ Item = Result<(&'a HgPath, &'a HgPath), DirstateV2ParseError>,
+ > + Send
+ + 'a,
+>;
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum EntryState {
@@ -65,6 +119,16 @@ pub enum EntryState {
Unknown,
}
+impl EntryState {
+ pub fn is_tracked(self) -> bool {
+ use EntryState::*;
+ match self {
+ Normal | Added | Merged => true,
+ Removed | Unknown => false,
+ }
+ }
+}
+
impl TryFrom<u8> for EntryState {
type Error = HgError;
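
The `DirstateEntry` helpers added above encode the dirstate-v1 conventions: a normal entry whose size is `SIZE_FROM_OTHER_PARENT` (-2) was recorded from the other merge parent, and an entry that is not in normal state, or whose mtime is unset, counts as "non-normal". A small illustrative check, assuming the `hg::dirstate` module paths from this series (building the entry by hand is for illustration only):

    use hg::dirstate::{
        DirstateEntry, EntryState, MTIME_UNSET, SIZE_FROM_OTHER_PARENT,
    };

    fn main() {
        // An entry recorded from the other merge parent, in dirstate-v1 terms.
        let entry = DirstateEntry {
            state: EntryState::Normal,
            mode: 0o644,
            size: SIZE_FROM_OTHER_PARENT,
            mtime: MTIME_UNSET,
        };
        assert!(entry.is_from_other_parent());
        assert!(entry.is_non_normal()); // mtime is unset
    }
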
diff --git a/rust/hg-core/src/dirstate/dirs_multiset.rs b/rust/hg-core/src/dirstate/dirs_multiset.rs
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs
@@ -8,13 +8,14 @@
//! A multiset of directory names.
//!
//! Used to counts the references to directories in a manifest or dirstate.
+use crate::dirstate_tree::on_disk::DirstateV2ParseError;
use crate::{
dirstate::EntryState,
utils::{
files,
hg_path::{HgPath, HgPathBuf, HgPathError},
},
- DirstateEntry, DirstateMapError, FastHashMap, StateMap,
+ DirstateEntry, DirstateError, DirstateMapError, FastHashMap,
};
use std::collections::{hash_map, hash_map::Entry, HashMap, HashSet};
@@ -30,17 +31,25 @@ impl DirsMultiset {
/// Initializes the multiset from a dirstate.
///
/// If `skip_state` is provided, skips dirstate entries with equal state.
- pub fn from_dirstate(
- dirstate: &StateMap,
+ pub fn from_dirstate<I, P>(
+ dirstate: I,
 skip_state: Option<EntryState>,
- ) -> Result<Self, DirstateMapError> {
+ ) -> Result<Self, DirstateError>
+ where
+ I: IntoIterator<
+ Item = Result<(P, DirstateEntry), DirstateV2ParseError>,
+ >,
+ P: AsRef<HgPath>,
+ {
let mut multiset = DirsMultiset {
inner: FastHashMap::default(),
};
- for (filename, DirstateEntry { state, .. }) in dirstate.iter() {
+ for item in dirstate {
+ let (filename, entry) = item?;
+ let filename = filename.as_ref();
// This `if` is optimized out of the loop
if let Some(skip) = skip_state {
- if skip != *state {
+ if skip != entry.state {
multiset.add_path(filename)?;
}
} else {
@@ -207,6 +216,7 @@ impl<'a> DirsChildrenMultiset<'a> {
#[cfg(test)]
mod tests {
use super::*;
+ use crate::StateMap;
#[test]
fn test_delete_path_path_not_found() {
@@ -331,8 +341,11 @@ mod tests {
};
assert_eq!(expected, new);
- let new =
- DirsMultiset::from_dirstate(&StateMap::default(), None).unwrap();
+ let new = DirsMultiset::from_dirstate(
+ StateMap::default().into_iter().map(Ok),
+ None,
+ )
+ .unwrap();
let expected = DirsMultiset {
inner: FastHashMap::default(),
};
@@ -356,26 +369,23 @@ mod tests {
};
assert_eq!(expected, new);
- let input_map = ["b/x", "a/c", "a/d/x"]
- .iter()
- .map(|f| {
- (
- HgPathBuf::from_bytes(f.as_bytes()),
- DirstateEntry {
- state: EntryState::Normal,
- mode: 0,
- mtime: 0,
- size: 0,
- },
- )
- })
- .collect();
+ let input_map = ["b/x", "a/c", "a/d/x"].iter().map(|f| {
+ Ok((
+ HgPathBuf::from_bytes(f.as_bytes()),
+ DirstateEntry {
+ state: EntryState::Normal,
+ mode: 0,
+ mtime: 0,
+ size: 0,
+ },
+ ))
+ });
let expected_inner = [("", 2), ("a", 2), ("b", 1), ("a/d", 1)]
.iter()
.map(|(k, v)| (HgPathBuf::from_bytes(k.as_bytes()), *v))
.collect();
- let new = DirsMultiset::from_dirstate(&input_map, None).unwrap();
+ let new = DirsMultiset::from_dirstate(input_map, None).unwrap();
let expected = DirsMultiset {
inner: expected_inner,
};
@@ -392,7 +402,7 @@ mod tests {
]
.iter()
.map(|(f, state)| {
- (
+ Ok((
HgPathBuf::from_bytes(f.as_bytes()),
DirstateEntry {
state: *state,
@@ -400,9 +410,8 @@ mod tests {
mtime: 0,
size: 0,
},
- )
- })
- .collect();
+ ))
+ });
// "a" incremented with "a/c" and "a/d/"
let expected_inner = [("", 1), ("a", 2)]
@@ -411,7 +420,7 @@ mod tests {
.collect();
let new =
- DirsMultiset::from_dirstate(&input_map, Some(EntryState::Normal))
+ DirsMultiset::from_dirstate(input_map, Some(EntryState::Normal))
.unwrap();
let expected = DirsMultiset {
inner: expected_inner,
diff --git a/rust/hg-core/src/dirstate/dirstate_map.rs b/rust/hg-core/src/dirstate/dirstate_map.rs
--- a/rust/hg-core/src/dirstate/dirstate_map.rs
+++ b/rust/hg-core/src/dirstate/dirstate_map.rs
@@ -5,40 +5,31 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
-use crate::errors::HgError;
-use crate::revlog::node::NULL_NODE;
+use crate::dirstate::parsers::Timestamp;
use crate::{
- dirstate::{parsers::PARENT_SIZE, EntryState, SIZE_FROM_OTHER_PARENT},
+ dirstate::EntryState,
+ dirstate::MTIME_UNSET,
+ dirstate::SIZE_FROM_OTHER_PARENT,
+ dirstate::SIZE_NON_NORMAL,
+ dirstate::V1_RANGEMASK,
pack_dirstate, parse_dirstate,
- utils::{
- files::normalize_case,
- hg_path::{HgPath, HgPathBuf},
- },
- CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateMapError,
- DirstateParents, FastHashMap, StateMap,
+ utils::hg_path::{HgPath, HgPathBuf},
+ CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateParents,
+ StateMap,
};
use micro_timer::timed;
use std::collections::HashSet;
-use std::convert::TryInto;
use std::iter::FromIterator;
use std::ops::Deref;
-use std::time::Duration;
-
-pub type FileFoldMap = FastHashMap;
-
-const MTIME_UNSET: i32 = -1;
#[derive(Default)]
pub struct DirstateMap {
state_map: StateMap,
pub copy_map: CopyMap,
- file_fold_map: Option,
pub dirs: Option,
pub all_dirs: Option,
non_normal_set: Option>,
other_parent_set: Option>,
- parents: Option,
- dirty_parents: bool,
}
/// Should only really be used in python interface code, for clarity
@@ -69,22 +60,57 @@ impl DirstateMap {
pub fn clear(&mut self) {
self.state_map = StateMap::default();
self.copy_map.clear();
- self.file_fold_map = None;
self.non_normal_set = None;
self.other_parent_set = None;
- self.set_parents(&DirstateParents {
- p1: NULL_NODE,
- p2: NULL_NODE,
- })
+ }
+
+ pub fn set_v1_inner(&mut self, filename: &HgPath, entry: DirstateEntry) {
+ self.state_map.insert(filename.to_owned(), entry);
}
/// Add a tracked file to the dirstate
pub fn add_file(
&mut self,
filename: &HgPath,
- old_state: EntryState,
entry: DirstateEntry,
- ) -> Result<(), DirstateMapError> {
+ // XXX once the dust settles this should probably become an enum
+ added: bool,
+ merged: bool,
+ from_p2: bool,
+ possibly_dirty: bool,
+ ) -> Result<(), DirstateError> {
+ let mut entry = entry;
+ if added {
+ assert!(!merged);
+ assert!(!possibly_dirty);
+ assert!(!from_p2);
+ entry.state = EntryState::Added;
+ entry.size = SIZE_NON_NORMAL;
+ entry.mtime = MTIME_UNSET;
+ } else if merged {
+ assert!(!possibly_dirty);
+ assert!(!from_p2);
+ entry.state = EntryState::Merged;
+ entry.size = SIZE_FROM_OTHER_PARENT;
+ entry.mtime = MTIME_UNSET;
+ } else if from_p2 {
+ assert!(!possibly_dirty);
+ entry.state = EntryState::Normal;
+ entry.size = SIZE_FROM_OTHER_PARENT;
+ entry.mtime = MTIME_UNSET;
+ } else if possibly_dirty {
+ entry.state = EntryState::Normal;
+ entry.size = SIZE_NON_NORMAL;
+ entry.mtime = MTIME_UNSET;
+ } else {
+ entry.state = EntryState::Normal;
+ entry.size = entry.size & V1_RANGEMASK;
+ entry.mtime = entry.mtime & V1_RANGEMASK;
+ }
+ let old_state = match self.get(filename) {
+ Some(e) => e.state,
+ None => EntryState::Unknown,
+ };
if old_state == EntryState::Unknown || old_state == EntryState::Removed
{
if let Some(ref mut dirs) = self.dirs {
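
As the XXX comment above suggests, the four mutually exclusive booleans taken by `add_file` would read more naturally as a single enum. A possible shape, sketched only to illustrate that suggestion (hypothetical, not part of this patch):

    /// Hypothetical replacement for the `added`/`merged`/`from_p2`/
    /// `possibly_dirty` flags of `add_file`; each variant corresponds to one
    /// branch of the `if`/`else if` chain above.
    enum AddFileReason {
        /// `added == true`: the file has just become tracked.
        Added,
        /// `merged == true`: the entry is recorded during a merge.
        Merged,
        /// `from_p2 == true`: the content comes from the second parent.
        FromP2,
        /// `possibly_dirty == true`: size and mtime must not be trusted.
        PossiblyDirty,
        /// No flag set: a plain, clean normal entry.
        Normal,
    }
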
@@ -98,13 +124,13 @@ impl DirstateMap {
}
self.state_map.insert(filename.to_owned(), entry.to_owned());
- if entry.state != EntryState::Normal || entry.mtime == MTIME_UNSET {
+ if entry.is_non_normal() {
self.get_non_normal_other_parent_entries()
.0
.insert(filename.to_owned());
}
- if entry.size == SIZE_FROM_OTHER_PARENT {
+ if entry.is_from_other_parent() {
self.get_non_normal_other_parent_entries()
.1
.insert(filename.to_owned());
@@ -120,9 +146,34 @@ impl DirstateMap {
pub fn remove_file(
&mut self,
filename: &HgPath,
- old_state: EntryState,
- size: i32,
- ) -> Result<(), DirstateMapError> {
+ in_merge: bool,
+ ) -> Result<(), DirstateError> {
+ let old_entry_opt = self.get(filename);
+ let old_state = match old_entry_opt {
+ Some(e) => e.state,
+ None => EntryState::Unknown,
+ };
+ let mut size = 0;
+ if in_merge {
+ // XXX we should not be able to have an 'm' state and 'FROM_P2'
+ // outside of a merge, so I (marmoute) am not sure we need the
+ // conditional at all. Double-checking this with an assert
+ // would be nice.
+ if let Some(old_entry) = old_entry_opt {
+ // backup the previous state
+ if old_entry.state == EntryState::Merged {
+ size = SIZE_NON_NORMAL;
+ } else if old_entry.state == EntryState::Normal
+ && old_entry.size == SIZE_FROM_OTHER_PARENT
+ {
+ // other parent
+ size = SIZE_FROM_OTHER_PARENT;
+ self.get_non_normal_other_parent_entries()
+ .1
+ .insert(filename.to_owned());
+ }
+ }
+ }
if old_state != EntryState::Unknown && old_state != EntryState::Removed
{
if let Some(ref mut dirs) = self.dirs {
@@ -134,10 +185,10 @@ impl DirstateMap {
all_dirs.add_path(filename)?;
}
}
+ if size == 0 {
+ self.copy_map.remove(filename);
+ }
- if let Some(ref mut file_fold_map) = self.file_fold_map {
- file_fold_map.remove(&normalize_case(filename));
- }
self.state_map.insert(
filename.to_owned(),
DirstateEntry {
@@ -158,8 +209,11 @@ impl DirstateMap {
pub fn drop_file(
&mut self,
filename: &HgPath,
- old_state: EntryState,
- ) -> Result<bool, DirstateMapError> {
+ ) -> Result<bool, DirstateError> {
+ let old_state = match self.get(filename) {
+ Some(e) => e.state,
+ None => EntryState::Unknown,
+ };
let exists = self.state_map.remove(filename).is_some();
if exists {
@@ -172,9 +226,6 @@ impl DirstateMap {
all_dirs.delete_path(filename)?;
}
}
- if let Some(ref mut file_fold_map) = self.file_fold_map {
- file_fold_map.remove(&normalize_case(filename));
- }
self.get_non_normal_other_parent_entries()
.0
.remove(filename);
@@ -188,21 +239,13 @@ impl DirstateMap {
now: i32,
) {
for filename in filenames {
- let mut changed = false;
if let Some(entry) = self.state_map.get_mut(&filename) {
- if entry.state == EntryState::Normal && entry.mtime == now {
- changed = true;
- *entry = DirstateEntry {
- mtime: MTIME_UNSET,
- ..*entry
- };
+ if entry.clear_ambiguous_mtime(now) {
+ self.get_non_normal_other_parent_entries()
+ .0
+ .insert(filename.to_owned());
}
}
- if changed {
- self.get_non_normal_other_parent_entries()
- .0
- .insert(filename.to_owned());
- }
}
}
@@ -214,6 +257,13 @@ impl DirstateMap {
.0
.remove(key.as_ref())
}
+
+ pub fn non_normal_entries_add(&mut self, key: impl AsRef<HgPath>) {
+ self.get_non_normal_other_parent_entries()
+ .0
+ .insert(key.as_ref().into());
+ }
+
pub fn non_normal_entries_union(
&mut self,
+ other: HashSet<HgPathBuf>,
@@ -264,18 +314,11 @@ impl DirstateMap {
let mut non_normal = HashSet::new();
let mut other_parent = HashSet::new();
- for (
- filename,
- DirstateEntry {
- state, size, mtime, ..
- },
- ) in self.state_map.iter()
- {
- if *state != EntryState::Normal || *mtime == MTIME_UNSET {
+ for (filename, entry) in self.state_map.iter() {
+ if entry.is_non_normal() {
non_normal.insert(filename.to_owned());
}
- if *state == EntryState::Normal && *size == SIZE_FROM_OTHER_PARENT
- {
+ if entry.is_from_other_parent() {
other_parent.insert(filename.to_owned());
}
}
@@ -287,18 +330,20 @@ impl DirstateMap {
/// emulate a Python lazy property, but it is ugly and unidiomatic.
/// TODO One day, rewriting this struct using the typestate might be a
/// good idea.
- pub fn set_all_dirs(&mut self) -> Result<(), DirstateMapError> {
+ pub fn set_all_dirs(&mut self) -> Result<(), DirstateError> {
if self.all_dirs.is_none() {
- self.all_dirs =
- Some(DirsMultiset::from_dirstate(&self.state_map, None)?);
+ self.all_dirs = Some(DirsMultiset::from_dirstate(
+ self.state_map.iter().map(|(k, v)| Ok((k, *v))),
+ None,
+ )?);
}
Ok(())
}
- pub fn set_dirs(&mut self) -> Result<(), DirstateMapError> {
+ pub fn set_dirs(&mut self) -> Result<(), DirstateError> {
if self.dirs.is_none() {
self.dirs = Some(DirsMultiset::from_dirstate(
- &self.state_map,
+ self.state_map.iter().map(|(k, v)| Ok((k, *v))),
Some(EntryState::Removed),
)?);
}
@@ -308,7 +353,7 @@ impl DirstateMap {
pub fn has_tracked_dir(
&mut self,
directory: &HgPath,
- ) -> Result<bool, DirstateMapError> {
+ ) -> Result<bool, DirstateError> {
self.set_dirs()?;
Ok(self.dirs.as_ref().unwrap().contains(directory))
}
@@ -316,51 +361,16 @@ impl DirstateMap {
pub fn has_dir(
&mut self,
directory: &HgPath,
- ) -> Result<bool, DirstateMapError> {
+ ) -> Result<bool, DirstateError> {
self.set_all_dirs()?;
Ok(self.all_dirs.as_ref().unwrap().contains(directory))
}
- pub fn parents(
+ #[timed]
+ pub fn read(
&mut self,
file_contents: &[u8],
- ) -> Result<&DirstateParents, DirstateError> {
- if let Some(ref parents) = self.parents {
- return Ok(parents);
- }
- let parents;
- if file_contents.len() == PARENT_SIZE * 2 {
- parents = DirstateParents {
- p1: file_contents[..PARENT_SIZE].try_into().unwrap(),
- p2: file_contents[PARENT_SIZE..PARENT_SIZE * 2]
- .try_into()
- .unwrap(),
- };
- } else if file_contents.is_empty() {
- parents = DirstateParents {
- p1: NULL_NODE,
- p2: NULL_NODE,
- };
- } else {
- return Err(
- HgError::corrupted("Dirstate appears to be damaged").into()
- );
- }
-
- self.parents = Some(parents);
- Ok(self.parents.as_ref().unwrap())
- }
-
- pub fn set_parents(&mut self, parents: &DirstateParents) {
- self.parents = Some(parents.clone());
- self.dirty_parents = true;
- }
-
- #[timed]
- pub fn read<'a>(
- &mut self,
- file_contents: &'a [u8],
- ) -> Result