[15 newly added files (mode 100644) are omitted here: the diff viewer truncated their contents as too big to render.]
@@ -158,7 +158,7 @@ def build_all_windows_packages(
 
     windows.synchronize_hg(SOURCE_ROOT, revision, instance)
 
-    for py_version in ("2.7", "3.7", "3.8", "3.9"):
+    for py_version in ("2.7", "3.7", "3.8", "3.9", "3.10"):
         for arch in ("x86", "x64"):
             windows.purge_hg(winrm_client)
             windows.build_wheel(
@@ -377,7 +377,7 @@ def get_parser():
     sp.add_argument(
         '--python-version',
         help='Python version to build for',
-        choices={'2.7', '3.7', '3.8', '3.9'},
+        choices={'2.7', '3.7', '3.8', '3.9', '3.10'},
         nargs='*',
         default=['3.8'],
     )
@@ -501,7 +501,7 @@ def get_parser():
     sp.add_argument(
         '--python-version',
         help='Python version to use',
-        choices={'2.7', '3.5', '3.6', '3.7', '3.8', '3.9'},
+        choices={'2.7', '3.5', '3.6', '3.7', '3.8', '3.9', '3.10'},
         default='2.7',
     )
     sp.add_argument(
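As an aside, argparse checks every supplied value against "choices" individually when nargs='*' is used, which is what makes the new '3.10' entry accepted alongside the others. A minimal standalone sketch of that behavior (an illustration only, not the actual parser from this file):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--python-version',
        choices={'2.7', '3.7', '3.8', '3.9', '3.10'},
        nargs='*',
        default=['3.8'],
    )
    # each listed value is validated against the choices set
    args = parser.parse_args(['--python-version', '3.9', '3.10'])
    print(args.python_version)  # ['3.9', '3.10']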
@@ -129,6 +129,8 b" WHEEL_FILENAME_PYTHON38_X86 = 'mercurial" | |||||
129 | WHEEL_FILENAME_PYTHON38_X64 = 'mercurial-{version}-cp38-cp38-win_amd64.whl' |
|
129 | WHEEL_FILENAME_PYTHON38_X64 = 'mercurial-{version}-cp38-cp38-win_amd64.whl' | |
130 | WHEEL_FILENAME_PYTHON39_X86 = 'mercurial-{version}-cp39-cp39-win32.whl' |
|
130 | WHEEL_FILENAME_PYTHON39_X86 = 'mercurial-{version}-cp39-cp39-win32.whl' | |
131 | WHEEL_FILENAME_PYTHON39_X64 = 'mercurial-{version}-cp39-cp39-win_amd64.whl' |
|
131 | WHEEL_FILENAME_PYTHON39_X64 = 'mercurial-{version}-cp39-cp39-win_amd64.whl' | |
|
132 | WHEEL_FILENAME_PYTHON310_X86 = 'mercurial-{version}-cp310-cp310-win32.whl' | |||
|
133 | WHEEL_FILENAME_PYTHON310_X64 = 'mercurial-{version}-cp310-cp310-win_amd64.whl' | |||
132 |
|
134 | |||
133 | EXE_FILENAME_PYTHON2_X86 = 'Mercurial-{version}-x86-python2.exe' |
|
135 | EXE_FILENAME_PYTHON2_X86 = 'Mercurial-{version}-x86-python2.exe' | |
134 | EXE_FILENAME_PYTHON2_X64 = 'Mercurial-{version}-x64-python2.exe' |
|
136 | EXE_FILENAME_PYTHON2_X64 = 'Mercurial-{version}-x64-python2.exe' | |
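These constants are plain str.format templates; for illustration, here is how one of the new Python 3.10 names expands (the version value below is hypothetical):

    WHEEL_FILENAME_PYTHON310_X64 = 'mercurial-{version}-cp310-cp310-win_amd64.whl'
    # a version of '6.0' would yield 'mercurial-6.0-cp310-cp310-win_amd64.whl'
    print(WHEEL_FILENAME_PYTHON310_X64.format(version='6.0'))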
@@ -480,6 +482,8 @@ def resolve_wheel_artifacts(dist_path: p
         dist_path / WHEEL_FILENAME_PYTHON38_X64.format(version=version),
         dist_path / WHEEL_FILENAME_PYTHON39_X86.format(version=version),
         dist_path / WHEEL_FILENAME_PYTHON39_X64.format(version=version),
+        dist_path / WHEEL_FILENAME_PYTHON310_X86.format(version=version),
+        dist_path / WHEEL_FILENAME_PYTHON310_X64.format(version=version),
     )
 
 
@@ -493,6 +497,8 @@ def resolve_all_artifacts(dist_path: pat
         dist_path / WHEEL_FILENAME_PYTHON38_X64.format(version=version),
         dist_path / WHEEL_FILENAME_PYTHON39_X86.format(version=version),
         dist_path / WHEEL_FILENAME_PYTHON39_X64.format(version=version),
+        dist_path / WHEEL_FILENAME_PYTHON310_X86.format(version=version),
+        dist_path / WHEEL_FILENAME_PYTHON310_X64.format(version=version),
         dist_path / EXE_FILENAME_PYTHON2_X86.format(version=version),
         dist_path / EXE_FILENAME_PYTHON2_X64.format(version=version),
         dist_path / EXE_FILENAME_PYTHON3_X86.format(version=version),
@@ -56,6 +56,11 @@ rust-cargo-test-py3:
 
 phabricator-refresh:
     stage: phabricator
+    rules:
+      - if: '"$PHABRICATOR_TOKEN" != "NO-PHAB"'
+        when: on_success
+      - if: '"$PHABRICATOR_TOKEN" == "NO-PHAB"'
+        when: never
     variables:
         DEFAULT_COMMENT: ":white_check_mark: refresh by Heptapod after a successful CI run (:octopus: :green_heart:)"
        STABLE_COMMENT: ":white_check_mark: refresh by Heptapod after a successful CI run (:octopus: :green_heart:)\n⚠ This patch is intended for stable ⚠\n{image https://media.giphy.com/media/nYI8SmmChYXK0/source.gif}"
@@ -29,10 +29,15 @@
 $PYTHON38_x64_URL = "https://www.python.org/ftp/python/3.8.10/python-3.8.10-amd64.exe"
 $PYTHON38_x64_SHA256 = "7628244cb53408b50639d2c1287c659f4e29d3dfdb9084b11aed5870c0c6a48a"
 
-$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.
-$PYTHON39_x86_SHA256 = "505129081a839b699a6ab9064b441ad922ef03767b5dd4241fd0c2166baf64de"
-$PYTHON39_
-$PYTHON39_x64_SHA256 = "84d5243088ba00c11e51905c704dbe041040dfff044f4e1ce5476844ee2e6eac"
+$PYTHON39_x86_URL = "https://www.python.org/ftp/python/3.9.9/python-3.9.9.exe"
+$PYTHON39_x86_SHA256 = "6646a5683adf14d35e8c53aab946895bc0f0b825f7acac3a62cc85ee7d0dc71a"
+$PYTHON39_X64_URL = "https://www.python.org/ftp/python/3.9.9/python-3.9.9-amd64.exe"
+$PYTHON39_x64_SHA256 = "137d59e5c0b01a8f1bdcba08344402ae658c81c6bf03b6602bd8b4e951ad0714"
+
+$PYTHON310_x86_URL = "https://www.python.org/ftp/python/3.10.0/python-3.10.0.exe"
+$PYTHON310_x86_SHA256 = "ea896eeefb1db9e12fb89ec77a6e28c9fe52b4a162a34c85d9688be2ec2392e8"
+$PYTHON310_X64_URL = "https://www.python.org/ftp/python/3.10.0/python-3.10.0-amd64.exe"
+$PYTHON310_x64_SHA256 = "cb580eb7dc55f9198e650f016645023e8b2224cf7d033857d12880b46c5c94ef"
 
 # PIP 19.2.3.
 $PIP_URL = "https://github.com/pypa/get-pip/raw/309a56c5fd94bd1134053a541cb4657a4e47e09d/get-pip.py"
@@ -132,6 +137,8 @@ function Install-Dependencies($prefix) {
     Secure-Download $PYTHON38_x64_URL ${prefix}\assets\python38-x64.exe $PYTHON38_x64_SHA256
     Secure-Download $PYTHON39_x86_URL ${prefix}\assets\python39-x86.exe $PYTHON39_x86_SHA256
     Secure-Download $PYTHON39_x64_URL ${prefix}\assets\python39-x64.exe $PYTHON39_x64_SHA256
+    Secure-Download $PYTHON310_x86_URL ${prefix}\assets\python310-x86.exe $PYTHON310_x86_SHA256
+    Secure-Download $PYTHON310_x64_URL ${prefix}\assets\python310-x64.exe $PYTHON310_x64_SHA256
     Secure-Download $PIP_URL ${pip} $PIP_SHA256
     Secure-Download $VS_BUILD_TOOLS_URL ${prefix}\assets\vs_buildtools.exe $VS_BUILD_TOOLS_SHA256
     Secure-Download $INNO_SETUP_URL ${prefix}\assets\InnoSetup.exe $INNO_SETUP_SHA256
@@ -146,6 +153,8 @@ function Install-Dependencies($prefix) {
     # Install-Python3 "Python 3.8 64-bit" ${prefix}\assets\python38-x64.exe ${prefix}\python38-x64 ${pip}
     Install-Python3 "Python 3.9 32-bit" ${prefix}\assets\python39-x86.exe ${prefix}\python39-x86 ${pip}
     Install-Python3 "Python 3.9 64-bit" ${prefix}\assets\python39-x64.exe ${prefix}\python39-x64 ${pip}
+    Install-Python3 "Python 3.10 32-bit" ${prefix}\assets\python310-x86.exe ${prefix}\python310-x86 ${pip}
+    Install-Python3 "Python 3.10 64-bit" ${prefix}\assets\python310-x64.exe ${prefix}\python310-x64 ${pip}
 
     Write-Output "installing Visual Studio 2017 Build Tools and SDKs"
     Invoke-Process ${prefix}\assets\vs_buildtools.exe "--quiet --wait --norestart --nocache --channelUri https://aka.ms/vs/15/release/channel --add Microsoft.VisualStudio.Workload.MSBuildTools --add Microsoft.VisualStudio.Component.Windows10SDK.17763 --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Component.Windows10SDK --add Microsoft.VisualStudio.Component.VC.140"
@@ -1,68 +1,84 @@
 #
-# This file is autogenerated by pip-compile
+# This file is autogenerated by pip-compile with python 3.7
 # To update, run:
 #
 #    pip-compile --generate-hashes --output-file=contrib/packaging/requirements-windows-py3.txt contrib/packaging/requirements-windows.txt.in
 #
 atomicwrites==1.4.0 \
     --hash=sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197 \
     --hash=sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a
     # via pytest
 attrs==21.2.0 \
     --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \
     --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb
     # via pytest
 cached-property==1.5.2 \
     --hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \
     --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0
     # via pygit2
 certifi==2021.5.30 \
     --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
     --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8
     # via dulwich
-cffi==1.1
-    --hash=sha256:00a1ba5e2e95684448de9b89888ccd02c98d512064b4cb987d48f4b40aa0421e \
-    --hash=sha256:00e28066507bfc3fe865a31f325c8391a1ac2916219340f87dfad602c3e48e5d \
-    --hash=sha256:045d792900a75e8b1e1b0ab6787dd733a8190ffcf80e8c8ceb2fb10a29ff238a \
-    --hash=sha256:0638c3ae1a0edfb77c6765d487fee624d2b1ee1bdfeffc1f0b58c64d149e7eec \
-    --hash=sha256:105abaf8a6075dc96c1fe5ae7aae073f4696f2905fde6aeada4c9d2926752362 \
-    --hash=sha256:155136b51fd733fa94e1c2ea5211dcd4c8879869008fc811648f16541bf99668 \
-    --hash=sha256:1a465cbe98a7fd391d47dce4b8f7e5b921e6cd805ef421d04f5f66ba8f06086c \
-    --hash=sha256:1d2c4994f515e5b485fd6d3a73d05526aa0fcf248eb135996b088d25dfa1865b \
-    --hash=sha256:2c24d61263f511551f740d1a065eb0212db1dbbbbd241db758f5244281590c06 \
-    --hash=sha256:51a8b381b16ddd370178a65360ebe15fbc1c71cf6f584613a7ea08bfad946698 \
-    --hash=sha256:594234691ac0e9b770aee9fcdb8fa02c22e43e5c619456efd0d6c2bf276f3eb2 \
-    --hash=sha256:5cf4be6c304ad0b6602f5c4e90e2f59b47653ac1ed9c662ed379fe48a8f26b0c \
-    --hash=sha256:64081b3f8f6f3c3de6191ec89d7dc6c86a8a43911f7ecb422c60e90c70be41c7 \
-    --hash=sha256:6bc25fc545a6b3d57b5f8618e59fc13d3a3a68431e8ca5fd4c13241cd70d0009 \
-    --hash=sha256:798caa2a2384b1cbe8a2a139d80734c9db54f9cc155c99d7cc92441a23871c03 \
-    --hash=sha256:7c6b1dece89874d9541fc974917b631406233ea0440d0bdfbb8e03bf39a49b3b \
-    --hash=sha256:840793c68105fe031f34d6a086eaea153a0cd5c491cde82a74b420edd0a2b909 \
-    --hash=sha256:8d6603078baf4e11edc4168a514c5ce5b3ba6e3e9c374298cb88437957960a53 \
-    --hash=sha256:9cc46bc107224ff5b6d04369e7c595acb700c3613ad7bcf2e2012f62ece80c35 \
-    --hash=sha256:9f7a31251289b2ab6d4012f6e83e58bc3b96bd151f5b5262467f4bb6b34a7c26 \
-    --hash=sha256:9ffb888f19d54a4d4dfd4b3f29bc2c16aa4972f1c2ab9c4ab09b8ab8685b9c2b \
-    --hash=sha256:a7711edca4dcef1a75257b50a2fbfe92a65187c47dab5a0f1b9b332c5919a3fb \
-    --hash=sha256:af5c59122a011049aad5dd87424b8e65a80e4a6477419c0c1015f73fb5ea0293 \
-    --hash=sha256:b18e0a9ef57d2b41f5c68beefa32317d286c3d6ac0484efd10d6e07491bb95dd \
-    --hash=sha256:b4e248d1087abf9f4c10f3c398896c87ce82a9856494a7155823eb45a892395d \
-    --hash=sha256:ba4e9e0ae13fc41c6b23299545e5ef73055213e466bd107953e4a013a5ddd7e3 \
-    --hash=sha256:c6332685306b6417a91b1ff9fae889b3ba65c2292d64bd9245c093b1b284809d \
-    --hash=sha256:d9efd8b7a3ef378dd61a1e77367f1924375befc2eba06168b6ebfa903a5e59ca \
-    --hash=sha256:df5169c4396adc04f9b0a05f13c074df878b6052430e03f50e68adf3a57aa28d \
-    --hash=sha256:ebb253464a5d0482b191274f1c8bf00e33f7e0b9c66405fbffc61ed2c839c775 \
-    --hash=sha256:ec80dc47f54e6e9a78181ce05feb71a0353854cc26999db963695f950b5fb375 \
-    --hash=sha256:f032b34669220030f905152045dfa27741ce1a6db3324a5bc0b96b6c7420c87b \
-    --hash=sha256:f60567825f791c6f8a592f3c6e3bd93dd2934e3f9dac189308426bd76b00ef3b \
-    --hash=sha256:f803eaa94c2fcda012c047e62bc7a51b0bdabda1cad7a92a522694ea2d76e49f \
+cffi==1.15.0 \
+    --hash=sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3 \
+    --hash=sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2 \
+    --hash=sha256:06c48159c1abed75c2e721b1715c379fa3200c7784271b3c46df01383b593636 \
+    --hash=sha256:0808014eb713677ec1292301ea4c81ad277b6cdf2fdd90fd540af98c0b101d20 \
+    --hash=sha256:10dffb601ccfb65262a27233ac273d552ddc4d8ae1bf93b21c94b8511bffe728 \
+    --hash=sha256:14cd121ea63ecdae71efa69c15c5543a4b5fbcd0bbe2aad864baca0063cecf27 \
+    --hash=sha256:17771976e82e9f94976180f76468546834d22a7cc404b17c22df2a2c81db0c66 \
+    --hash=sha256:181dee03b1170ff1969489acf1c26533710231c58f95534e3edac87fff06c443 \
+    --hash=sha256:23cfe892bd5dd8941608f93348c0737e369e51c100d03718f108bf1add7bd6d0 \
+    --hash=sha256:263cc3d821c4ab2213cbe8cd8b355a7f72a8324577dc865ef98487c1aeee2bc7 \
+    --hash=sha256:2756c88cbb94231c7a147402476be2c4df2f6078099a6f4a480d239a8817ae39 \
+    --hash=sha256:27c219baf94952ae9d50ec19651a687b826792055353d07648a5695413e0c605 \
+    --hash=sha256:2a23af14f408d53d5e6cd4e3d9a24ff9e05906ad574822a10563efcef137979a \
+    --hash=sha256:31fb708d9d7c3f49a60f04cf5b119aeefe5644daba1cd2a0fe389b674fd1de37 \
+    --hash=sha256:3415c89f9204ee60cd09b235810be700e993e343a408693e80ce7f6a40108029 \
+    --hash=sha256:3773c4d81e6e818df2efbc7dd77325ca0dcb688116050fb2b3011218eda36139 \
+    --hash=sha256:3b96a311ac60a3f6be21d2572e46ce67f09abcf4d09344c49274eb9e0bf345fc \
+    --hash=sha256:3f7d084648d77af029acb79a0ff49a0ad7e9d09057a9bf46596dac9514dc07df \
+    --hash=sha256:41d45de54cd277a7878919867c0f08b0cf817605e4eb94093e7516505d3c8d14 \
+    --hash=sha256:4238e6dab5d6a8ba812de994bbb0a79bddbdf80994e4ce802b6f6f3142fcc880 \
+    --hash=sha256:45db3a33139e9c8f7c09234b5784a5e33d31fd6907800b316decad50af323ff2 \
+    --hash=sha256:45e8636704eacc432a206ac7345a5d3d2c62d95a507ec70d62f23cd91770482a \
+    --hash=sha256:4958391dbd6249d7ad855b9ca88fae690783a6be9e86df65865058ed81fc860e \
+    --hash=sha256:4a306fa632e8f0928956a41fa8e1d6243c71e7eb59ffbd165fc0b41e316b2474 \
+    --hash=sha256:57e9ac9ccc3101fac9d6014fba037473e4358ef4e89f8e181f8951a2c0162024 \
+    --hash=sha256:59888172256cac5629e60e72e86598027aca6bf01fa2465bdb676d37636573e8 \
+    --hash=sha256:5e069f72d497312b24fcc02073d70cb989045d1c91cbd53979366077959933e0 \
+    --hash=sha256:64d4ec9f448dfe041705426000cc13e34e6e5bb13736e9fd62e34a0b0c41566e \
+    --hash=sha256:6dc2737a3674b3e344847c8686cf29e500584ccad76204efea14f451d4cc669a \
+    --hash=sha256:74fdfdbfdc48d3f47148976f49fab3251e550a8720bebc99bf1483f5bfb5db3e \
+    --hash=sha256:75e4024375654472cc27e91cbe9eaa08567f7fbdf822638be2814ce059f58032 \
+    --hash=sha256:786902fb9ba7433aae840e0ed609f45c7bcd4e225ebb9c753aa39725bb3e6ad6 \
+    --hash=sha256:8b6c2ea03845c9f501ed1313e78de148cd3f6cad741a75d43a29b43da27f2e1e \
+    --hash=sha256:91d77d2a782be4274da750752bb1650a97bfd8f291022b379bb8e01c66b4e96b \
+    --hash=sha256:91ec59c33514b7c7559a6acda53bbfe1b283949c34fe7440bcf917f96ac0723e \
+    --hash=sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954 \
+    --hash=sha256:a5263e363c27b653a90078143adb3d076c1a748ec9ecc78ea2fb916f9b861962 \
+    --hash=sha256:abb9a20a72ac4e0fdb50dae135ba5e77880518e742077ced47eb1499e29a443c \
+    --hash=sha256:c2051981a968d7de9dd2d7b87bcb9c939c74a34626a6e2f8181455dd49ed69e4 \
+    --hash=sha256:c21c9e3896c23007803a875460fb786118f0cdd4434359577ea25eb556e34c55 \
+    --hash=sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962 \
+    --hash=sha256:d4d692a89c5cf08a8557fdeb329b82e7bf609aadfaed6c0d79f5a449a3c7c023 \
+    --hash=sha256:da5db4e883f1ce37f55c667e5c0de439df76ac4cb55964655906306918e7363c \
+    --hash=sha256:e7022a66d9b55e93e1a845d8c9eba2a1bebd4966cd8bfc25d9cd07d515b33fa6 \
+    --hash=sha256:ef1f279350da2c586a69d32fc8733092fd32cc8ac95139a00377841f59a3f8d8 \
+    --hash=sha256:f54a64f8b0c8ff0b64d18aa76675262e1700f3995182267998c31ae974fbc382 \
+    --hash=sha256:f5c7150ad32ba43a07c4479f40241756145a1f03b43480e058cfd862bf5041c7 \
+    --hash=sha256:f6f824dc3bce0edab5f427efcfb1d63ee75b6fcb7282900ccaf925be84efb0fc \
+    --hash=sha256:fd8a250edc26254fe5b33be00402e6d287f562b6a5b2152dec302fa15bb3e997 \
+    --hash=sha256:ffaa5c925128e29efbde7301d8ecaf35c8c60ffbcd6a1ffd3a552177c8e5e796
     # via pygit2
 colorama==0.4.4 \
     --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \
     --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2
     # via pytest
 docutils==0.16 \
     --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
     --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc
     # via -r contrib/packaging/requirements-windows.txt.in
 dulwich==0.20.6 ; python_version >= "3" \
     --hash=sha256:1ccd55e38fa9f169290f93e027ab4508202f5bdd6ef534facac4edd3f6903f0d \
@@ -77,26 +93,29 b' dulwich==0.20.6 ; python_version >= "3" ' | |||||
77 | --hash=sha256:8f7a7f973be2beedfb10dd8d3eb6bdf9ec466c72ad555704897cbd6357fe5021 \ |
|
93 | --hash=sha256:8f7a7f973be2beedfb10dd8d3eb6bdf9ec466c72ad555704897cbd6357fe5021 \ | |
78 | --hash=sha256:bea6e6caffc6c73bfd1647714c5715ab96ac49deb8beb8b67511529afa25685a \ |
|
94 | --hash=sha256:bea6e6caffc6c73bfd1647714c5715ab96ac49deb8beb8b67511529afa25685a \ | |
79 | --hash=sha256:e5871b86a079e9e290f52ab14559cea1b694a0b8ed2b9ebb898f6ced7f14a406 \ |
|
95 | --hash=sha256:e5871b86a079e9e290f52ab14559cea1b694a0b8ed2b9ebb898f6ced7f14a406 \ | |
80 |
--hash=sha256:e593f514b8ac740b4ceeb047745b4719bfc9f334904245c6edcb3a9d002f577b |
|
96 | --hash=sha256:e593f514b8ac740b4ceeb047745b4719bfc9f334904245c6edcb3a9d002f577b | |
81 | # via -r contrib/packaging/requirements-windows.txt.in |
|
97 | # via -r contrib/packaging/requirements-windows.txt.in | |
82 | fuzzywuzzy==0.18.0 \ |
|
98 | fuzzywuzzy==0.18.0 \ | |
83 |
--hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 |
|
99 | --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 | |
84 | # via -r contrib/packaging/requirements-windows.txt.in |
|
100 | # via -r contrib/packaging/requirements-windows.txt.in | |
85 | idna==3.2 \ |
|
101 | idna==3.2 \ | |
86 | --hash=sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a \ |
|
102 | --hash=sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a \ | |
87 |
--hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 |
|
103 | --hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 | |
88 | # via yarl |
|
104 | # via yarl | |
89 | importlib-metadata==3.1.0 \ |
|
105 | importlib-metadata==3.1.0 \ | |
90 | --hash=sha256:590690d61efdd716ff82c39ca9a9d4209252adfe288a4b5721181050acbd4175 \ |
|
106 | --hash=sha256:590690d61efdd716ff82c39ca9a9d4209252adfe288a4b5721181050acbd4175 \ | |
91 |
--hash=sha256:d9b8a46a0885337627a6430db287176970fff18ad421becec1d64cfc763c2099 |
|
107 | --hash=sha256:d9b8a46a0885337627a6430db287176970fff18ad421becec1d64cfc763c2099 | |
92 | # via keyring, pluggy, pytest |
|
108 | # via | |
|
109 | # keyring | |||
|
110 | # pluggy | |||
|
111 | # pytest | |||
93 | iniconfig==1.1.1 \ |
|
112 | iniconfig==1.1.1 \ | |
94 | --hash=sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3 \ |
|
113 | --hash=sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3 \ | |
95 |
--hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32 |
|
114 | --hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32 | |
96 | # via pytest |
|
115 | # via pytest | |
97 | keyring==21.4.0 \ |
|
116 | keyring==21.4.0 \ | |
98 | --hash=sha256:4e34ea2fdec90c1c43d6610b5a5fafa1b9097db1802948e90caf5763974b8f8d \ |
|
117 | --hash=sha256:4e34ea2fdec90c1c43d6610b5a5fafa1b9097db1802948e90caf5763974b8f8d \ | |
99 |
--hash=sha256:9aeadd006a852b78f4b4ef7c7556c2774d2432bbef8ee538a3e9089ac8b11466 |
|
118 | --hash=sha256:9aeadd006a852b78f4b4ef7c7556c2774d2432bbef8ee538a3e9089ac8b11466 | |
100 | # via -r contrib/packaging/requirements-windows.txt.in |
|
119 | # via -r contrib/packaging/requirements-windows.txt.in | |
101 | multidict==5.1.0 \ |
|
120 | multidict==5.1.0 \ | |
102 | --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \ |
|
121 | --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \ | |
@@ -135,62 +154,68 @@ multidict==5.1.0 \
     --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \
     --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \
     --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \
     --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80
     # via yarl
 packaging==21.0 \
     --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \
     --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14
     # via pytest
 pluggy==0.13.1 \
     --hash=sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0 \
     --hash=sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d
     # via pytest
 py==1.10.0 \
     --hash=sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3 \
     --hash=sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a
     # via pytest
-pycparser==2.2
-    --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \
-    --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 \
+pycparser==2.21 \
+    --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \
+    --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206
     # via cffi
-pygit2==1.
-    --hash=sha256:0d298098e286eeda000e49ca7e1b41f87300e10dd8b9d06b32b008bd61f50b83 \
-    --hash=sha256:0ee135eb2cd8b07ce1374f3596cc5c3213472d6389bad6a4c5d87d8e267e93e9 \
-    --hash=sha256:32eb863d6651d4890ced318505ea8dc229bd9637deaf29c898de1ab574d727a0 \
-    --hash=sha256:37d6d7d6d7804c42a0fe23425c72e38093488525092fc5e51a05684e63503ce7 \
-    --hash=sha256:41204b6f3406d9f53147710f3cc485d77181ba67f57c34d36b7c86de1c14a18c \
-    --hash=sha256:818c91b582109d90580c5da74af783738838353f15eb12eeb734d80a974b05a3 \
-    --hash=sha256:8306a302487dac67df7af6a064bb37e8a8eb4138958f9560ff49ff162e185dab \
-    --hash=sha256:9c2f2d9ef59513007b66f6534b000792b614de3faf60313a0a68f6b8571aea85 \
-    --hash=sha256:9c8d5881eb709e2e2e13000b507a131bd5fb91a879581030088d0ddffbcd19af \
-    --hash=sha256:b422e417739def0a136a6355723dfe8a5ffc83db5098076f28a14f1d139779c1 \
-    --hash=sha256:cbeb38ab1df9b5d8896548a11e63aae8a064763ab5f1eabe4475e6b8a78ee1c8 \
-    --hash=sha256:cf00481ddf053e549a6edd0216bdc267b292d261eae02a67bb3737de920cbf88 \
-    --hash=sha256:d0d889144e9487d926fecea947c3f39ce5f477e521d7d467d2e66907e4cd657d \
-    --hash=sha256:ddb7a1f6d38063e8724abfa1cfdfb0f9b25014b8bca0546274b7a84b873a3888 \
-    --hash=sha256:e9037a7d810750fe23c9f5641ef14a0af2525ff03e14752cd4f73e1870ecfcb0 \
-    --hash=sha256:ec5c0365a9bdfcac1609d20868507b28685ec5ea7cc3a2c903c9b62ef2e0bbc0 \
-    --hash=sha256:fdd8ba30cda277290e000322f505132f590cf89bd7d31829b45a3cb57447ec32 \
+pygit2==1.7.1 ; python_version >= "3" \
+    --hash=sha256:2c9e95efb86c0b32cc07c26be3d179e851ca4a7899c47fef63c4203963144f5e \
+    --hash=sha256:3ddacbf461652d3d4900382f821d9fbd5ae2dedecd7862b5245842419ad0ccba \
+    --hash=sha256:4cb0414df6089d0072ebe93ff2f34730737172dd5f0e72289567d06a6caf09c0 \
+    --hash=sha256:56e960dc74f4582bfa3ca17a1a9d542732fc93b5cf8f82574c235d06b2d61eae \
+    --hash=sha256:6b17ab922c2a2d99b30ab9222472b07732bf7261d9f9655a4ea23b4c700049d8 \
+    --hash=sha256:73a7b471f22cb59e8729016de1f447c472b3b2c1cc2b622194e5e3b48a7f5776 \
+    --hash=sha256:761a8850e33822796c1c24d411d5cc2460c04e1a74b04ae8560efd3596bbd6bd \
+    --hash=sha256:7c467e81158f5827b3bca6362e5cc9b92857eff9de65034d338c1f18524b09be \
+    --hash=sha256:7c56e10592e62610a19bd3e2a633aafe3488c57b906c7c2fde0299937f0f0b2f \
+    --hash=sha256:7cc2a8e29cc9598310a78cf58b70d9331277cf374802be8f97d97c4a9e5d8387 \
+    --hash=sha256:812670f7994f31778e873a9eced29d2bbfa91674e8be0ab1e974c8a4bda9cbab \
+    --hash=sha256:8cdb0b1d6c3d24b44f340fed143b16e64ba23fe2a449f1a5db87aaf9339a9dbe \
+    --hash=sha256:91b77a305d8d18b649396e66e832d654cd593a3d29b5728f753f254a04533812 \
+    --hash=sha256:a75bcde32238c77eb0cf7d9698a5aa899408d7ad999a5920a29a7c4b80fdeaa7 \
+    --hash=sha256:b060240cf3038e7a0706bbfc5436dd03b8d5ac797ac1d512b613f4d04b974c80 \
+    --hash=sha256:cdfa61c0428a8182e5a6a1161c017b824cd511574f080a40b10d6413774eb0ca \
+    --hash=sha256:d7faa29558436decc2e78110f38d6677eb366b683ba5cdc2803d47195711165d \
+    --hash=sha256:d831825ad9c3b3c28e6b3ef8a2401ad2d3fd4db5455427ff27175a7e254e2592 \
+    --hash=sha256:df4c477bdfac85d32a1e3180282cd829a0980aa69be9bd0f7cbd4db1778ca72b \
+    --hash=sha256:eced3529bafcaaac015d08dfaa743b3cbad37fcd5b13ae9d280b8b7f716ec5ce \
+    --hash=sha256:fec17e2da668e6bb192d777417aad9c7ca924a166d0a0b9a81a11e00362b1bc7
     # via -r contrib/packaging/requirements-windows.txt.in
 pygments==2.7.1 \
     --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \
     --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7
     # via -r contrib/packaging/requirements-windows.txt.in
 pyparsing==2.4.7 \
     --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \
     --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b
     # via packaging
-pytest-vcr==1.0.2 \
-    --hash=sha256:23ee51b75abbcc43d926272773aae4f39f93aceb75ed56852d0bf618f92e1896 \
-    # via -r contrib/packaging/requirements-windows.txt.in
 pytest==6.2.4 \
     --hash=sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b \
     --hash=sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890
     # via pytest-vcr
+pytest-vcr==1.0.2 \
+    --hash=sha256:23ee51b75abbcc43d926272773aae4f39f93aceb75ed56852d0bf618f92e1896
+    # via -r contrib/packaging/requirements-windows.txt.in
 pywin32-ctypes==0.2.0 \
     --hash=sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942 \
     --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98
-    # via -r contrib/packaging/requirements-windows.txt.in, keyring
+    # via
+    #   -r contrib/packaging/requirements-windows.txt.in
+    #   keyring
 pyyaml==5.4.1 \
     --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \
     --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \
@@ -220,41 +245,43 @@ pyyaml==5.4.1 \
     --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \
     --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \
     --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \
     --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0
     # via vcrpy
 six==1.16.0 \
     --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
     --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
     # via vcrpy
 toml==0.10.2 \
     --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
     --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f
     # via pytest
 typing-extensions==3.10.0.0 \
     --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \
     --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \
     --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84
     # via yarl
 urllib3==1.25.11 \
     --hash=sha256:8d7eaa5a82a1cac232164990f04874c594c9453ec55eef02eab885aa02fc17a2 \
     --hash=sha256:f5321fbe4bf3fefa0efd0bfe7fb14e90909eb62a48ccda331726b4319897dd5e
     # via dulwich
 vcrpy==4.1.1 \
     --hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \
     --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599
     # via pytest-vcr
-windows-curses==2.
-    --hash=sha256:1452d771ec6f9b3fef037da2b169196a9a12be4e86a6c27dd579adac70c42028 \
-    --hash=sha256:267544e4f60c09af6505e50a69d7f01d7f8a281cf4bd4fc7efc3b32b9a4ef64e \
-    --hash=sha256:389228a3df556102e72450f599283094168aa82eee189f501ad9f131a0fc92e1 \
-    --hash=sha256:84336fe470fa07288daec5c684dec74c0766fec6b3511ccedb4c494804acfbb7 \
-    --hash=sha256:9aa6ff60be76f5de696dc6dbf7897e3b1e6abcf4c0f741e9a0ee22cd6ef382f8 \
-    --hash=sha256:c4a8ce00e82635f06648cc40d99f470be4e3ffeb84f9f7ae9d6a4f68ec6361e7 \
-    --hash=sha256:c5cd032bc7d0f03224ab55c925059d98e81795098d59bbd10f7d05c7ea9677ce \
-    --hash=sha256:fc0be372fe6da3c39d7093154ce029115a927bf287f34b4c615e2b3f8c23dfaa \
+windows-curses==2.3.0 \
+    --hash=sha256:170c0d941c2e0cdf864e7f0441c1bdf0709232bf4aa7ce7f54d90fc76a4c0504 \
+    --hash=sha256:4d5fb991d1b90a41c2332f02241a1f84c8a1e6bc8f6e0d26f532d0da7a9f7b51 \
+    --hash=sha256:7a35eda4cb120b9e1a5ae795f3bc06c55b92c9d391baba6be1903285a05f3551 \
+    --hash=sha256:935be95cfdb9213f6f5d3d5bcd489960e3a8fbc9b574e7b2e8a3a3cc46efff49 \
+    --hash=sha256:a3a63a0597729e10f923724c2cf972a23ea677b400d2387dee1d668cf7116177 \
+    --hash=sha256:c860f596d28377e47f322b7382be4d3573fd76d1292234996bb7f72e0bc0ed0d \
+    --hash=sha256:cc5fa913780d60f4a40824d374a4f8ca45b4e205546e83a2d85147315a57457e \
+    --hash=sha256:d5cde8ec6d582aa77af791eca54f60858339fb3f391945f9cad11b1ab71062e3 \
+    --hash=sha256:e913dc121446d92b33fe4f5bcca26d3a34e4ad19f2af160370d57c3d1e93b4e1 \
+    --hash=sha256:fbc2131cec57e422c6660e6cdb3420aff5be5169b8e45bb7c471f884b0590a2b
     # via -r contrib/packaging/requirements-windows.txt.in
 wrapt==1.12.1 \
     --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7
     # via vcrpy
 yarl==1.6.3 \
     --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \
@@ -293,9 +320,9 @@ yarl==1.6.3 \
     --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \
     --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \
     --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \
     --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71
     # via vcrpy
 zipp==3.4.0 \
     --hash=sha256:102c24ef8f171fd729d46599845e95c7ab894a4cf45f5de11a44cc7444fb1108 \
     --hash=sha256:ed5eee1974372595f9e416cc7bbeeb12335201d8081ca8a0743c954d4446e5cb
     # via importlib-metadata
@@ -1,16 +1,16 @@
 #
-# This file is autogenerated by pip-compile
+# This file is autogenerated by pip-compile with python 3.7
 # To update, run:
 #
 #    pip-compile --generate-hashes --output-file=contrib/packaging/requirements.txt contrib/packaging/requirements.txt.in
 #
 docutils==0.16 \
     --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
     --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc
     # via -r contrib/packaging/requirements.txt.in
 jinja2==2.11.2 \
     --hash=sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0 \
     --hash=sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035
     # via -r contrib/packaging/requirements.txt.in
 markupsafe==1.1.1 \
     --hash=sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473 \
@@ -45,5 +45,5 @@ markupsafe==1.1.1 \
     --hash=sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f \
     --hash=sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2 \
     --hash=sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7 \
     --hash=sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be
     # via jinja2
@@ -1,6 +1,11 @@
 #!/bin/bash
 set -eu
 
+if [[ "$PHABRICATOR_TOKEN" == "NO-PHAB" ]]; then
+    echo 'Skipping Phabricator Step' >&2
+    exit 0
+fi
+
 revision_in_stack=`hg log \
     --rev '.#stack and ::. and topic()' \
     -T '\nONE-REV\n' \
@@ -27,6 +32,7 @@ fi
 
 if [[ "$PHABRICATOR_TOKEN" == "" ]]; then
     echo 'missing $PHABRICATOR_TOKEN variable' >&2
+    echo '(use PHABRICATOR_TOKEN="NO-PHAB" to disable this step)' >&2
     exit 2
 fi
 
@@ -13,9 +13,9 @@ from mercurial import (
     context,
     error,
     fancyopts,
-    pycompat,
     simplemerge,
     ui as uimod,
+    util,
 )
 from mercurial.utils import procutil, stringutil
 
@@ -65,6 +65,17 @@ def showhelp():
     procutil.stdout.write(b' %-*s %s\n' % (opts_len, first, second))
 
 
+def _verifytext(input, ui, quiet=False, allow_binary=False):
+    """verifies that text is non-binary (unless opts[text] is passed,
+    then we just warn)"""
+    if stringutil.binary(input.text()):
+        msg = _(b"%s looks like a binary file.") % input.fctx.path()
+        if not quiet:
+            ui.warn(_(b'warning: %s\n') % msg)
+        if not allow_binary:
+            sys.exit(1)
+
+
 try:
     for fp in (sys.stdin, procutil.stdout, sys.stderr):
         procutil.setbinary(fp)
@@ -80,16 +91,44 @@ try:
         sys.exit(0)
     if len(args) != 3:
         raise ParseError(_(b'wrong number of arguments').decode('utf8'))
+    mode = b'merge'
+    if len(opts[b'label']) > 2:
+        mode = b'merge3'
     local, base, other = args
-    sys.exit(
-        simplemerge.simplemerge(
-            uimod.ui.load(),
-            context.arbitraryfilectx(local),
-            context.arbitraryfilectx(base),
-            context.arbitraryfilectx(other),
-            **pycompat.strkwargs(opts)
-        )
+    overrides = opts[b'label']
+    if len(overrides) > 3:
+        raise error.InputError(b'can only specify three labels.')
+    labels = [local, other, base]
+    labels[: len(overrides)] = overrides
+    local_input = simplemerge.MergeInput(
+        context.arbitraryfilectx(local), labels[0]
+    )
+    other_input = simplemerge.MergeInput(
+        context.arbitraryfilectx(other), labels[1]
+    )
+    base_input = simplemerge.MergeInput(
+        context.arbitraryfilectx(base), labels[2]
     )
+
+    quiet = opts.get(b'quiet')
+    allow_binary = opts.get(b'text')
+    ui = uimod.ui.load()
+    _verifytext(local_input, ui, quiet=quiet, allow_binary=allow_binary)
+    _verifytext(base_input, ui, quiet=quiet, allow_binary=allow_binary)
+    _verifytext(other_input, ui, quiet=quiet, allow_binary=allow_binary)
+
+    merged_text, conflicts = simplemerge.simplemerge(
+        local_input,
+        base_input,
+        other_input,
+        mode,
+        allow_binary=allow_binary,
+    )
+    if opts.get(b'print'):
+        ui.fout.write(merged_text)
+    else:
+        util.writefile(local, merged_text)
+    sys.exit(1 if conflicts else 0)
 except ParseError as e:
     e = stringutil.forcebytestr(e)
     procutil.stdout.write(b"%s: %s\n" % (sys.argv[0].encode('utf8'), e))
@@ -36,7 +36,7 @@ Examples::
   maxfiles = 3
 
   [blackbox]
-  # Include
+  # Include microseconds in log entries with %f (see Python function
   # datetime.datetime.strftime)
   date-format = %Y-%m-%d @ %H:%M:%S.%f
 
@@ -101,11 +101,7 @@ configitem(
     b'ignore',
     default=lambda: [b'chgserver', b'cmdserver', b'extension'],
 )
-configitem(
-    b'blackbox',
-    b'date-format',
-    default=b'%Y/%m/%d %H:%M:%S',
-)
+configitem(b'blackbox', b'date-format', default=b'')
 
 _lastlogger = loggingutil.proxylogger()
 
@@ -138,7 +134,14 @@ class blackboxlogger(object):
 
     def _log(self, ui, event, msg, opts):
         default = ui.configdate(b'devel', b'default-date')
-        date =
+        dateformat = ui.config(b'blackbox', b'date-format')
+        if dateformat:
+            date = dateutil.datestr(default, dateformat)
+        else:
+            # We want to display milliseconds (more precision seems
+            # unnecessary). Since %.3f is not supported, use %f and truncate
+            # microseconds.
+            date = dateutil.datestr(default, b'%Y-%m-%d %H:%M:%S.%f')[:-3]
         user = procutil.getuser()
         pid = b'%d' % procutil.getpid()
         changed = b''
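Since strftime has no millisecond directive, the fallback formats with %f (microseconds) and then drops the last three digits. A quick standalone check of that trick (illustration only, using a fixed timestamp):

    from datetime import datetime

    ts = datetime(2013, 1, 23, 19, 13, 36, 123456)
    print(ts.strftime('%Y-%m-%d %H:%M:%S.%f'))       # 2013-01-23 19:13:36.123456
    print(ts.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3])  # 2013-01-23 19:13:36.123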
@@ -224,8 +227,14 @@ def blackbox(ui, repo, *revs, **opts):
         if count >= limit:
             break
 
-        # count the commands by matching lines like:
-        if re.match(br'^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} .*> .*', line):
+        # count the commands by matching lines like:
+        # 2013/01/23 19:13:36 root>
+        # 2013/01/23 19:13:36 root (1234)>
+        # 2013/01/23 19:13:36 root @0000000000000000000000000000000000000000 (1234)>
+        # 2013-01-23 19:13:36.000 root @0000000000000000000000000000000000000000 (1234)>
+        if re.match(
+            br'^\d{4}[-/]\d{2}[-/]\d{2} \d{2}:\d{2}:\d{2}(.\d*)? .*> .*', line
+        ):
             count += 1
             output.append(line)
 
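The widened pattern accepts both the old slash-separated dates and the new dash-separated, millisecond-bearing ones. A quick check against sample lines of both shapes (a sketch, not part of the extension itself):

    import re

    pattern = br'^\d{4}[-/]\d{2}[-/]\d{2} \d{2}:\d{2}:\d{2}(.\d*)? .*> .*'
    print(bool(re.match(pattern, b'2013/01/23 19:13:36 root> serve')))           # True (old format)
    print(bool(re.match(pattern, b'2013-01-23 19:13:36.000 root (1234)> log')))  # True (new format)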
@@ -65,23 +65,23 @@ def _commit(orig, ui, repo, *pats, **opt
                         b"unable to parse '%s', should follow "
                         b"KEY=VALUE format"
                     )
-                    raise error.
+                    raise error.InputError(msg % raw)
                 k, v = raw.split(b'=', 1)
                 if not k:
                     msg = _(b"unable to parse '%s', keys can't be empty")
-                    raise error.
+                    raise error.InputError(msg % raw)
                 if re.search(br'[^\w-]', k):
                     msg = _(
                         b"keys can only contain ascii letters, digits,"
                         b" '_' and '-'"
                     )
-                    raise error.
+                    raise error.InputError(msg)
                 if k in usedinternally:
                     msg = _(
                         b"key '%s' is used internally, can't be set "
                         b"manually"
                     )
-                    raise error.
+                    raise error.InputError(msg % k)
                 inneropts['extra'][k] = v
             return super(repoextra, self).commit(*innerpats, **inneropts)
 
@@ -38,6 +38,7 @@ from mercurial import (
     lock as lockmod,
     logcmdutil,
     merge as mergemod,
+    mergestate,
     phases,
     pycompat,
     util,
@@ -241,7 +242,7 @@ class mercurial_sink(common.converter_si
 
         # If the file requires actual merging, abort. We don't have enough
         # context to resolve merges correctly.
-        if action in [b'm', b'dm', b'cd', b'dc']:
+        if action in mergestate.CONVERT_MERGE_ACTIONS:
             raise error.Abort(
                 _(
                     b"unable to convert merge commit "
250 | ) |
|
251 | ) | |
251 | % (file, p1ctx, p2ctx) |
|
252 | % (file, p1ctx, p2ctx) | |
252 | ) |
|
253 | ) | |
253 |
elif action == |
|
254 | elif action == mergestate.ACTION_KEEP: | |
254 | # 'keep' means nothing changed from p1 |
|
255 | # 'keep' means nothing changed from p1 | |
255 | continue |
|
256 | continue | |
256 | else: |
|
257 | else: |
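For readers unfamiliar with the raw action codes being replaced: the named constants appear to wrap the same single-byte values the old code compared against literally. A hypothetical sketch of the mapping, using only codes visible on the old side of these hunks:

# Hypothetical stand-ins for the mergestate constants; the byte codes are
# the ones the old code compared against literally.
ACTION_MERGE = b'm'                   # actual merge needed
ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'  # directory rename, move local file
ACTION_CHANGED_DELETED = b'cd'        # changed locally, deleted remotely
ACTION_DELETED_CHANGED = b'dc'        # deleted locally, changed remotely
ACTION_KEEP = b'k'                    # nothing changed from p1

CONVERT_MERGE_ACTIONS = (
    ACTION_MERGE,
    ACTION_DIR_RENAME_MOVE_LOCAL,
    ACTION_CHANGED_DELETED,
    ACTION_DELETED_CHANGED,
)

# The rewritten check is then equivalent to the old byte-list test:
assert (b'm' in CONVERT_MERGE_ACTIONS) and (b'k' == ACTION_KEEP)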
@@ -149,7 +149,6 @@ from mercurial import (
     mdiff,
     merge,
     mergestate as mergestatemod,
-    obsolete,
     pycompat,
     registrar,
     rewriteutil,
@@ -463,8 +462,6 @@ def getrevstofix(ui, repo, opts):
     revs = set(logcmdutil.revrange(repo, opts[b'rev']))
     if opts.get(b'working_dir'):
         revs.add(wdirrev)
-    for rev in revs:
-        checkfixablectx(ui, repo, repo[rev])
     # Allow fixing only wdir() even if there's an unfinished operation
     if not (len(revs) == 1 and wdirrev in revs):
         cmdutil.checkunfinished(repo)
@@ -481,16 +478,6 @@ def getrevstofix(ui, repo, opts):
     return revs


-def checkfixablectx(ui, repo, ctx):
-    """Aborts if the revision shouldn't be replaced with a fixed one."""
-    if ctx.obsolete():
-        # It would be better to actually check if the revision has a successor.
-        if not obsolete.isenabled(repo, obsolete.allowdivergenceopt):
-            raise error.Abort(
-                b'fixing obsolete revision could cause divergence'
-            )
-
-
 def pathstofix(ui, repo, pats, opts, match, basectxs, fixctx):
     """Returns the set of files that should be fixed in a context

@@ -51,6 +51,7 @@ getversion = gitutil.pygit2_version
 class gitstore(object):  # store.basicstore):
     def __init__(self, path, vfstype):
         self.vfs = vfstype(path)
+        self.opener = self.vfs
         self.path = self.vfs.base
         self.createmode = store._calcmode(self.vfs)
         # above lines should go away in favor of:
@@ -257,7 +257,7 @@ class gitdirstate(object):
             if match(p):
                 yield p

-    def set_clean(self, f, parentfiledata=None):
+    def set_clean(self, f, parentfiledata):
         """Mark a file normal and clean."""
         # TODO: for now we just let libgit2 re-stat the file. We can
         # clearly do better.
@@ -667,7 +667,15 @@ def applychanges(ui, repo, ctx, opts):
             repo.ui.setconfig(
                 b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'
             )
-            stats = mergemod.graft(repo, ctx, labels=[b'local', b'histedit'])
+            stats = mergemod.graft(
+                repo,
+                ctx,
+                labels=[
+                    b'already edited',
+                    b'current change',
+                    b'parent of current change',
+                ],
+            )
         finally:
             repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')
         return stats
@@ -1324,6 +1332,10 @@ pgup: prev page, space/pgdn: next page,
 d: drop, e: edit, f: fold, m: mess, p: pick, r: roll
 pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort
 """
+        if self.later_on_top:
+            help += b"Newer commits are shown above older commits.\n"
+        else:
+            help += b"Older commits are shown above newer commits.\n"
         return help.splitlines()

     def render_help(self, win):
@@ -116,6 +116,7 @@ from mercurial.utils import (
     dateutil,
     stringutil,
 )
+from mercurial.dirstateutils import timestamp

 cmdtable = {}
 command = registrar.command(cmdtable)
@@ -326,6 +327,7 @@ class kwtemplater(object):
             msg = _(b'overwriting %s expanding keywords\n')
         else:
             msg = _(b'overwriting %s shrinking keywords\n')
+        wctx = self.repo[None]
         for f in candidates:
             if self.restrict:
                 data = self.repo.file(f).read(mf[f])
@@ -356,7 +358,12 @@ class kwtemplater(object):
                 fp.write(data)
                 fp.close()
             if kwcmd:
-                self.repo.dirstate.set_clean(f)
+                s = wctx[f].lstat()
+                mode = s.st_mode
+                size = s.st_size
+                mtime = timestamp.mtime_of(s)
+                cache_data = (mode, size, mtime)
+                self.repo.dirstate.set_clean(f, cache_data)
             elif self.postcommit:
                 self.repo.dirstate.update_file_p1(f, p1_tracked=True)

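`timestamp.mtime_of()` is part of the new `dirstateutils` helpers used throughout this series. A rough, purely illustrative equivalent (not the real implementation) splits the nanosecond stat mtime:

import os

def mtime_of(st):
    # Illustration only: split st_mtime_ns into (seconds, nanoseconds);
    # the real helper also tracks a "second ambiguous" marker used by
    # the comparisons later in this series.
    seconds, nanoseconds = divmod(st.st_mtime_ns, 1_000_000_000)
    return (seconds, nanoseconds)

# The (mode, size, mtime) triple mirrors the cache_data built above.
st = os.lstat('.')
cache_data = (st.st_mode, st.st_size, mtime_of(st))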
@@ -32,6 +32,7 @@ from mercurial import (
     vfs as vfsmod,
 )
 from mercurial.utils import hashutil
+from mercurial.dirstateutils import timestamp

 shortname = b'.hglf'
 shortnameslash = shortname + b'/'
@@ -243,10 +244,11 @@ def openlfdirstate(ui, repo, create=True
 def lfdirstatestatus(lfdirstate, repo):
     pctx = repo[b'.']
     match = matchmod.always()
-    unsure, s = lfdirstate.status(
+    unsure, s, mtime_boundary = lfdirstate.status(
         match, subrepos=[], ignored=False, clean=False, unknown=False
     )
     modified, clean = s.modified, s.clean
+    wctx = repo[None]
     for lfile in unsure:
         try:
             fctx = pctx[standin(lfile)]
@@ -256,7 +258,13 @@ def lfdirstatestatus(lfdirstate, repo):
             modified.append(lfile)
         else:
             clean.append(lfile)
-            lfdirstate.set_clean(lfile)
+            st = wctx[lfile].lstat()
+            mode = st.st_mode
+            size = st.st_size
+            mtime = timestamp.reliable_mtime_of(st, mtime_boundary)
+            if mtime is not None:
+                cache_data = (mode, size, mtime)
+                lfdirstate.set_clean(lfile, cache_data)
     return s


@@ -663,7 +671,7 @@ def updatestandinsbymatch(repo, match):
     # large.
     lfdirstate = openlfdirstate(ui, repo)
     dirtymatch = matchmod.always()
-    unsure, s = lfdirstate.status(
+    unsure, s, mtime_boundary = lfdirstate.status(
         dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
     )
     modifiedfiles = unsure + s.modified + s.added + s.removed
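The extra `mtime_boundary` return value guards a race: a file written in the same instant the status walk started could be modified again without its mtime changing, so such mtimes must not be cached as clean. A toy model of the rule `reliable_mtime_of()` appears to implement, assuming the boundary is the timestamp at which status began:

def reliable_mtime_of(mtime, boundary):
    # Toy model: both arguments are (seconds, nanoseconds) pairs. Only
    # return an mtime that is strictly older than the boundary second;
    # None means "too fresh to cache as clean".
    if mtime[0] < boundary[0]:
        return mtime
    return None

assert reliable_mtime_of((100, 0), (101, 0)) == (100, 0)
assert reliable_mtime_of((101, 5), (101, 0)) is None  # same second: ambiguous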
@@ -51,11 +51,17 @@ from . import (
     storefactory,
 )

+ACTION_ADD = mergestatemod.ACTION_ADD
+ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
+ACTION_GET = mergestatemod.ACTION_GET
+ACTION_KEEP = mergestatemod.ACTION_KEEP
+ACTION_REMOVE = mergestatemod.ACTION_REMOVE
+
 eh = exthelper.exthelper()

 lfstatus = lfutil.lfstatus

-MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
+MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction('lfmr')

 # -- Utility functions: commonly/repeatedly needed functionality ---------------

@@ -563,8 +569,9 @@ def overridecalculateupdates(
         standin = lfutil.standin(lfile)
         (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
         (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
-        if sm in (b'g', b'dc') and lm != b'r':
-            if sm == b'dc':
+
+        if sm in (ACTION_GET, ACTION_DELETED_CHANGED) and lm != ACTION_REMOVE:
+            if sm == ACTION_DELETED_CHANGED:
                 f1, f2, fa, move, anc = sargs
                 sargs = (p2[f2].flags(), False)
             # Case 1: normal file in the working copy, largefile in
@@ -578,26 +585,28 @@ def overridecalculateupdates(
                 % lfile
             )
             if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
-                mresult.addfile(lfile, b'r', None, b'replaced by standin')
-                mresult.addfile(standin, b'g', sargs, b'replaces standin')
+                mresult.addfile(
+                    lfile, ACTION_REMOVE, None, b'replaced by standin'
+                )
+                mresult.addfile(standin, ACTION_GET, sargs, b'replaces standin')
             else:  # keep local normal file
-                mresult.addfile(lfile, b'k', None, b'replaces standin')
+                mresult.addfile(lfile, ACTION_KEEP, None, b'replaces standin')
                 if branchmerge:
                     mresult.addfile(
                         standin,
-                        b'k',
+                        ACTION_KEEP,
                         None,
                         b'replaced by non-standin',
                     )
                 else:
                     mresult.addfile(
                         standin,
-                        b'r',
+                        ACTION_REMOVE,
                         None,
                         b'replaced by non-standin',
                     )
-        elif lm in (b'g', b'dc') and sm != b'r':
-            if lm == b'dc':
+        if lm in (ACTION_GET, ACTION_DELETED_CHANGED) and sm != ACTION_REMOVE:
+            if lm == ACTION_DELETED_CHANGED:
                 f1, f2, fa, move, anc = largs
                 largs = (p2[f2].flags(), False)
             # Case 2: largefile in the working copy, normal file in
@@ -615,11 +624,13 @@ def overridecalculateupdates(
                 # largefile can be restored from standin safely
                 mresult.addfile(
                     lfile,
-                    b'k',
+                    ACTION_KEEP,
                     None,
                     b'replaced by standin',
                 )
-                mresult.addfile(standin, b'k', None, b'replaces standin')
+                mresult.addfile(
+                    standin, ACTION_KEEP, None, b'replaces standin'
+                )
             else:
                 # "lfile" should be marked as "removed" without
                 # removal of itself
@@ -631,12 +642,12 @@ def overridecalculateupdates(
                 )

                 # linear-merge should treat this largefile as 're-added'
-                mresult.addfile(standin, b'a', None, b'keep standin')
+                mresult.addfile(standin, ACTION_ADD, None, b'keep standin')
             else:  # pick remote normal file
-                mresult.addfile(lfile, b'g', largs, b'replaces standin')
+                mresult.addfile(lfile, ACTION_GET, largs, b'replaces standin')
                 mresult.addfile(
                     standin,
-                    b'r',
+                    ACTION_REMOVE,
                     None,
                     b'replaced by non-standin',
                 )
@@ -666,14 +677,12 @@ def mergerecordupdates(orig, repo, actio

 # Override filemerge to prompt the user about how they wish to merge
 # largefiles. This will handle identical edits without prompting the user.
-@eh.wrapfunction(filemerge, b'_filemerge')
+@eh.wrapfunction(filemerge, b'filemerge')
 def overridefilemerge(
-    origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
+    origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
 ):
     if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
-        return origfn(
-            premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
-        )
+        return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)

     ahash = lfutil.readasstandin(fca).lower()
     dhash = lfutil.readasstandin(fcd).lower()
@@ -697,7 +706,7 @@ def overridefilemerge(
             )
         ):
             repo.wwrite(fcd.path(), fco.data(), fco.flags())
-        return True, 0, False
+        return 0, False


 @eh.wrapfunction(copiesmod, b'pathcopies')
@@ -1519,7 +1528,7 @@ def scmutiladdremove(orig, repo, matcher
         return orig(repo, matcher, prefix, uipathfn, opts)
     # Get the list of missing largefiles so we can remove them
     lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
-    unsure, s = lfdirstate.status(
+    unsure, s, mtime_boundary = lfdirstate.status(
         matchmod.always(),
         subrepos=[],
         ignored=False,
@@ -1746,7 +1755,7 @@ def mergeupdate(orig, repo, node, branch
     # (*1) deprecated, but used internally (e.g: "rebase --collapse")

     lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
-    unsure, s = lfdirstate.status(
+    unsure, s, mtime_boundary = lfdirstate.status(
         matchmod.always(),
         subrepos=[],
         ignored=False,
@@ -22,6 +22,8 @@ from mercurial import (
     util,
 )

+from mercurial.dirstateutils import timestamp
+
 from . import (
     lfcommands,
     lfutil,
@@ -195,7 +197,7 @@ def reposetup(ui, repo):
             match._files = [f for f in match._files if sfindirstate(f)]
             # Don't waste time getting the ignored and unknown
             # files from lfdirstate
-            unsure, s = lfdirstate.status(
+            unsure, s, mtime_boundary = lfdirstate.status(
                 match,
                 subrepos=[],
                 ignored=False,
@@ -210,6 +212,7 @@ def reposetup(ui, repo):
                 s.clean,
             )
             if parentworking:
+                wctx = repo[None]
                 for lfile in unsure:
                     standin = lfutil.standin(lfile)
                     if standin not in ctx1:
@@ -222,7 +225,15 @@ def reposetup(ui, repo):
                     else:
                         if listclean:
                             clean.append(lfile)
-                        lfdirstate.set_clean(lfile)
+                        s = wctx[lfile].lstat()
+                        mode = s.st_mode
+                        size = s.st_size
+                        mtime = timestamp.reliable_mtime_of(
+                            s, mtime_boundary
+                        )
+                        if mtime is not None:
+                            cache_data = (mode, size, mtime)
+                            lfdirstate.set_clean(lfile, cache_data)
             else:
                 tocheck = unsure + modified + added + clean
                 modified, added, clean = [], [], []
@@ -444,11 +455,12 @@ def reposetup(ui, repo):
     repo.prepushoutgoinghooks.add(b"largefiles", prepushoutgoinghook)

     def checkrequireslfiles(ui, repo, **kwargs):
-        if b'largefiles' not in repo.requirements and any(
-            lfutil.shortname + b'/' in f[1] for f in repo.store.datafiles()
-        ):
-            repo.requirements.add(b'largefiles')
-            scmutil.writereporequirements(repo)
+        with repo.lock():
+            if b'largefiles' not in repo.requirements and any(
+                lfutil.shortname + b'/' in f[1] for f in repo.store.datafiles()
+            ):
+                repo.requirements.add(b'largefiles')
+                scmutil.writereporequirements(repo)

     ui.setconfig(
         b'hooks', b'changegroup.lfiles', checkrequireslfiles, b'largefiles'
@@ -257,25 +257,28 @@ def _reposetup(ui, repo):
     if b'lfs' not in repo.requirements:

         def checkrequireslfs(ui, repo, **kwargs):
-            if b'lfs' in repo.requirements:
-                return 0
+            with repo.lock():
+                if b'lfs' in repo.requirements:
+                    return 0

-            last = kwargs.get('node_last')
-            if last:
-                s = repo.set(b'%n:%n', bin(kwargs['node']), bin(last))
-            else:
-                s = repo.set(b'%n', bin(kwargs['node']))
-            match = repo._storenarrowmatch
-            for ctx in s:
-                # TODO: is there a way to just walk the files in the commit?
-                if any(
-                    ctx[f].islfs() for f in ctx.files() if f in ctx and match(f)
-                ):
-                    repo.requirements.add(b'lfs')
-                    repo.features.add(repository.REPO_FEATURE_LFS)
-                    scmutil.writereporequirements(repo)
-                    repo.prepushoutgoinghooks.add(b'lfs', wrapper.prepush)
-                    break
+                last = kwargs.get('node_last')
+                if last:
+                    s = repo.set(b'%n:%n', bin(kwargs['node']), bin(last))
+                else:
+                    s = repo.set(b'%n', bin(kwargs['node']))
+                match = repo._storenarrowmatch
+                for ctx in s:
+                    # TODO: is there a way to just walk the files in the commit?
+                    if any(
+                        ctx[f].islfs()
+                        for f in ctx.files()
+                        if f in ctx and match(f)
+                    ):
+                        repo.requirements.add(b'lfs')
+                        repo.features.add(repository.REPO_FEATURE_LFS)
+                        scmutil.writereporequirements(repo)
+                        repo.prepushoutgoinghooks.add(b'lfs', wrapper.prepush)
+                        break

     ui.setconfig(b'hooks', b'commit.lfs', checkrequireslfs, b'lfs')
     ui.setconfig(
@@ -38,8 +38,8 @@ def wrapdirstate(repo, dirstate):
             return super(narrowdirstate, self).normal(*args, **kwargs)

         @_editfunc
-        def set_tracked(self, *args):
-            return super(narrowdirstate, self).set_tracked(*args)
+        def set_tracked(self, *args, **kwargs):
+            return super(narrowdirstate, self).set_tracked(*args, **kwargs)

         @_editfunc
         def set_untracked(self, *args):
@@ -435,7 +435,10 @@ class notifier(object):
             if spec is None:
                 subs.add(sub)
                 continue
-            revs = self.repo.revs(b'%r and %d:', spec, ctx.rev())
+            try:
+                revs = self.repo.revs(b'%r and %d:', spec, ctx.rev())
+            except error.RepoLookupError:
+                continue
             if len(revs):
                 subs.add(sub)
                 continue
@@ -1544,7 +1544,7 @@ def rebasenode(repo, rev, p1, p2, base,
         force=True,
         ancestor=base,
         mergeancestor=mergeancestor,
-        labels=[b'dest', b'source'],
+        labels=[b'dest', b'source', b'parent of source'],
         wc=wctx,
     )
     wctx.setparents(p1ctx.node(), repo[p2].node())
@@ -88,7 +88,9 @@ 3. remotefilelog only works with ssh bas

 4. Tags are not supported in completely shallow repos. If you use tags in your repo you will have to specify `excludepattern=.hgtags` in your client configuration to ensure that file is downloaded. The include/excludepattern settings are experimental at the moment and have yet to be deployed in a production environment.

-5. A few commands will be slower. `hg log <filename>` will be much slower since it has to walk the entire commit history instead of just the filelog. Use `hg log -f <filename>` instead, which remains very fast.
+5. Similarly, subrepositories should not be used with completely shallow repos. Use `excludepattern=.hgsub*` in your client configuration to ensure that the files are downloaded.
+
+6. A few commands will be slower. `hg log <filename>` will be much slower since it has to walk the entire commit history instead of just the filelog. Use `hg log -f <filename>` instead, which remains very fast.

 Contributing
 ============
@@ -520,7 +520,7 @@ def checkunknownfiles(orig, repo, wctx,


 # Prefetch files before status attempts to look at their size and contents
-def checklookup(orig, self, files):
+def checklookup(orig, self, files, mtime_boundary):
     repo = self._repo
     if isenabled(repo):
         prefetchfiles = []
@@ -530,7 +530,7 @@ def checklookup(orig, self, files):
             prefetchfiles.append((f, hex(parent.filenode(f))))
     # batch fetch the needed files from the server
     repo.fileservice.prefetch(prefetchfiles)
-    return orig(self, files)
+    return orig(self, files, mtime_boundary)


 # Prefetch the logic that compares added and removed files for renames
@@ -18,7 +18,6 @@ from mercurial import (
     mdiff,
     pycompat,
     revlog,
-    util,
 )
 from mercurial.utils import storageutil
 from mercurial.revlogutils import flagutil
@@ -245,11 +244,11 @@ class remotefilelog(object):
     __bool__ = __nonzero__

     def __len__(self):
-        if self.filename == b'.hgtags':
-            # The length of .hgtags is used to fast path tag checking.
-            # remotefilelog doesn't support .hgtags since the entire .hgtags
-            # history is needed. Use the excludepattern setting to make
-            # .hgtags a normal filelog.
+        if self.filename in (b'.hgtags', b'.hgsub', b'.hgsubstate'):
+            # Global tag and subrepository support require access to the
+            # file history for various performance sensitive operations.
+            # excludepattern should be used for repositories depending on
+            # those features to fallback to regular filelog.
             return 0

         raise RuntimeError(b"len not supported")
@@ -360,17 +359,6 @@ class remotefilelog(object):
         )
         return rev

-    def _processflags(self, text, flags, operation, raw=False):
-        """deprecated entry point to access flag processors"""
-        msg = b'_processflag(...) use the specialized variant'
-        util.nouideprecwarn(msg, b'5.2', stacklevel=2)
-        if raw:
-            return text, flagutil.processflagsraw(self, text, flags)
-        elif operation == b'read':
-            return flagutil.processflagsread(self, text, flags)
-        else:  # write operation
-            return flagutil.processflagswrite(self, text, flags)
-
     def revision(self, node, raw=False):
         """returns the revlog contents at this node.
         this includes the meta data traditionally included in file revlogs.
@@ -76,6 +76,7 @@ from __future__ import absolute_import
 from mercurial.i18n import _
 from mercurial.pycompat import setattr
 from mercurial import (
+    cmdutil,
     commands,
     dirstate,
     error,
@@ -153,22 +154,11 @@ def _setuplog(ui):


 def _clonesparsecmd(orig, ui, repo, *args, **opts):
-    include_pat = opts.get('include')
-    exclude_pat = opts.get('exclude')
-    enableprofile_pat = opts.get('enable_profile')
+    include = opts.get('include')
+    exclude = opts.get('exclude')
+    enableprofile = opts.get('enable_profile')
     narrow_pat = opts.get('narrow')
-    include = exclude = enableprofile = False
-    if include_pat:
-        pat = include_pat
-        include = True
-    if exclude_pat:
-        pat = exclude_pat
-        exclude = True
-    if enableprofile_pat:
-        pat = enableprofile_pat
-        enableprofile = True
-    if sum([include, exclude, enableprofile]) > 1:
-        raise error.Abort(_(b"too many flags specified."))
+
     # if --narrow is passed, it means they are includes and excludes for narrow
     # clone
     if not narrow_pat and (include or exclude or enableprofile):
@@ -176,7 +166,6 @@ def _clonesparsecmd(orig, ui, repo, *arg
         def clonesparse(orig, ctx, *args, **kwargs):
             sparse.updateconfig(
                 ctx.repo().unfiltered(),
-                pat,
                 {},
                 include=include,
                 exclude=exclude,
@@ -214,7 +203,7 @@ def _setupadd(ui):
             for pat in pats:
                 dirname, basename = util.split(pat)
                 dirs.add(dirname)
-            sparse.updateconfig(repo, list(dirs), opts, include=True)
+            sparse.updateconfig(repo, opts, include=list(dirs))
         return orig(ui, repo, *pats, **opts)

     extensions.wrapcommand(commands.table, b'add', _add)
@@ -286,18 +275,54 @@ def _setupdirstate(ui):
 @command(
     b'debugsparse',
     [
-        (b'I', b'include', False, _(b'include files in the sparse checkout')),
-        (b'X', b'exclude', False, _(b'exclude files in the sparse checkout')),
-        (b'd', b'delete', False, _(b'delete an include/exclude rule')),
+        (
+            b'I',
+            b'include',
+            [],
+            _(b'include files in the sparse checkout'),
+            _(b'PATTERN'),
+        ),
+        (
+            b'X',
+            b'exclude',
+            [],
+            _(b'exclude files in the sparse checkout'),
+            _(b'PATTERN'),
+        ),
+        (
+            b'd',
+            b'delete',
+            [],
+            _(b'delete an include/exclude rule'),
+            _(b'PATTERN'),
+        ),
         (
             b'f',
             b'force',
             False,
             _(b'allow changing rules even with pending changes'),
         ),
-        (b'', b'enable-profile', False, _(b'enables the specified profile')),
-        (b'', b'disable-profile', False, _(b'disables the specified profile')),
-        (b'', b'import-rules', False, _(b'imports rules from a file')),
+        (
+            b'',
+            b'enable-profile',
+            [],
+            _(b'enables the specified profile'),
+            _(b'PATTERN'),
+        ),
+        (
+            b'',
+            b'disable-profile',
+            [],
+            _(b'disables the specified profile'),
+            _(b'PATTERN'),
+        ),
+        (
+            b'',
+            b'import-rules',
+            [],
+            _(b'imports rules from a file'),
+            _(b'PATTERN'),
+        ),
         (b'', b'clear-rules', False, _(b'clears local include/exclude rules')),
         (
             b'',
@@ -308,10 +333,10 @@ def _setupdirstate(ui):
         (b'', b'reset', False, _(b'makes the repo full again')),
     ]
     + commands.templateopts,
-    _(b'[--OPTION] PATTERN...'),
+    _(b'[--OPTION]'),
     helpbasic=True,
 )
-def debugsparse(ui, repo, *pats, **opts):
+def debugsparse(ui, repo, **opts):
     """make the current checkout sparse, or edit the existing checkout

     The sparse command is used to make the current checkout sparse.
@@ -363,19 +388,13 @@ def debugsparse(ui, repo, *pats, **opts)
     delete = opts.get(b'delete')
     refresh = opts.get(b'refresh')
     reset = opts.get(b'reset')
-    count = sum(
-        [
-            include,
-            exclude,
-            enableprofile,
-            disableprofile,
-            delete,
-            importrules,
-            refresh,
-            clearrules,
-            reset,
-        ]
+    action = cmdutil.check_at_most_one_arg(
+        opts, b'import_rules', b'clear_rules', b'refresh'
     )
+    updateconfig = bool(
+        include or exclude or delete or reset or enableprofile or disableprofile
+    )
+    count = sum([updateconfig, bool(action)])
     if count > 1:
         raise error.Abort(_(b"too many flags specified"))

@@ -397,10 +416,9 @@ def debugsparse(ui, repo, *pats, **opts)
             )
         )

-    if include or exclude or delete or reset or enableprofile or disableprofile:
+    if updateconfig:
         sparse.updateconfig(
             repo,
-            pats,
             opts,
             include=include,
             exclude=exclude,
@@ -412,7 +430,7 @@ def debugsparse(ui, repo, *pats, **opts)
         )

     if importrules:
-        sparse.importfromfiles(repo, opts, pats, force=force)
+        sparse.importfromfiles(repo, opts, importrules, force=force)

     if clearrules:
         sparse.clearrules(repo, force=force)
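`cmdutil.check_at_most_one_arg()` both enforces exclusivity and reports which flag was given, which is what lets the rewritten code compute `action` in one step. A simplified sketch of its contract (not the exact Mercurial implementation):

def check_at_most_one_arg(opts, *args):
    # Return the name of the single truthy option among args, or None;
    # raise if more than one is set (simplified illustration).
    previous = None
    for arg in args:
        if opts.get(arg):
            if previous is not None:
                raise ValueError(
                    'cannot specify both --%s and --%s' % (previous, arg)
                )
            previous = arg
    return previous

assert check_at_most_one_arg(
    {'refresh': True}, 'import_rules', 'clear_rules', 'refresh'
) == 'refresh'
assert check_at_most_one_arg({}, 'import_rules', 'clear_rules') is None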
@@ -47,6 +47,8 @@ import re
 from mercurial.i18n import _
 from mercurial.node import short
 from mercurial import (
+    cmdutil,
+    extensions,
     pycompat,
     registrar,
 )
@@ -215,6 +217,23 @@ def reposetup(ui, repo):
     repo.adddatafilter(name, fn)


+def wrap_revert(orig, repo, ctx, names, uipathfn, actions, *args, **kwargs):
+    # reset dirstate cache for file we touch
+    ds = repo.dirstate
+    with ds.parentchange():
+        for filename in actions[b'revert'][0]:
+            entry = ds.get_entry(filename)
+            if entry is not None:
+                if entry.p1_tracked:
+                    ds.update_file(
+                        filename,
+                        entry.tracked,
+                        p1_tracked=True,
+                        p2_info=entry.p2_info,
+                    )
+    return orig(repo, ctx, names, uipathfn, actions, *args, **kwargs)
+
+
 def extsetup(ui):
     # deprecated config: win32text.warn
     if ui.configbool(b'win32text', b'warn'):
@@ -224,3 +243,4 @@ def extsetup(ui):
             b"https://mercurial-scm.org/wiki/Win32TextExtension\n"
         )
     )
+    extensions.wrapfunction(cmdutil, '_performrevert', wrap_revert)
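The new hook is installed through Mercurial's usual function-wrapping idiom: `extensions.wrapfunction(container, name, wrapper)` rebinds `container.name` so that the wrapper runs first and receives the original callable as its first argument. A minimal self-contained sketch of that idiom (no Mercurial imports needed):

import types

# A stand-in module with a function we want to wrap.
cmdutil_stub = types.SimpleNamespace(_performrevert=lambda repo, ctx: 'reverted')

def wrapfunction(container, name, wrapper):
    # Same shape as extensions.wrapfunction: the wrapper receives the
    # original callable as its first argument.
    orig = getattr(container, name)
    def wrapped(*args, **kwargs):
        return wrapper(orig, *args, **kwargs)
    setattr(container, name, wrapped)

def wrap_revert(orig, repo, ctx):
    # ...refresh dirstate entries first, as wrap_revert does above...
    return orig(repo, ctx)

wrapfunction(cmdutil_stub, '_performrevert', wrap_revert)
assert cmdutil_stub._performrevert('repo', 'ctx') == 'reverted'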
@@ -22,6 +22,7 @@ from . import (
     error,
     obsutil,
     pycompat,
+    requirements,
     scmutil,
     txnutil,
     util,
@@ -36,11 +37,9 @@ from .utils import (
 # custom styles
 activebookmarklabel = b'bookmarks.active bookmarks.current'

-BOOKMARKS_IN_STORE_REQUIREMENT = b'bookmarksinstore'
-

 def bookmarksinstore(repo):
-    return BOOKMARKS_IN_STORE_REQUIREMENT in repo.requirements
+    return requirements.BOOKMARKS_IN_STORE_REQUIREMENT in repo.requirements


 def bookmarksvfs(repo):
@@ -213,7 +212,11 @@ class bmstore(object):
         The transaction is then responsible for updating the file content."""
         location = b'' if bookmarksinstore(self._repo) else b'plain'
         tr.addfilegenerator(
-            b'bookmarks', (b'bookmarks',), self._write, location=location
+            b'bookmarks',
+            (b'bookmarks',),
+            self._write,
+            location=location,
+            post_finalize=True,
         )
         tr.hookargs[b'bookmark_moved'] = b'1'

@@ -17,6 +17,7 @@ from .node import (
 from . import (
     encoding,
     error,
+    obsolete,
     pycompat,
     scmutil,
     util,
@@ -184,7 +185,7 @@ class branchcache(object):

     The first line is used to check if the cache is still valid. If the
     branch cache is for a filtered repo view, an optional third hash is
-    included that hashes the hashes of all filtered revisions.
+    included that hashes the hashes of all filtered and obsolete revisions.

     The open/closed state is represented by a single letter 'o' or 'c'.
     This field can be used to avoid changelog reads when determining if a
@@ -351,16 +352,25 @@ class branchcache(object):
         return filename

     def validfor(self, repo):
-        """Is the cache content valid regarding a repo
+        """check that cache contents are valid for (a subset of) this repo

-        - False when cached tipnode is unknown or if we detect a strip.
-        - True when cache is up to date or a subset of current repo."""
+        - False when the order of changesets changed or if we detect a strip.
+        - True when cache is up-to-date for the current repo or its subset."""
         try:
-            return (self.tipnode == repo.changelog.node(self.tiprev)) and (
-                self.filteredhash == scmutil.filteredhash(repo, self.tiprev)
-            )
+            node = repo.changelog.node(self.tiprev)
         except IndexError:
+            # changesets were stripped and now we don't even have enough to
+            # find tiprev
             return False
+        if self.tipnode != node:
+            # tiprev doesn't correspond to tipnode: repo was stripped, or this
+            # repo has a different order of changesets
+            return False
+        tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True)
+        # hashes don't match if this repo view has a different set of filtered
+        # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
+        # history was rewritten)
+        return self.filteredhash == tiphash

     def _branchtip(self, heads):
         """Return tuple with last open head in heads and false,
@@ -478,6 +488,9 @@ class branchcache(object):
         # use the faster unfiltered parent accessor.
         parentrevs = repo.unfiltered().changelog.parentrevs

+        # Faster than using ctx.obsolete()
+        obsrevs = obsolete.getrevs(repo, b'obsolete')
+
         for branch, newheadrevs in pycompat.iteritems(newbranches):
             # For every branch, compute the new branchheads.
             # A branchhead is a revision such that no descendant is on
@@ -514,10 +527,15 @@ class branchcache(object):
             # checks can be skipped. Otherwise, the ancestors of the
             # "uncertain" set are removed from branchheads.
             # This computation is heavy and avoided if at all possible.
-            bheads = self._entries.setdefault(branch, [])
+            bheads = self._entries.get(branch, [])
             bheadset = {cl.rev(node) for node in bheads}
             uncertain = set()
             for newrev in sorted(newheadrevs):
+                if newrev in obsrevs:
+                    # We ignore obsolete changesets as they shouldn't be
+                    # considered heads.
+                    continue
+
                 if not bheadset:
                     bheadset.add(newrev)
                     continue
@@ -525,13 +543,22 @@ class branchcache(object):
                 parents = [p for p in parentrevs(newrev) if p != nullrev]
                 samebranch = set()
                 otherbranch = set()
+                obsparents = set()
                 for p in parents:
-                    if p in bheadset or getbranchinfo(p)[0] == branch:
+                    if p in obsrevs:
+                        # We ignored this obsolete changeset earlier, but now
+                        # that it has non-ignored children, we need to make
+                        # sure their ancestors are not considered heads. To
+                        # achieve that, we will simply treat this obsolete
+                        # changeset as a parent from other branch.
+                        obsparents.add(p)
+                    elif p in bheadset or getbranchinfo(p)[0] == branch:
                         samebranch.add(p)
                     else:
                         otherbranch.add(p)
-                if otherbranch and not (len(bheadset) == len(samebranch) == 1):
+                if not (len(bheadset) == len(samebranch) == 1):
                     uncertain.update(otherbranch)
+                    uncertain.update(obsparents)
                 bheadset.difference_update(samebranch)
                 bheadset.add(newrev)

@@ -540,11 +567,12 @@ class branchcache(object):
                 topoheads = set(cl.headrevs())
                 if bheadset - topoheads:
                     floorrev = min(bheadset)
-                    ancestors = set(cl.ancestors(newheadrevs, floorrev))
-                    bheadset -= ancestors
-                bheadrevs = sorted(bheadset)
-                self[branch] = [cl.node(rev) for rev in bheadrevs]
-                tiprev = bheadrevs[-1]
+                    if floorrev <= max(uncertain):
+                        ancestors = set(cl.ancestors(uncertain, floorrev))
+                        bheadset -= ancestors
+                if bheadset:
+                    self[branch] = [cl.node(rev) for rev in sorted(bheadset)]
+                    tiprev = max(newheadrevs)
                 if tiprev > ntiprev:
                     ntiprev = tiprev

@@ -553,15 +581,24 @@ class branchcache(object):
             self.tipnode = cl.node(ntiprev)

         if not self.validfor(repo):
-            # cache key are not valid anymore
+            # old cache key is now invalid for the repo, but we've just updated
+            # the cache and we assume it's valid, so let's make the cache key
+            # valid as well by recomputing it from the cached data
             self.tipnode = repo.nullid
             self.tiprev = nullrev
             for heads in self.iterheads():
+                if not heads:
+                    # all revisions on a branch are obsolete
+                    continue
+                # note: tiprev is not necessarily the tip revision of repo,
+                # because the tip could be obsolete (i.e. not a head)
                 tiprev = max(cl.rev(node) for node in heads)
                 if tiprev > self.tiprev:
                     self.tipnode = cl.node(tiprev)
                     self.tiprev = tiprev
-        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
+        self.filteredhash = scmutil.filteredhash(
+            repo, self.tiprev, needobsolete=True
+        )

         duration = util.timer() - starttime
         repo.ui.log(
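The net effect of these branchmap changes is that obsolete changesets no longer count as branch heads, while their ancestors are still ruled out once an obsolete changeset has visible children. A toy model on a dict-based DAG (illustration only; the real code works on revision numbers and updates the cache incrementally):

def branch_heads(parents, revs, obsolete):
    # Toy model: heads are the non-obsolete candidates that are not an
    # ancestor of another candidate.
    candidates = [r for r in revs if r not in obsolete]
    ancestors = set()
    stack = [p for r in candidates for p in parents[r]]
    while stack:
        p = stack.pop()
        if p not in ancestors:
            ancestors.add(p)
            stack.extend(parents[p])
    return [r for r in candidates if r not in ancestors]

# 0 -- 1 -- 2 (obsolete)
#       \
#        3
parents = {0: [], 1: [0], 2: [1], 3: [1]}
assert branch_heads(parents, [0, 1, 2, 3], obsolete={2}) == [3]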
@@ -1886,7 +1886,8 @@ def addpartbundlestream2(bundler, repo,
     filecount, bytecount, it = streamclone.generatev2(
         repo, includepats, excludepats, includeobsmarkers
     )
-    requirements = _formatrequirementsspec(repo.requirements)
+    requirements = streamclone.streamed_requirements(repo)
+    requirements = _formatrequirementsspec(requirements)
     part = bundler.newpart(b'stream2', data=it)
     part.addparam(b'bytecount', b'%d' % bytecount, mandatory=True)
     part.addparam(b'filecount', b'%d' % filecount, mandatory=True)
@@ -2419,7 +2420,7 @@ def handlebookmark(op, inpart):
         op.records.add(b'bookmarks', record)
     else:
         raise error.ProgrammingError(
-            b'unkown bookmark mode: %s' % bookmarksmode
+            b'unknown bookmark mode: %s' % bookmarksmode
         )

@@ -195,7 +195,7 @@ def parsebundlespec(repo, spec, strict=T
     # repo supports and error if the bundle isn't compatible.
     if version == b'packed1' and b'requirements' in params:
         requirements = set(params[b'requirements'].split(b','))
-        missingreqs = requirements - repo.supportedformats
+        missingreqs = requirements - requirementsmod.STREAM_FIXED_REQUIREMENTS
         if missingreqs:
             raise error.UnsupportedBundleSpecification(
                 _(b'missing support for repository features: %s')
@@ -61,11 +61,13 @@ static PyObject *dirstate_item_new(PyTyp
 	int p2_info;
 	int has_meaningful_data;
 	int has_meaningful_mtime;
+	int mtime_second_ambiguous;
 	int mode;
 	int size;
 	int mtime_s;
 	int mtime_ns;
 	PyObject *parentfiledata;
+	PyObject *mtime;
 	PyObject *fallback_exec;
 	PyObject *fallback_symlink;
 	static char *keywords_name[] = {
@@ -78,6 +80,7 @@ static PyObject *dirstate_item_new(PyTyp
 	p2_info = 0;
 	has_meaningful_mtime = 1;
 	has_meaningful_data = 1;
+	mtime_second_ambiguous = 0;
 	parentfiledata = Py_None;
 	fallback_exec = Py_None;
 	fallback_symlink = Py_None;
@@ -118,10 +121,18 @@ static PyObject *dirstate_item_new(PyTyp
 	}

 	if (parentfiledata != Py_None) {
-		if (!PyArg_ParseTuple(parentfiledata, "ii(ii)", &mode, &size,
-		                      &mtime_s, &mtime_ns)) {
+		if (!PyArg_ParseTuple(parentfiledata, "iiO", &mode, &size,
+		                      &mtime)) {
 			return NULL;
 		}
+		if (mtime != Py_None) {
+			if (!PyArg_ParseTuple(mtime, "iii", &mtime_s, &mtime_ns,
+			                      &mtime_second_ambiguous)) {
+				return NULL;
+			}
+		} else {
+			has_meaningful_mtime = 0;
+		}
 	} else {
 		has_meaningful_data = 0;
 		has_meaningful_mtime = 0;
130 | t->flags |= dirstate_flag_has_meaningful_data; |
|
141 | t->flags |= dirstate_flag_has_meaningful_data; | |
131 | t->mode = mode; |
|
142 | t->mode = mode; | |
132 | t->size = size; |
|
143 | t->size = size; | |
|
144 | if (mtime_second_ambiguous) { | |||
|
145 | t->flags |= dirstate_flag_mtime_second_ambiguous; | |||
|
146 | } | |||
133 | } else { |
|
147 | } else { | |
134 | t->mode = 0; |
|
148 | t->mode = 0; | |
135 | t->size = 0; |
|
149 | t->size = 0; | |
@@ -255,7 +269,8 @@ static inline int dirstate_item_c_v1_mti
 	} else if (!(self->flags & dirstate_flag_has_mtime) ||
 	           !(self->flags & dirstate_flag_p1_tracked) ||
 	           !(self->flags & dirstate_flag_wc_tracked) ||
-	           (self->flags & dirstate_flag_p2_info)) {
+	           (self->flags & dirstate_flag_p2_info) ||
+	           (self->flags & dirstate_flag_mtime_second_ambiguous)) {
 		return ambiguous_time;
 	} else {
 		return self->mtime_s;
@@ -311,33 +326,30 b' static PyObject *dirstate_item_v1_mtime(' | |||||
311 | return PyInt_FromLong(dirstate_item_c_v1_mtime(self)); |
|
326 | return PyInt_FromLong(dirstate_item_c_v1_mtime(self)); | |
312 | }; |
|
327 | }; | |
313 |
|
328 | |||
314 | static PyObject *dirstate_item_need_delay(dirstateItemObject *self, |
|
|||
315 | PyObject *now) |
|
|||
316 | { |
|
|||
317 | int now_s; |
|
|||
318 | int now_ns; |
|
|||
319 | if (!PyArg_ParseTuple(now, "ii", &now_s, &now_ns)) { |
|
|||
320 | return NULL; |
|
|||
321 | } |
|
|||
322 | if (dirstate_item_c_v1_state(self) == 'n' && self->mtime_s == now_s) { |
|
|||
323 | Py_RETURN_TRUE; |
|
|||
324 | } else { |
|
|||
325 | Py_RETURN_FALSE; |
|
|||
326 | } |
|
|||
327 | }; |
|
|||
328 |
|
||||
329 | static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self, |
|
329 | static PyObject *dirstate_item_mtime_likely_equal_to(dirstateItemObject *self, | |
330 | PyObject *other) |
|
330 | PyObject *other) | |
331 | { |
|
331 | { | |
332 | int other_s; |
|
332 | int other_s; | |
333 | int other_ns; |
|
333 | int other_ns; | |
334 | if (!PyArg_ParseTuple(other, "ii", &other_s, &other_ns)) { |
|
334 | int other_second_ambiguous; | |
|
335 | if (!PyArg_ParseTuple(other, "iii", &other_s, &other_ns, | |||
|
336 | &other_second_ambiguous)) { | |||
335 | return NULL; |
|
337 | return NULL; | |
336 | } |
|
338 | } | |
337 |
if ((self->flags & dirstate_flag_has_mtime) |
|
339 | if (!(self->flags & dirstate_flag_has_mtime)) { | |
338 | self->mtime_s == other_s && |
|
340 | Py_RETURN_FALSE; | |
339 | (self->mtime_ns == other_ns || self->mtime_ns == 0 || |
|
341 | } | |
340 | other_ns == 0)) { |
|
342 | if (self->mtime_s != other_s) { | |
|
343 | Py_RETURN_FALSE; | |||
|
344 | } | |||
|
345 | if (self->mtime_ns == 0 || other_ns == 0) { | |||
|
346 | if (self->flags & dirstate_flag_mtime_second_ambiguous) { | |||
|
347 | Py_RETURN_FALSE; | |||
|
348 | } else { | |||
|
349 | Py_RETURN_TRUE; | |||
|
350 | } | |||
|
351 | } | |||
|
352 | if (self->mtime_ns == other_ns) { | |||
341 | Py_RETURN_TRUE; |
|
353 | Py_RETURN_TRUE; | |
342 | } else { |
|
354 | } else { | |
343 | Py_RETURN_FALSE; |
|
355 | Py_RETURN_FALSE; | |
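
In prose, the rewritten comparison says: no stored mtime means no match; differing seconds mean no match; and when either side lost its sub-second precision (a nanosecond field of 0), the match is only trusted if the stored second is not flagged as ambiguous. A minimal Python model of the same rule (the names are descriptive stand-ins, not the C API):

    def mtime_likely_equal_to(stored_s, stored_ns, other_s, other_ns,
                              has_mtime, second_ambiguous):
        if not has_mtime:
            return False  # no usable stored mtime
        if stored_s != other_s:
            return False  # seconds differ outright
        if stored_ns == 0 or other_ns == 0:
            # one side has no sub-second precision; only trust the match
            # when the stored second is not marked ambiguous
            return not second_ambiguous
        return stored_ns == other_ns
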
@@ -438,14 +450,6 @@ static PyObject *dirstate_item_from_v2_m
 		            dirstate_flag_has_meaningful_data |
 		            dirstate_flag_has_mtime);
 	}
-	if (t->flags & dirstate_flag_mtime_second_ambiguous) {
-		/* The current code is not able to do the more subtle comparison
-		 * that the MTIME_SECOND_AMBIGUOUS requires. So we ignore the
-		 * mtime */
-		t->flags &= ~(dirstate_flag_mtime_second_ambiguous |
-		              dirstate_flag_has_meaningful_data |
-		              dirstate_flag_has_mtime);
-	}
 	t->mode = 0;
 	if (t->flags & dirstate_flag_has_meaningful_data) {
 		if (t->flags & dirstate_flag_mode_exec_perm) {
@@ -474,14 +478,28 @@ static PyObject *dirstate_item_set_possi
 static PyObject *dirstate_item_set_clean(dirstateItemObject *self,
                                          PyObject *args)
 {
-	int size, mode, mtime_s, mtime_ns;
-	if (!PyArg_ParseTuple(args, "ii(ii)", &mode, &size, &mtime_s,
-	                      &mtime_ns)) {
+	int size, mode, mtime_s, mtime_ns, mtime_second_ambiguous;
+	PyObject *mtime;
+	mtime_s = 0;
+	mtime_ns = 0;
+	mtime_second_ambiguous = 0;
+	if (!PyArg_ParseTuple(args, "iiO", &mode, &size, &mtime)) {
 		return NULL;
 	}
+	if (mtime != Py_None) {
+		if (!PyArg_ParseTuple(mtime, "iii", &mtime_s, &mtime_ns,
+		                      &mtime_second_ambiguous)) {
+			return NULL;
+		}
+	} else {
+		self->flags &= ~dirstate_flag_has_mtime;
+	}
 	self->flags = dirstate_flag_wc_tracked | dirstate_flag_p1_tracked |
 	              dirstate_flag_has_meaningful_data |
 	              dirstate_flag_has_mtime;
+	if (mtime_second_ambiguous) {
+		self->flags |= dirstate_flag_mtime_second_ambiguous;
+	}
 	self->mode = mode;
 	self->size = size;
 	self->mtime_s = mtime_s;
@@ -530,8 +548,6 @@ static PyMethodDef dirstate_item_methods
 	 "return a \"size\" suitable for v1 serialization"},
 	{"v1_mtime", (PyCFunction)dirstate_item_v1_mtime, METH_NOARGS,
 	 "return a \"mtime\" suitable for v1 serialization"},
-	{"need_delay", (PyCFunction)dirstate_item_need_delay, METH_O,
-	 "True if the stored mtime would be ambiguous with the current time"},
 	{"mtime_likely_equal_to", (PyCFunction)dirstate_item_mtime_likely_equal_to,
 	 METH_O, "True if the stored mtime is likely equal to the given mtime"},
 	{"from_v1_data", (PyCFunction)dirstate_item_from_v1_meth,
@@ -904,12 +920,9 @@ static PyObject *pack_dirstate(PyObject
 	Py_ssize_t nbytes, pos, l;
 	PyObject *k, *v = NULL, *pn;
 	char *p, *s;
-	int now_s;
-	int now_ns;
 
-	if (!PyArg_ParseTuple(args, "O!O!O!(ii):pack_dirstate", &PyDict_Type,
-	                      &map, &PyDict_Type, &copymap, &PyTuple_Type, &pl,
-	                      &now_s, &now_ns)) {
+	if (!PyArg_ParseTuple(args, "O!O!O!:pack_dirstate", &PyDict_Type, &map,
+	                      &PyDict_Type, &copymap, &PyTuple_Type, &pl)) {
 		return NULL;
 	}
 
@@ -978,21 +991,6 @@ static PyObject *pack_dirstate(PyObject
 		mode = dirstate_item_c_v1_mode(tuple);
 		size = dirstate_item_c_v1_size(tuple);
 		mtime = dirstate_item_c_v1_mtime(tuple);
-		if (state == 'n' && tuple->mtime_s == now_s) {
-			/* See pure/parsers.py:pack_dirstate for why we do
-			 * this. */
-			mtime = -1;
-			mtime_unset = (PyObject *)dirstate_item_from_v1_data(
-			    state, mode, size, mtime);
-			if (!mtime_unset) {
-				goto bail;
-			}
-			if (PyDict_SetItem(map, k, mtime_unset) == -1) {
-				goto bail;
-			}
-			Py_DECREF(mtime_unset);
-			mtime_unset = NULL;
-		}
 		*p++ = state;
 		putbe32((uint32_t)mode, p);
 		putbe32((uint32_t)size, p + 4);
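
Taken together with dirstate_item_new and set_clean above, an mtime now travels as a single argument: either None or a (second, nanosecond, second_ambiguous) triple. A hedged usage sketch (`item` and the variable names are illustrative, not a specific call site):

    item.set_clean(mode, size, (mtime_s, mtime_ns, False))  # trustworthy stat time
    item.set_clean(mode, size, (mtime_s, mtime_ns, True))   # same-second write; a
                                                            # zero-ns match must not
                                                            # be trusted
    item.set_clean(mode, size, None)                        # no usable mtime at all
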
diff --git a/mercurial/cext/revlog.c b/mercurial/cext/revlog.c
@@ -103,8 +103,7 @@ struct indexObjectStruct {
 	 */
 	long rust_ext_compat; /* compatibility with being used in rust
 	                         extensions */
-	char format_version; /* size of index headers. Differs in v1 v.s. v2
-	                        format */
+	long format_version; /* format version selector (format_*) */
 };
 
 static Py_ssize_t index_length(const indexObject *self)
@@ -120,9 +119,11 @@ static Py_ssize_t inline_scan(indexObjec
 static int index_find_node(indexObject *self, const char *node);
 
 #if LONG_MAX == 0x7fffffffL
-static const char *const tuple_format = PY23("Kiiiiiis#KiBB", "Kiiiiiiy#KiBB");
+static const char *const tuple_format =
+    PY23("Kiiiiiis#KiBBi", "Kiiiiiiy#KiBBi");
 #else
-static const char *const tuple_format = PY23("kiiiiiis#kiBB", "kiiiiiiy#kiBB");
+static const char *const tuple_format =
+    PY23("kiiiiiis#kiBBi", "kiiiiiiy#kiBBi");
 #endif
 
 /* A RevlogNG v1 index entry is 64 bytes long. */
@@ -131,10 +132,54 @@ static const long v1_entry_size = 64;
 /* A Revlogv2 index entry is 96 bytes long. */
 static const long v2_entry_size = 96;
 
-static const long format_v1 = 1; /* Internal only, could be any number */
-static const long format_v2 = 2; /* Internal only, could be any number */
+/* A Changelogv2 index entry is 96 bytes long. */
+static const long cl2_entry_size = 96;
+
+/* Internal format version.
+ * Must match their counterparts in revlogutils/constants.py */
+static const long format_v1 = 1;       /* constants.py: REVLOGV1 */
+static const long format_v2 = 0xDEAD;  /* constants.py: REVLOGV2 */
+static const long format_cl2 = 0xD34D; /* constants.py: CHANGELOGV2 */
+
+static const long entry_v1_offset_high = 0;
+static const long entry_v1_offset_offset_flags = 4;
+static const long entry_v1_offset_comp_len = 8;
+static const long entry_v1_offset_uncomp_len = 12;
+static const long entry_v1_offset_base_rev = 16;
+static const long entry_v1_offset_link_rev = 20;
+static const long entry_v1_offset_parent_1 = 24;
+static const long entry_v1_offset_parent_2 = 28;
+static const long entry_v1_offset_node_id = 32;
+
+static const long entry_v2_offset_high = 0;
+static const long entry_v2_offset_offset_flags = 4;
+static const long entry_v2_offset_comp_len = 8;
+static const long entry_v2_offset_uncomp_len = 12;
+static const long entry_v2_offset_base_rev = 16;
+static const long entry_v2_offset_link_rev = 20;
+static const long entry_v2_offset_parent_1 = 24;
+static const long entry_v2_offset_parent_2 = 28;
+static const long entry_v2_offset_node_id = 32;
+static const long entry_v2_offset_sidedata_offset = 64;
+static const long entry_v2_offset_sidedata_comp_len = 72;
+static const long entry_v2_offset_all_comp_mode = 76;
+/* next free offset: 77 */
+
+static const long entry_cl2_offset_high = 0;
+static const long entry_cl2_offset_offset_flags = 4;
+static const long entry_cl2_offset_comp_len = 8;
+static const long entry_cl2_offset_uncomp_len = 12;
+static const long entry_cl2_offset_parent_1 = 16;
+static const long entry_cl2_offset_parent_2 = 20;
+static const long entry_cl2_offset_node_id = 24;
+static const long entry_cl2_offset_sidedata_offset = 56;
+static const long entry_cl2_offset_sidedata_comp_len = 64;
+static const long entry_cl2_offset_all_comp_mode = 68;
+static const long entry_cl2_offset_rank = 69;
+/* next free offset: 73 */
 
 static const char comp_mode_inline = 2;
+static const char rank_unknown = -1;
 
 static void raise_revlog_error(void)
 {
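
For orientation, the v1 offsets above describe the classic 64-byte RevlogNG entry. A sketch of decoding one raw entry with Python's struct module (the field names are mine, and the sketch assumes the usual 20-byte SHA-1 node plus 12 bytes of padding):

    import struct

    # big-endian: 8-byte offset|flags, six 32-bit ints, 20-byte node id,
    # 12 bytes of zero padding -> 64 bytes total
    V1_ENTRY = struct.Struct(">Qiiiiii20s12x")
    assert V1_ENTRY.size == 64

    def parse_v1_entry(raw):
        (offset_flags, comp_len, uncomp_len, base_rev, link_rev,
         parent_1, parent_2, node_id) = V1_ENTRY.unpack(raw)
        # the low 16 bits of offset_flags carry the flags, the rest the offset
        return (offset_flags >> 16, offset_flags & 0xFFFF, comp_len,
                uncomp_len, base_rev, link_rev, parent_1, parent_2, node_id)
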
@@ -203,8 +248,19 @@ static inline int index_get_parents(inde
 {
 	const char *data = index_deref(self, rev);
 
-	ps[0] = getbe32(data + 24);
-	ps[1] = getbe32(data + 28);
+	if (self->format_version == format_v1) {
+		ps[0] = getbe32(data + entry_v1_offset_parent_1);
+		ps[1] = getbe32(data + entry_v1_offset_parent_2);
+	} else if (self->format_version == format_v2) {
+		ps[0] = getbe32(data + entry_v2_offset_parent_1);
+		ps[1] = getbe32(data + entry_v2_offset_parent_2);
+	} else if (self->format_version == format_cl2) {
+		ps[0] = getbe32(data + entry_cl2_offset_parent_1);
+		ps[1] = getbe32(data + entry_cl2_offset_parent_2);
+	} else {
+		raise_revlog_error();
+		return -1;
+	}
 
 	/* If index file is corrupted, ps[] may point to invalid revisions. So
 	 * there is a risk of buffer overflow to trust them unconditionally. */
@@ -251,14 +307,36 @@ static inline int64_t index_get_start(in
 		return 0;
 
 	data = index_deref(self, rev);
-	offset = getbe32(data + 4);
-	if (rev == 0) {
-		/* mask out version number for the first entry */
-		offset &= 0xFFFF;
+
+	if (self->format_version == format_v1) {
+		offset = getbe32(data + entry_v1_offset_offset_flags);
+		if (rev == 0) {
+			/* mask out version number for the first entry */
+			offset &= 0xFFFF;
+		} else {
+			uint32_t offset_high =
+			    getbe32(data + entry_v1_offset_high);
+			offset |= ((uint64_t)offset_high) << 32;
+		}
+	} else if (self->format_version == format_v2) {
+		offset = getbe32(data + entry_v2_offset_offset_flags);
+		if (rev == 0) {
+			/* mask out version number for the first entry */
+			offset &= 0xFFFF;
+		} else {
+			uint32_t offset_high =
+			    getbe32(data + entry_v2_offset_high);
+			offset |= ((uint64_t)offset_high) << 32;
+		}
+	} else if (self->format_version == format_cl2) {
+		uint32_t offset_high = getbe32(data + entry_cl2_offset_high);
+		offset = getbe32(data + entry_cl2_offset_offset_flags);
+		offset |= ((uint64_t)offset_high) << 32;
 	} else {
-		uint32_t offset_high = getbe32(data);
-		offset |= ((uint64_t)offset_high) << 32;
+		raise_revlog_error();
+		return -1;
 	}
+
 	return (int64_t)(offset >> 16);
 }
 
@@ -272,7 +350,16 @@ static inline int index_get_length(index
 
 	data = index_deref(self, rev);
 
-	tmp = (int)getbe32(data + 8);
+	if (self->format_version == format_v1) {
+		tmp = (int)getbe32(data + entry_v1_offset_comp_len);
+	} else if (self->format_version == format_v2) {
+		tmp = (int)getbe32(data + entry_v2_offset_comp_len);
+	} else if (self->format_version == format_cl2) {
+		tmp = (int)getbe32(data + entry_cl2_offset_comp_len);
+	} else {
+		raise_revlog_error();
+		return -1;
+	}
 	if (tmp < 0) {
 		PyErr_Format(PyExc_OverflowError,
 		             "revlog entry size out of bound (%d)", tmp);
@@ -297,7 +384,7 @@ static PyObject *index_get(indexObject *
 {
 	uint64_t offset_flags, sidedata_offset;
 	int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2,
-	    sidedata_comp_len;
+	    sidedata_comp_len, rank = rank_unknown;
 	char data_comp_mode, sidedata_comp_mode;
 	const char *c_node_id;
 	const char *data;
@@ -317,42 +404,96 @@ static PyObject *index_get(indexObject *
 	if (data == NULL)
 		return NULL;
 
-	offset_flags = getbe32(data + 4);
-	/*
-	 * The first entry on-disk needs the version number masked out,
-	 * but this doesn't apply if entries are added to an empty index.
-	 */
-	if (self->length && pos == 0)
-		offset_flags &= 0xFFFF;
-	else {
-		uint32_t offset_high = getbe32(data);
-		offset_flags |= ((uint64_t)offset_high) << 32;
-	}
-
-	comp_len = getbe32(data + 8);
-	uncomp_len = getbe32(data + 12);
-	base_rev = getbe32(data + 16);
-	link_rev = getbe32(data + 20);
-	parent_1 = getbe32(data + 24);
-	parent_2 = getbe32(data + 28);
-	c_node_id = data + 32;
-
 	if (self->format_version == format_v1) {
+		offset_flags = getbe32(data + entry_v1_offset_offset_flags);
+		/*
+		 * The first entry on-disk needs the version number masked out,
+		 * but this doesn't apply if entries are added to an empty
+		 * index.
+		 */
+		if (self->length && pos == 0)
+			offset_flags &= 0xFFFF;
+		else {
+			uint32_t offset_high =
+			    getbe32(data + entry_v1_offset_high);
+			offset_flags |= ((uint64_t)offset_high) << 32;
+		}
+
+		comp_len = getbe32(data + entry_v1_offset_comp_len);
+		uncomp_len = getbe32(data + entry_v1_offset_uncomp_len);
+		base_rev = getbe32(data + entry_v1_offset_base_rev);
+		link_rev = getbe32(data + entry_v1_offset_link_rev);
+		parent_1 = getbe32(data + entry_v1_offset_parent_1);
+		parent_2 = getbe32(data + entry_v1_offset_parent_2);
+		c_node_id = data + entry_v1_offset_node_id;
+
 		sidedata_offset = 0;
 		sidedata_comp_len = 0;
 		data_comp_mode = comp_mode_inline;
 		sidedata_comp_mode = comp_mode_inline;
+	} else if (self->format_version == format_v2) {
+		offset_flags = getbe32(data + entry_v2_offset_offset_flags);
+		/*
+		 * The first entry on-disk needs the version number masked out,
+		 * but this doesn't apply if entries are added to an empty
+		 * index.
+		 */
+		if (self->length && pos == 0)
+			offset_flags &= 0xFFFF;
+		else {
+			uint32_t offset_high =
+			    getbe32(data + entry_v2_offset_high);
+			offset_flags |= ((uint64_t)offset_high) << 32;
+		}
+
+		comp_len = getbe32(data + entry_v2_offset_comp_len);
+		uncomp_len = getbe32(data + entry_v2_offset_uncomp_len);
+		base_rev = getbe32(data + entry_v2_offset_base_rev);
+		link_rev = getbe32(data + entry_v2_offset_link_rev);
+		parent_1 = getbe32(data + entry_v2_offset_parent_1);
+		parent_2 = getbe32(data + entry_v2_offset_parent_2);
+		c_node_id = data + entry_v2_offset_node_id;
+
+		sidedata_offset =
+		    getbe64(data + entry_v2_offset_sidedata_offset);
+		sidedata_comp_len =
+		    getbe32(data + entry_v2_offset_sidedata_comp_len);
+		data_comp_mode = data[entry_v2_offset_all_comp_mode] & 3;
+		sidedata_comp_mode =
+		    ((data[entry_v2_offset_all_comp_mode] >> 2) & 3);
+	} else if (self->format_version == format_cl2) {
+		uint32_t offset_high = getbe32(data + entry_cl2_offset_high);
+		offset_flags = getbe32(data + entry_cl2_offset_offset_flags);
+		offset_flags |= ((uint64_t)offset_high) << 32;
+		comp_len = getbe32(data + entry_cl2_offset_comp_len);
+		uncomp_len = getbe32(data + entry_cl2_offset_uncomp_len);
+		/* base_rev and link_rev are not stored in changelogv2, but are
+		   still used by some functions shared with the other revlogs.
+		   They are supposed to contain links to other revisions,
+		   but they always point to themselves in the case of a
+		   changelog. */
+		base_rev = pos;
+		link_rev = pos;
+		parent_1 = getbe32(data + entry_cl2_offset_parent_1);
+		parent_2 = getbe32(data + entry_cl2_offset_parent_2);
+		c_node_id = data + entry_cl2_offset_node_id;
+		sidedata_offset =
+		    getbe64(data + entry_cl2_offset_sidedata_offset);
+		sidedata_comp_len =
+		    getbe32(data + entry_cl2_offset_sidedata_comp_len);
+		data_comp_mode = data[entry_cl2_offset_all_comp_mode] & 3;
+		sidedata_comp_mode =
+		    ((data[entry_cl2_offset_all_comp_mode] >> 2) & 3);
+		rank = getbe32(data + entry_cl2_offset_rank);
 	} else {
-		sidedata_offset = getbe64(data + 64);
-		sidedata_comp_len = getbe32(data + 72);
-		data_comp_mode = data[76] & 3;
-		sidedata_comp_mode = ((data[76] >> 2) & 3);
+		raise_revlog_error();
+		return NULL;
 	}
 
 	return Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
 	                     base_rev, link_rev, parent_1, parent_2, c_node_id,
 	                     self->nodelen, sidedata_offset, sidedata_comp_len,
-	                     data_comp_mode, sidedata_comp_mode);
+	                     data_comp_mode, sidedata_comp_mode, rank);
 }
 /*
  * Pack header information in binary
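
Because tuple_format gained a trailing `i`, each entry handed back to Python is now a 13-item tuple whose last element is the changelogv2 rank (rank_unknown, i.e. -1, for the other formats). A consuming sketch (the field names are mine; the order follows tuple_format):

    (offset_flags, comp_len, uncomp_len, base_rev, link_rev,
     parent_1, parent_2, node_id, sidedata_offset, sidedata_comp_len,
     data_comp_mode, sidedata_comp_mode, rank) = index[rev]
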
@@ -410,6 +551,7 @@ static const char *index_node(indexObjec
 {
 	Py_ssize_t length = index_length(self);
 	const char *data;
+	const char *node_id;
 
 	if (pos == nullrev)
 		return nullid;
@@ -418,7 +560,19 @@ static const char *index_node(indexObjec
 		return NULL;
 
 	data = index_deref(self, pos);
-	return data ? data + 32 : NULL;
+
+	if (self->format_version == format_v1) {
+		node_id = data + entry_v1_offset_node_id;
+	} else if (self->format_version == format_v2) {
+		node_id = data + entry_v2_offset_node_id;
+	} else if (self->format_version == format_cl2) {
+		node_id = data + entry_cl2_offset_node_id;
+	} else {
+		raise_revlog_error();
+		return NULL;
+	}
+
+	return data ? node_id : NULL;
 }
 
 /*
@@ -453,7 +607,7 @@ static PyObject *index_append(indexObjec
 {
 	uint64_t offset_flags, sidedata_offset;
 	int rev, comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2,
-	    sidedata_comp_len;
+	    sidedata_comp_len, rank;
 	char data_comp_mode, sidedata_comp_mode;
 	Py_ssize_t c_node_id_len;
 	const char *c_node_id;
@@ -464,8 +618,8 @@ static PyObject *index_append(indexObjec
 	                      &uncomp_len, &base_rev, &link_rev, &parent_1,
 	                      &parent_2, &c_node_id, &c_node_id_len,
 	                      &sidedata_offset, &sidedata_comp_len,
-	                      &data_comp_mode, &sidedata_comp_mode)) {
-		PyErr_SetString(PyExc_TypeError, "11-tuple required");
+	                      &data_comp_mode, &sidedata_comp_mode, &rank)) {
+		PyErr_SetString(PyExc_TypeError, "12-tuple required");
 		return NULL;
 	}
 
@@ -501,25 +655,61 @@ static PyObject *index_append(indexObjec
 	}
 	rev = self->length + self->new_length;
 	data = self->added + self->entry_size * self->new_length++;
-	putbe32(offset_flags >> 32, data);
-	putbe32(offset_flags & 0xffffffffU, data + 4);
-	putbe32(comp_len, data + 8);
-	putbe32(uncomp_len, data + 12);
-	putbe32(base_rev, data + 16);
-	putbe32(link_rev, data + 20);
-	putbe32(parent_1, data + 24);
-	putbe32(parent_2, data + 28);
-	memcpy(data + 32, c_node_id, c_node_id_len);
-	/* Padding since SHA-1 is only 20 bytes for now */
-	memset(data + 32 + c_node_id_len, 0, 32 - c_node_id_len);
-	if (self->format_version == format_v2) {
-		putbe64(sidedata_offset, data + 64);
-		putbe32(sidedata_comp_len, data + 72);
+
+	memset(data, 0, self->entry_size);
+
+	if (self->format_version == format_v1) {
+		putbe32(offset_flags >> 32, data + entry_v1_offset_high);
+		putbe32(offset_flags & 0xffffffffU,
+		        data + entry_v1_offset_offset_flags);
+		putbe32(comp_len, data + entry_v1_offset_comp_len);
+		putbe32(uncomp_len, data + entry_v1_offset_uncomp_len);
+		putbe32(base_rev, data + entry_v1_offset_base_rev);
+		putbe32(link_rev, data + entry_v1_offset_link_rev);
+		putbe32(parent_1, data + entry_v1_offset_parent_1);
+		putbe32(parent_2, data + entry_v1_offset_parent_2);
+		memcpy(data + entry_v1_offset_node_id, c_node_id,
+		       c_node_id_len);
+	} else if (self->format_version == format_v2) {
+		putbe32(offset_flags >> 32, data + entry_v2_offset_high);
+		putbe32(offset_flags & 0xffffffffU,
+		        data + entry_v2_offset_offset_flags);
+		putbe32(comp_len, data + entry_v2_offset_comp_len);
+		putbe32(uncomp_len, data + entry_v2_offset_uncomp_len);
+		putbe32(base_rev, data + entry_v2_offset_base_rev);
+		putbe32(link_rev, data + entry_v2_offset_link_rev);
+		putbe32(parent_1, data + entry_v2_offset_parent_1);
+		putbe32(parent_2, data + entry_v2_offset_parent_2);
+		memcpy(data + entry_v2_offset_node_id, c_node_id,
+		       c_node_id_len);
+		putbe64(sidedata_offset,
+		        data + entry_v2_offset_sidedata_offset);
+		putbe32(sidedata_comp_len,
+		        data + entry_v2_offset_sidedata_comp_len);
 		comp_field = data_comp_mode & 3;
 		comp_field = comp_field | (sidedata_comp_mode & 3) << 2;
-		data[76] = comp_field;
-		/* Padding for 96 bytes alignment */
-		memset(data + 77, 0, self->entry_size - 77);
+		data[entry_v2_offset_all_comp_mode] = comp_field;
+	} else if (self->format_version == format_cl2) {
+		putbe32(offset_flags >> 32, data + entry_cl2_offset_high);
+		putbe32(offset_flags & 0xffffffffU,
+		        data + entry_cl2_offset_offset_flags);
+		putbe32(comp_len, data + entry_cl2_offset_comp_len);
+		putbe32(uncomp_len, data + entry_cl2_offset_uncomp_len);
+		putbe32(parent_1, data + entry_cl2_offset_parent_1);
+		putbe32(parent_2, data + entry_cl2_offset_parent_2);
+		memcpy(data + entry_cl2_offset_node_id, c_node_id,
+		       c_node_id_len);
+		putbe64(sidedata_offset,
+		        data + entry_cl2_offset_sidedata_offset);
+		putbe32(sidedata_comp_len,
+		        data + entry_cl2_offset_sidedata_comp_len);
+		comp_field = data_comp_mode & 3;
+		comp_field = comp_field | (sidedata_comp_mode & 3) << 2;
+		data[entry_cl2_offset_all_comp_mode] = comp_field;
+		putbe32(rank, data + entry_cl2_offset_rank);
+	} else {
+		raise_revlog_error();
+		return NULL;
 	}
 
 	if (self->ntinitialized)
@@ -574,10 +764,28 @@ static PyObject *index_replace_sidedata_
 	/* Find the newly added node, offset from the "already on-disk" length
 	 */
 	data = self->added + self->entry_size * (rev - self->length);
-	putbe64(offset_flags, data);
-	putbe64(sidedata_offset, data + 64);
-	putbe32(sidedata_comp_len, data + 72);
-	data[76] = (data[76] & ~(3 << 2)) | ((comp_mode & 3) << 2);
+	if (self->format_version == format_v2) {
+		putbe64(offset_flags, data + entry_v2_offset_high);
+		putbe64(sidedata_offset,
+		        data + entry_v2_offset_sidedata_offset);
+		putbe32(sidedata_comp_len,
+		        data + entry_v2_offset_sidedata_comp_len);
+		data[entry_v2_offset_all_comp_mode] =
+		    (data[entry_v2_offset_all_comp_mode] & ~(3 << 2)) |
+		    ((comp_mode & 3) << 2);
+	} else if (self->format_version == format_cl2) {
+		putbe64(offset_flags, data + entry_cl2_offset_high);
+		putbe64(sidedata_offset,
+		        data + entry_cl2_offset_sidedata_offset);
+		putbe32(sidedata_comp_len,
+		        data + entry_cl2_offset_sidedata_comp_len);
+		data[entry_cl2_offset_all_comp_mode] =
+		    (data[entry_cl2_offset_all_comp_mode] & ~(3 << 2)) |
+		    ((comp_mode & 3) << 2);
+	} else {
+		raise_revlog_error();
+		return NULL;
+	}
 
 	Py_RETURN_NONE;
 }
@@ -1120,7 +1328,17 @@ static inline int index_baserev(indexObj
 	data = index_deref(self, rev);
 	if (data == NULL)
 		return -2;
-	result = getbe32(data + 16);
+
+	if (self->format_version == format_v1) {
+		result = getbe32(data + entry_v1_offset_base_rev);
+	} else if (self->format_version == format_v2) {
+		result = getbe32(data + entry_v2_offset_base_rev);
+	} else if (self->format_version == format_cl2) {
+		return rev;
+	} else {
+		raise_revlog_error();
+		return -1;
+	}
 
 	if (result > rev) {
 		PyErr_Format(
@@ -2598,8 +2816,10 @@ static void index_invalidate_added(index
 	if (i < 0)
 		return;
 
-	for (i = start; i < len; i++)
-		nt_delete_node(&self->nt, index_deref(self, i) + 32);
+	for (i = start; i < len; i++) {
+		const char *node = index_node(self, i);
+		nt_delete_node(&self->nt, node);
+	}
 
 	self->new_length = start - self->length;
 }
@@ -2732,9 +2952,18 @@ static Py_ssize_t inline_scan(indexObjec
 	while (pos + self->entry_size <= end && pos >= 0) {
 		uint32_t comp_len, sidedata_comp_len = 0;
 		/* 3rd element of header is length of compressed inline data */
-		comp_len = getbe32(data + pos + 8);
-		if (self->entry_size == v2_entry_size) {
-			sidedata_comp_len = getbe32(data + pos + 72);
+		if (self->format_version == format_v1) {
+			comp_len =
+			    getbe32(data + pos + entry_v1_offset_comp_len);
+			sidedata_comp_len = 0;
+		} else if (self->format_version == format_v2) {
+			comp_len =
+			    getbe32(data + pos + entry_v2_offset_comp_len);
+			sidedata_comp_len = getbe32(
+			    data + pos + entry_v2_offset_sidedata_comp_len);
+		} else {
+			raise_revlog_error();
+			return -1;
 		}
 		incr = self->entry_size + comp_len + sidedata_comp_len;
 		if (offsets)
@@ -2754,10 +2983,10 @@ static Py_ssize_t inline_scan(indexObjec
 
 static int index_init(indexObject *self, PyObject *args, PyObject *kwargs)
 {
-	PyObject *data_obj, *inlined_obj, *revlogv2;
+	PyObject *data_obj, *inlined_obj;
 	Py_ssize_t size;
 
-	static char *kwlist[] = {"data", "inlined", "revlogv2", NULL};
+	static char *kwlist[] = {"data", "inlined", "format", NULL};
 
 	/* Initialize before argument-checking to avoid index_dealloc() crash.
 	 */
@@ -2774,10 +3003,11 @@ static int index_init(indexObject *self,
 	self->nodelen = 20;
 	self->nullentry = NULL;
 	self->rust_ext_compat = 1;
+	self->format_version = format_v1;
 
-	revlogv2 = NULL;
-	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|O", kwlist,
-	                                 &data_obj, &inlined_obj, &revlogv2))
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|l", kwlist,
+	                                 &data_obj, &inlined_obj,
+	                                 &(self->format_version)))
 		return -1;
 	if (!PyObject_CheckBuffer(data_obj)) {
 		PyErr_SetString(PyExc_TypeError,
@@ -2789,17 +3019,18 @@ static int index_init(indexObject *self,
 		return -1;
 	}
 
-	if (revlogv2 && PyObject_IsTrue(revlogv2)) {
-		self->format_version = format_v2;
-		self->entry_size = v2_entry_size;
-	} else {
-		self->format_version = format_v1;
+	if (self->format_version == format_v1) {
 		self->entry_size = v1_entry_size;
+	} else if (self->format_version == format_v2) {
+		self->entry_size = v2_entry_size;
+	} else if (self->format_version == format_cl2) {
+		self->entry_size = cl2_entry_size;
 	}
 
-	self->nullentry = Py_BuildValue(
-	    PY23("iiiiiiis#iiBB", "iiiiiiiy#iiBB"), 0, 0, 0, -1, -1, -1, -1,
-	    nullid, self->nodelen, 0, 0, comp_mode_inline, comp_mode_inline);
+	self->nullentry =
+	    Py_BuildValue(PY23("iiiiiiis#iiBBi", "iiiiiiiy#iiBBi"), 0, 0, 0, -1,
+	                  -1, -1, -1, nullid, self->nodelen, 0, 0,
+	                  comp_mode_inline, comp_mode_inline, rank_unknown);
 
 	if (!self->nullentry)
 		return -1;
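
index_init now derives the entry size from an integer `format` keyword rather than the old `revlogv2` boolean. A hedged construction sketch — the constant values mirror the C definitions above, but the call shape is illustrative (the real wiring lives in the revlog Python code):

    from mercurial.cext import parsers

    REVLOGV1 = 1          # format_v1
    REVLOGV2 = 0xDEAD     # format_v2
    CHANGELOGV2 = 0xD34D  # format_cl2

    # hypothetical call: raw index buffer, inline-data flag, format selector
    index = parsers.index(index_data, inlined, format=CHANGELOGV2)
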
diff --git a/mercurial/changegroup.py b/mercurial/changegroup.py
@@ -350,10 +350,11 @@ class cg1unpacker(object):
 
         def ondupchangelog(cl, rev):
             if rev < clstart:
-                duprevs.append(rev)
+                duprevs.append(rev)  # pytype: disable=attribute-error
 
         def onchangelog(cl, rev):
             ctx = cl.changelogrevision(rev)
+            assert efilesset is not None  # help pytype
             efilesset.update(ctx.files)
             repo.register_changeset(rev, ctx)
 
diff --git a/mercurial/chgserver.py b/mercurial/chgserver.py
@@ -643,6 +643,13 @@ class chgunixservicehandler(object):
 
     def __init__(self, ui):
        self.ui = ui
+
+        # TODO: use PEP 526 syntax (`_hashstate: hashstate` at the class level)
+        # when 3.5 support is dropped.
+        self._hashstate = None  # type: hashstate
+        self._baseaddress = None  # type: bytes
+        self._realaddress = None  # type: bytes
+
         self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
         self._lastactive = time.time()
 
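
The TODO above refers to PEP 526 variable annotations; once Python 3.5 support is gone, the comment-style types could become class-level declarations, roughly as follows (a sketch; Optional reflects the None initialisation, and hashstate is the class defined elsewhere in chgserver.py):

    from typing import Optional

    class chgunixservicehandler(object):
        _hashstate: Optional[hashstate]
        _baseaddress: Optional[bytes]
        _realaddress: Optional[bytes]
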
diff --git a/mercurial/cmdutil.py b/mercurial/cmdutil.py
@@ -522,8 +522,10 @@ def dorecord(
     # 1. filter patch, since we are intending to apply subset of it
     try:
         chunks, newopts = filterfn(ui, original_headers, match)
-    except error.PatchError as err:
+    except error.PatchParseError as err:
         raise error.InputError(_(b'error parsing patch: %s') % err)
+    except error.PatchApplicationError as err:
+        raise error.StateError(_(b'error applying patch: %s') % err)
     opts.update(newopts)
 
     # We need to keep a backup of files that have been newly added and
@@ -608,8 +610,10 @@ def dorecord(
                 ui.debug(b'applying patch\n')
                 ui.debug(fp.getvalue())
                 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
-            except error.PatchError as err:
+            except error.PatchParseError as err:
                 raise error.InputError(pycompat.bytestr(err))
+            except error.PatchApplicationError as err:
+                raise error.StateError(pycompat.bytestr(err))
             del fp
 
             # 4. We prepared working directory according to filtered
@@ -2020,9 +2024,16 @@ def tryimportone(ui, repo, patchdata, pa
                     eolmode=None,
                     similarity=sim / 100.0,
                 )
-            except error.PatchError as e:
+            except error.PatchParseError as e:
+                raise error.InputError(
+                    pycompat.bytestr(e),
+                    hint=_(
+                        b'check that whitespace in the patch has not been mangled'
+                    ),
+                )
+            except error.PatchApplicationError as e:
                 if not partial:
-                    raise error.Abort(pycompat.bytestr(e))
+                    raise error.StateError(pycompat.bytestr(e))
                 if partial:
                     rejects = True
 
@@ -2079,8 +2090,15 @@ def tryimportone(ui, repo, patchdata, pa
                     files,
                     eolmode=None,
                 )
-            except error.PatchError as e:
-                raise error.Abort(stringutil.forcebytestr(e))
+            except error.PatchParseError as e:
+                raise error.InputError(
+                    stringutil.forcebytestr(e),
+                    hint=_(
+                        b'check that whitespace in the patch has not been mangled'
+                    ),
+                )
+            except error.PatchApplicationError as e:
+                raise error.StateError(stringutil.forcebytestr(e))
             if opts.get(b'exact'):
                 editor = None
             else:
@@ -3628,15 +3646,14 @@ def _performrevert(
             prntstatusmsg(b'drop', f)
             repo.dirstate.set_untracked(f)
 
-    normal = None
-    if node == parent:
-        # We're reverting to our parent. If possible, we'd like status
-        # to report the file as clean. We have to use normallookup for
-        # merges to avoid losing information about merged/dirty files.
-        if p2 != repo.nullid:
-            normal = repo.dirstate.set_tracked
-        else:
-            normal = repo.dirstate.set_clean
+    # We are reverting to our parent. If possible, we would like `hg status`
+    # to report the file as clean. We have to be less aggressive for merges,
+    # to avoid losing information about copies introduced by the merge.
+    # This might come with bugs.
+    reset_copy = p2 == repo.nullid
+
+    def normal(filename):
+        return repo.dirstate.set_tracked(filename, reset_copy=reset_copy)
 
     newlyaddedandmodifiedfiles = set()
     if interactive:
@@ -3674,8 +3691,10 @@ def _performrevert(
             if operation == b'discard':
                 chunks = patch.reversehunks(chunks)
 
-        except error.PatchError as err:
-            raise error.Abort(_(b'error parsing patch: %s') % err)
+        except error.PatchParseError as err:
+            raise error.InputError(_(b'error parsing patch: %s') % err)
+        except error.PatchApplicationError as err:
+            raise error.StateError(_(b'error applying patch: %s') % err)
 
         # FIXME: when doing an interactive revert of a copy, there's no way of
         # performing a partial revert of the added file, the only option is
@@ -3710,8 +3729,10 @@ def _performrevert(
         if dopatch:
             try:
                 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
-            except error.PatchError as err:
-                raise error.Abort(pycompat.bytestr(err))
+            except error.PatchParseError as err:
+                raise error.InputError(pycompat.bytestr(err))
+            except error.PatchApplicationError as err:
+                raise error.StateError(pycompat.bytestr(err))
             del fp
         else:
             for f in actions[b'revert'][0]:
@@ -3727,9 +3748,6 @@ def _performrevert(
                 checkout(f)
                 repo.dirstate.set_tracked(f)
 
-    normal = repo.dirstate.set_tracked
-    if node == parent and p2 == repo.nullid:
-        normal = repo.dirstate.set_clean
     for f in actions[b'undelete'][0]:
         if interactive:
             choice = repo.ui.promptchoice(
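
The recurring change in this file: the former catch-all error.PatchError is split so that malformed patch text surfaces as InputError while a well-formed patch that fails to apply surfaces as StateError. Schematically (a sketch of the pattern, not any one call site):

    try:
        apply_patch()  # stand-in for patch.internalpatch and friends
    except error.PatchParseError as err:
        # the patch text itself is malformed: a user-input problem
        raise error.InputError(b'error parsing patch: %s' % err)
    except error.PatchApplicationError as err:
        # the patch is fine but does not apply: a repository-state problem
        raise error.StateError(b'error applying patch: %s' % err)
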
diff --git a/mercurial/color.py b/mercurial/color.py
@@ -248,28 +248,19 @@ def _modesetup(ui):
     if pycompat.iswindows:
         from . import win32
 
-        term = encoding.environ.get(b'TERM')
-        # TERM won't be defined in a vanilla cmd.exe environment.
-
-        # UNIX-like environments on Windows such as Cygwin and MSYS will
-        # set TERM. They appear to make a best effort attempt at setting it
-        # to something appropriate. However, not all environments with TERM
-        # defined support ANSI.
-        ansienviron = term and b'xterm' in term
-
         if mode == b'auto':
             # Since "ansi" could result in terminal gibberish, we error on the
             # side of selecting "win32". However, if w32effects is not defined,
             # we almost certainly don't support "win32", so don't even try.
             # w32effects is not populated when stdout is redirected, so checking
             # it first avoids win32 calls in a state known to error out.
-            if ansienviron or not w32effects or win32.enablevtmode():
+            if not w32effects or win32.enablevtmode():
                 realmode = b'ansi'
             else:
                 realmode = b'win32'
             # An empty w32effects is a clue that stdout is redirected, and thus
             # cannot enable VT mode.
-        elif mode == b'ansi' and w32effects and not ansienviron:
+        elif mode == b'ansi' and w32effects:
             win32.enablevtmode()
     elif mode == b'auto':
         realmode = b'ansi'
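
With the TERM sniffing gone, the Windows mode choice reduces to the following (a schematic distillation of the branch above, not new behaviour):

    def pick_windows_mode(mode, w32effects, enablevtmode):
        if mode == b'auto':
            # prefer ANSI whenever win32 effects are unavailable or VT
            # mode can be enabled; otherwise fall back to win32
            return b'ansi' if not w32effects or enablevtmode() else b'win32'
        if mode == b'ansi' and w32effects:
            enablevtmode()  # ANSI was explicitly requested; best effort
        return mode
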
diff --git a/mercurial/commands.py b/mercurial/commands.py
@@ -3309,7 +3309,9 @@ def _dograft(ui, repo, *revs, **opts):
                 overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')}
                 base = ctx.p1() if basectx is None else basectx
                 with ui.configoverride(overrides, b'graft'):
-                    stats = mergemod.graft(repo, ctx, base, [b'local', b'graft'])
+                    stats = mergemod.graft(
+                        repo, ctx, base, [b'local', b'graft', b'parent of graft']
+                    )
                 # report any conflicts
                 if stats.unresolvedcount > 0:
                     # write out state for --continue
@@ -4914,7 +4916,7 @@ def merge(ui, repo, node=None, **opts):
     overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
     with ui.configoverride(overrides, b'merge'):
         force = opts.get(b'force')
-        labels = [b'working copy', b'merge rev']
+        labels = [b'working copy', b'merge rev', b'common ancestor']
         return hg.merge(ctx, force=force, labels=labels)
 
 
@@ -6130,7 +6132,6 @@ def resolve(ui, repo, *pats, **opts):
         ret = 0
         didwork = False
 
-        tocomplete = []
         hasconflictmarkers = []
         if mark:
             markcheck = ui.config(b'commands', b'resolve.mark-check')
@@ -6183,24 +6184,20 @@ def resolve(ui, repo, *pats, **opts):
                 # preresolve file
                 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
                 with ui.configoverride(overrides, b'resolve'):
-                    complete, r = ms.preresolve(f, wctx)
-                if not complete:
-                    tocomplete.append(f)
-                elif r:
+                    r = ms.resolve(f, wctx)
+                if r:
                     ret = 1
             finally:
                 ms.commit()
 
             # replace filemerge's .orig file with our resolve file
-            # for merges that are complete
-            if complete:
-                try:
-                    util.rename(
-                        a + b".resolve", scmutil.backuppath(ui, repo, f)
-                    )
-                except OSError as inst:
-                    if inst.errno != errno.ENOENT:
-                        raise
+            try:
+                util.rename(
+                    a + b".resolve", scmutil.backuppath(ui, repo, f)
+                )
+            except OSError as inst:
+                if inst.errno != errno.ENOENT:
+                    raise
 
         if hasconflictmarkers:
             ui.warn(
@@ -6218,25 +6215,6 @@ def resolve(ui, repo, *pats, **opts):
             hint=_(b'use --all to mark anyway'),
         )
 
-        for f in tocomplete:
-            try:
-                # resolve file
-                overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
-                with ui.configoverride(overrides, b'resolve'):
-                    r = ms.resolve(f, wctx)
-                if r:
-                    ret = 1
-            finally:
-                ms.commit()
-
-            # replace filemerge's .orig file with our resolve file
-            a = repo.wjoin(f)
-            try:
-                util.rename(a + b".resolve", scmutil.backuppath(ui, repo, f))
-            except OSError as inst:
-                if inst.errno != errno.ENOENT:
-                    raise
-
         ms.commit()
         branchmerge = repo.dirstate.p2() != repo.nullid
         # resolve is not doing a parent change here, however, `record updates`
@@ -6897,9 +6875,9 @@ def status(ui, repo, *pats, **opts):
 
     cmdutil.check_at_most_one_arg(opts, 'rev', 'change')
     opts = pycompat.byteskwargs(opts)
-    revs = opts.get(b'rev')
-    change = opts.get(b'change')
-    terse = opts.get(b'terse')
+    revs = opts.get(b'rev', [])
+    change = opts.get(b'change', b'')
+    terse = opts.get(b'terse', _NOTTERSE)
     if terse is _NOTTERSE:
         if revs:
             terse = b''
@@ -7832,9 +7810,9 @@ def update(ui, repo, node=None, **opts):
         raise error.InputError(_(b"you can't specify a revision and a date"))
 
     updatecheck = None
-    if check:
+    if check or merge is not None and not merge:
         updatecheck = b'abort'
-    elif merge:
+    elif merge or check is not None and not check:
         updatecheck = b'none'
 
     with repo.wlock():
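
The update() change makes --check/--merge tri-state aware: a flag left unset is None, so an explicit --no-merge now implies the abort check and an explicit --no-check implies merging. As a truth table in code (schematic):

    def pick_updatecheck(check, merge):
        # check/merge: True = flag given, False = --no-<flag>, None = absent
        if check or (merge is not None and not merge):
            return b'abort'  # --check, or an explicit --no-merge
        if merge or (check is not None and not check):
            return b'none'   # --merge, or an explicit --no-check
        return None          # fall back to the configured default
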
@@ -134,7 +134,13 b' def _prepare_files(tr, ctx, error=False,' | |||||
134 | for s in salvaged: |
|
134 | for s in salvaged: | |
135 | files.mark_salvaged(s) |
|
135 | files.mark_salvaged(s) | |
136 |
|
136 | |||
137 | if ctx.manifestnode(): |
|
137 | narrow_files = {} | |
|
138 | if not ctx.repo().narrowmatch().always(): | |||
|
139 | for f, e in ms.allextras().items(): | |||
|
140 | action = e.get(b'outside-narrow-merge-action') | |||
|
+            if action is not None:
+                narrow_files[f] = action
+    if ctx.manifestnode() and not narrow_files:
         # reuse an existing manifest revision
         repo.ui.debug(b'reusing known manifest\n')
         mn = ctx.manifestnode()
@@ -142,11 +148,11 @@ def _prepare_files(tr, ctx, error=False,
     if writechangesetcopy:
         files.update_added(ctx.filesadded())
         files.update_removed(ctx.filesremoved())
-    elif not ctx.files():
+    elif not ctx.files() and not narrow_files:
         repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
         mn = p1.manifestnode()
     else:
-        mn = _process_files(tr, ctx, ms, files, error=error)
+        mn = _process_files(tr, ctx, ms, files, narrow_files, error=error)
 
     if origctx and origctx.manifestnode() == mn:
         origfiles = origctx.files()
@@ -177,7 +183,7 @@ def _get_salvaged(repo, ms, ctx):
     return salvaged
 
 
-def _process_files(tr, ctx, ms, files, error=False):
+def _process_files(tr, ctx, ms, files, narrow_files=None, error=False):
     repo = ctx.repo()
     p1 = ctx.p1()
     p2 = ctx.p2()
@@ -198,8 +204,33 @@ def _process_files(tr, ctx, ms, files, e
     linkrev = len(repo)
     repo.ui.note(_(b"committing files:\n"))
     uipathfn = scmutil.getuipathfn(repo)
-    for f in sorted(ctx.modified() + ctx.added()):
+    all_files = ctx.modified() + ctx.added()
+    all_files.extend(narrow_files.keys())
+    all_files.sort()
+    for f in all_files:
         repo.ui.note(uipathfn(f) + b"\n")
+        if f in narrow_files:
+            narrow_action = narrow_files.get(f)
+            if narrow_action == mergestate.CHANGE_REMOVED:
+                files.mark_removed(f)
+                removed.append(f)
+            elif narrow_action == mergestate.CHANGE_ADDED:
+                files.mark_added(f)
+                added.append(f)
+                m[f] = m2[f]
+                flags = m2ctx.find(f)[1] or b''
+                m.setflag(f, flags)
+            elif narrow_action == mergestate.CHANGE_MODIFIED:
+                files.mark_touched(f)
+                added.append(f)
+                m[f] = m2[f]
+                flags = m2ctx.find(f)[1] or b''
+                m.setflag(f, flags)
+            else:
+                msg = _(b"corrupted mergestate, unknown narrow action: %b")
+                hint = _(b"restart the merge")
+                raise error.Abort(msg, hint=hint)
+            continue
         try:
             fctx = ctx[f]
             if fctx is None:
@@ -239,7 +270,17 @@ def _process_files(tr, ctx, ms, files, e
         if not rf(f):
             files.mark_removed(f)
 
-    mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files.touched, added, drop)
+    mn = _commit_manifest(
+        tr,
+        linkrev,
+        ctx,
+        mctx,
+        m,
+        files.touched,
+        added,
+        drop,
+        bool(narrow_files),
+    )
 
     return mn
 
@@ -409,7 +450,17 @@ def _filecommit(
     return fnode, touched
 
 
-def _commit_manifest(tr, linkrev, ctx, mctx, manifest, files, added, drop):
+def _commit_manifest(
+    tr,
+    linkrev,
+    ctx,
+    mctx,
+    manifest,
+    files,
+    added,
+    drop,
+    has_some_narrow_action=False,
+):
     """make a new manifest entry (or reuse a new one)
 
     given an initialised manifest context and precomputed list of
@@ -451,6 +502,10 @@ def _commit_manifest(tr, linkrev, ctx, m
         # at this point is merges, and we already error out in the
         # case where the merge has files outside of the narrowspec,
         # so this is safe.
+        if has_some_narrow_action:
+            match = None
+        else:
+            match = repo.narrowmatch()
         mn = mctx.write(
             tr,
             linkrev,
@@ -458,7 +513,7 @@ def _commit_manifest(tr, linkrev, ctx, m
             p2.manifestnode(),
             added,
             drop,
-            match=repo.narrowmatch(),
+            match=match,
         )
     else:
         repo.ui.debug(
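
Note: a self-contained sketch of the narrow-action dispatch introduced above, for following the logic outside the full source. The constants and container types here are stand-ins, not the real `mergestate` values:

    # Stand-in constants; the real ones live on mercurial.mergestate.
    CHANGE_ADDED, CHANGE_REMOVED, CHANGE_MODIFIED = b'a', b'r', b'm'

    def apply_narrow_action(f, action, m, m2, added, removed):
        """Record file `f` in the manifest-to-be without reading its data."""
        if action == CHANGE_REMOVED:
            removed.append(f)      # the file leaves the manifest
        elif action in (CHANGE_ADDED, CHANGE_MODIFIED):
            added.append(f)
            m[f] = m2[f]           # reuse the filenode from the other side
        else:
            raise ValueError('unknown narrow action: %r' % action)

    m, m2, added, removed = {}, {b'f.txt': b'\x01' * 20}, [], []
    apply_narrow_action(b'f.txt', CHANGE_ADDED, m, m2, added, removed)
    assert m[b'f.txt'] == m2[b'f.txt']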
@@ -1042,11 +1042,6 @@ coreconfigitem(
 )
 coreconfigitem(
     b'experimental',
-    b'mergetempdirprefix',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
     b'mmapindexthreshold',
     default=None,
 )
@@ -1102,16 +1097,6 @@ coreconfigitem(
 )
 coreconfigitem(
     b'experimental',
-    b'httppeer.advertise-v2',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'httppeer.v2-encoder-order',
-    default=None,
-)
-coreconfigitem(
-    b'experimental',
     b'httppostargs',
     default=False,
 )
@@ -1211,11 +1196,6 @@ coreconfigitem(
 )
 coreconfigitem(
     b'experimental',
-    b'sshserver.support-v2',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
     b'sparse-read',
     default=False,
 )
@@ -1241,26 +1221,6 @@ coreconfigitem(
 )
 coreconfigitem(
     b'experimental',
-    b'sshpeer.advertise-v2',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'web.apiserver',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'web.api.http-v2',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
-    b'web.api.debugreflect',
-    default=False,
-)
-coreconfigitem(
-    b'experimental',
     b'web.full-garbage-collection-rate',
     default=1,  # still forcing a full collection on each request
 )
@@ -1281,11 +1241,17 @@ coreconfigitem(
 )
 coreconfigitem(
     b'extensions',
-    b'.*',
+    b'[^:]*',
     default=None,
     generic=True,
 )
 coreconfigitem(
+    b'extensions',
+    b'[^:]*:required',
+    default=False,
+    generic=True,
+)
+coreconfigitem(
     b'extdata',
     b'.*',
     default=None,
@@ -1313,6 +1279,18 @@ coreconfigitem(
 )
 coreconfigitem(
     b'format',
+    b'use-dirstate-tracked-hint',
+    default=False,
+    experimental=True,
+)
+coreconfigitem(
+    b'format',
+    b'use-dirstate-tracked-hint.version',
+    default=1,
+    experimental=True,
+)
+coreconfigitem(
+    b'format',
     b'dotencode',
     default=True,
 )
@@ -1352,10 +1330,10 @@ coreconfigitem(
 )
 # Experimental TODOs:
 #
-# * Same as for evlogv2 (but for the reduction of the number of files)
+# * Same as for revlogv2 (but for the reduction of the number of files)
+# * Actually computing the rank of changesets
 # * Improvement to investigate
 #   - storing .hgtags fnode
-#   - storing `rank` of changesets
 #   - storing branch related identifier
 
 coreconfigitem(
@@ -1405,7 +1383,7 @@ coreconfigitem(
 coreconfigitem(
     b'format',
     b'use-share-safe',
-    default=False,
+    default=True,
 )
 coreconfigitem(
     b'format',
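
Note: the switch from `b'.*'` to `b'[^:]*'` is what lets the new `:required` suboption coexist with plain extension entries: extension names contain no colon, so the two generic patterns can never match the same key (presumably this is what makes something like `myext:required = yes` in an hgrc turn a load failure into a hard error). A quick demonstration of the partition, using plain `re` rather than hg's own matcher:

    import re

    # names without ':' match the plain item; ':required' keys match the new one
    assert re.fullmatch(br'[^:]*', b'myext')
    assert not re.fullmatch(br'[^:]*', b'myext:required')
    assert re.fullmatch(br'[^:]*:required', b'myext:required')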
@@ -20,7 +20,6 @@ from .node import (
 )
 from .pycompat import (
     getattr,
-    open,
 )
 from . import (
     dagop,
@@ -46,6 +45,9 @@ from .utils import (
     dateutil,
     stringutil,
 )
+from .dirstateutils import (
+    timestamp,
+)
 
 propertycache = util.propertycache
 
@@ -682,6 +684,14 @@ class changectx(basectx):
         """Return a list of byte bookmark names."""
         return self._repo.nodebookmarks(self._node)
 
+    def fast_rank(self):
+        repo = self._repo
+        if self._maybe_filtered:
+            cl = repo.changelog
+        else:
+            cl = repo.unfiltered().changelog
+        return cl.fast_rank(self._rev)
+
     def phase(self):
         return self._repo._phasecache.phase(self._repo, self._rev)
 
@@ -1793,13 +1803,14 @@ class workingctx(committablectx):
             sane.append(f)
         return sane
 
-    def _checklookup(self, files):
+    def _checklookup(self, files, mtime_boundary):
         # check for any possibly clean files
         if not files:
-            return [], [], []
+            return [], [], [], []
 
         modified = []
         deleted = []
+        clean = []
         fixup = []
         pctx = self._parents[0]
         # do a full compare of any files that might have changed
@@ -1813,8 +1824,18 @@ class workingctx(committablectx):
                     or pctx[f].cmp(self[f])
                 ):
                     modified.append(f)
+                elif mtime_boundary is None:
+                    clean.append(f)
                 else:
-                    fixup.append(f)
+                    s = self[f].lstat()
+                    mode = s.st_mode
+                    size = s.st_size
+                    file_mtime = timestamp.reliable_mtime_of(s, mtime_boundary)
+                    if file_mtime is not None:
+                        cache_info = (mode, size, file_mtime)
+                        fixup.append((f, cache_info))
+                    else:
+                        clean.append(f)
             except (IOError, OSError):
                 # A file become inaccessible in between? Mark it as deleted,
                 # matching dirstate behavior (issue5584).
@@ -1824,7 +1845,7 @@ class workingctx(committablectx):
                 # it's in the dirstate.
                 deleted.append(f)
 
-        return modified, deleted, fixup
+        return modified, deleted, clean, fixup
 
     def _poststatusfixup(self, status, fixup):
         """update dirstate for files that are actually clean"""
@@ -1842,13 +1863,13 @@ class workingctx(committablectx):
             if dirstate.identity() == oldid:
                 if fixup:
                     if dirstate.pendingparentchange():
-                        normal = lambda f: dirstate.update_file(
+                        normal = lambda f, pfd: dirstate.update_file(
                             f, p1_tracked=True, wc_tracked=True
                         )
                     else:
                         normal = dirstate.set_clean
-                    for f in fixup:
-                        normal(f)
+                    for f, pdf in fixup:
+                        normal(f, pdf)
                     # write changes out explicitly, because nesting
                     # wlock at runtime may prevent 'wlock.release()'
                     # after this block from doing so for subsequent
@@ -1878,19 +1899,23 @@ class workingctx(committablectx):
         subrepos = []
         if b'.hgsub' in self:
             subrepos = sorted(self.substate)
-        cmp, s = self._repo.dirstate.status(
+        cmp, s, mtime_boundary = self._repo.dirstate.status(
             match, subrepos, ignored=ignored, clean=clean, unknown=unknown
         )
 
         # check for any possibly clean files
         fixup = []
         if cmp:
-            modified2, deleted2, fixup = self._checklookup(cmp)
+            modified2, deleted2, clean_set, fixup = self._checklookup(
+                cmp, mtime_boundary
+            )
             s.modified.extend(modified2)
             s.deleted.extend(deleted2)
 
+            if clean_set and clean:
+                s.clean.extend(clean_set)
             if fixup and clean:
-                s.clean.extend(fixup)
+                s.clean.extend((f for f, _ in fixup))
 
         self._poststatusfixup(s, fixup)
 
@@ -3111,13 +3136,11 @@ class arbitraryfilectx(object):
         return util.readfile(self._path)
 
     def decodeddata(self):
-        with open(self._path, b"rb") as f:
-            return f.read()
+        return util.readfile(self._path)
 
     def remove(self):
         util.unlink(self._path)
 
     def write(self, data, flags, **kwargs):
         assert not flags
-        with open(self._path, b"wb") as f:
-            f.write(data)
+        util.writefile(self._path, data)
@@ -246,7 +246,6 @@ def _changesetforwardcopies(a, b, match)
         return {}
 
     repo = a.repo().unfiltered()
-    children = {}
 
     cl = repo.changelog
     isancestor = cl.isancestorrev
@@ -290,7 +289,7 @@ def _changesetforwardcopies(a, b, match)
         # no common revision to track copies from
         return {}
     if has_graph_roots:
-        # this deal with the special case mentionned in the [1] footnotes. We
+        # this deal with the special case mentioned in the [1] footnotes. We
         # must filter out revisions that leads to non-common graphroots.
         roots = list(roots)
         m = min(roots)
@@ -301,11 +300,11 @@ def _changesetforwardcopies(a, b, match)
 
     if repo.filecopiesmode == b'changeset-sidedata':
         # When using side-data, we will process the edges "from" the children.
-        # We iterate over the childre, gathering previous collected data for
+        # We iterate over the children, gathering previous collected data for
         # the parents. Do know when the parents data is no longer necessary, we
         # keep a counter of how many children each revision has.
         #
-        # An interresting property of `children_count` is that it only contains
+        # An interesting property of `children_count` is that it only contains
         # revision that will be relevant for a edge of the graph. So if a
         # children has parent not in `children_count`, that edges should not be
         # processed.
@@ -449,7 +448,11 @@ def _combine_changeset_copies(
 
     # filter out internal details and return a {dest: source mapping}
     final_copies = {}
-    for dest, (tt, source) in all_copies[targetrev].items():
+
+    targetrev_items = all_copies[targetrev]
+    assert targetrev_items is not None  # help pytype
+
+    for dest, (tt, source) in targetrev_items.items():
         if source is not None:
             final_copies[dest] = source
     if not alwaysmatch:
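
Note: the `assert targetrev_items is not None` added above exists purely for static analysis; type checkers narrow an `Optional` value after such an assert. A minimal, self-contained illustration of the pattern:

    from typing import Dict, Optional

    def first_key(d: Optional[Dict[bytes, bytes]]) -> bytes:
        assert d is not None  # checkers now treat d as Dict[bytes, bytes]
        return next(iter(d))

    assert first_key({b'k': b'v'}) == b'k'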
@@ -9,7 +9,6 @@ from __future__ import absolute_import
 
 import heapq
 
-from .node import nullrev
 from .thirdparty import attr
 from .node import nullrev
 from . import (
@@ -91,7 +91,6 @@ from . import (
     vfs as vfsmod,
     wireprotoframing,
     wireprotoserver,
-    wireprotov2peer,
 )
 from .interfaces import repository
 from .utils import (
@@ -179,6 +178,12 @@ def debugapplystreamclonebundle(ui, repo
             _(b'add single file all revs overwrite'),
         ),
         (b'n', b'new-file', None, _(b'add new file at each rev')),
+        (
+            b'',
+            b'from-existing',
+            None,
+            _(b'continue from a non-empty repository'),
+        ),
     ],
     _(b'[OPTION]... [TEXT]'),
 )
@@ -189,6 +194,7 @@ def debugbuilddag(
     mergeable_file=False,
     overwritten_file=False,
     new_file=False,
+    from_existing=False,
 ):
     """builds a repo with a given DAG from scratch in the current empty repo
 
@@ -227,7 +233,7 @@ def debugbuilddag(
         text = ui.fin.read()
 
     cl = repo.changelog
-    if len(cl) > 0:
+    if len(cl) > 0 and not from_existing:
         raise error.Abort(_(b'repository is not empty'))
 
     # determine number of revs in DAG
@@ -273,7 +279,10 @@ def debugbuilddag(
                         x[fn].data() for x in (pa, p1, p2)
                     ]
                     m3 = simplemerge.Merge3Text(base, local, other)
-                    ml = [l.strip() for l in m3.merge_lines()]
+                    ml = [
+                        l.strip()
+                        for l in simplemerge.render_minimized(m3)[0]
+                    ]
                     ml.append(b"")
                 elif at > 0:
                     ml = p1[fn].data().split(b"\n")
@@ -4352,8 +4361,8 @@ def debugwireproto(ui, repo, path=None,
 
     ``--peer`` can be used to bypass the handshake protocol and construct a
     peer instance using the specified class type. Valid values are ``raw``,
-    ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
-    raw data payloads and don't support higher-level command actions.
+    ``ssh1``. ``raw`` instances only allow sending raw data payloads and
+    don't support higher-level command actions.
 
     ``--noreadstderr`` can be used to disable automatic reading from stderr
     of the peer (for SSH connections only). Disabling automatic reading of
@@ -4528,13 +4537,11 @@ def debugwireproto(ui, repo, path=None,
 
     if opts[b'peer'] and opts[b'peer'] not in (
         b'raw',
-        b'http2',
         b'ssh1',
-        b'ssh2',
     ):
         raise error.Abort(
             _(b'invalid value for --peer'),
-            hint=_(b'valid values are "raw", "http2", "ssh1" and "ssh2"'),
+            hint=_(b'valid values are "raw" and "ssh1"'),
         )
 
     if path and opts[b'localssh']:
@@ -4602,18 +4609,6 @@ def debugwireproto(ui, repo, path=None,
             None,
             autoreadstderr=autoreadstderr,
         )
-    elif opts[b'peer'] == b'ssh2':
-        ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
-        peer = sshpeer.sshv2peer(
-            ui,
-            url,
-            proc,
-            stdin,
-            stdout,
-            stderr,
-            None,
-            autoreadstderr=autoreadstderr,
-        )
     elif opts[b'peer'] == b'raw':
         ui.write(_(b'using raw connection to peer\n'))
         peer = None
@@ -4666,34 +4661,7 @@ def debugwireproto(ui, repo, path=None,
 
         opener = urlmod.opener(ui, authinfo, **openerargs)
 
-        if opts[b'peer'] == b'http2':
-            ui.write(_(b'creating http peer for wire protocol version 2\n'))
-            # We go through makepeer() because we need an API descriptor for
-            # the peer instance to be useful.
-            maybe_silent = (
-                ui.silent()
-                if opts[b'nologhandshake']
-                else util.nullcontextmanager()
-            )
-            with maybe_silent, ui.configoverride(
-                {(b'experimental', b'httppeer.advertise-v2'): True}
-            ):
-                peer = httppeer.makepeer(ui, path, opener=opener)
-
-            if not isinstance(peer, httppeer.httpv2peer):
-                raise error.Abort(
-                    _(
-                        b'could not instantiate HTTP peer for '
-                        b'wire protocol version 2'
-                    ),
-                    hint=_(
-                        b'the server may not have the feature '
-                        b'enabled or is not allowing this '
-                        b'client version'
-                    ),
-                )
-
-        elif opts[b'peer'] == b'raw':
+        if opts[b'peer'] == b'raw':
             ui.write(_(b'using raw connection to peer\n'))
             peer = None
         elif opts[b'peer']:
@@ -4774,17 +4742,10 @@ def debugwireproto(ui, repo, path=None,
             with peer.commandexecutor() as e:
                 res = e.callcommand(command, args).result()
 
-            if isinstance(res, wireprotov2peer.commandresponse):
-                val = res.objects()
-                ui.status(
-                    _(b'response: %s\n')
-                    % stringutil.pprint(val, bprefix=True, indent=2)
-                )
-            else:
-                ui.status(
-                    _(b'response: %s\n')
-                    % stringutil.pprint(res, bprefix=True, indent=2)
-                )
+            ui.status(
+                _(b'response: %s\n')
+                % stringutil.pprint(res, bprefix=True, indent=2)
+            )
 
         elif action == b'batchbegin':
             if batchedcommands is not None:
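
Note: judging from the `[0]` in the new call above, `simplemerge.render_minimized` appears to return the merged lines first (presumably a `(lines, conflicts)` pair). A sketch of the new call shape, with made-up byte contents:

    from mercurial import simplemerge

    base, local, other = b'a\nb\n', b'a\nB\n', b'a\nb\nc\n'
    m3 = simplemerge.Merge3Text(base, local, other)
    merged_lines = simplemerge.render_minimized(m3)[0]
    ml = [l.strip() for l in merged_lines]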
@@ -65,9 +65,8 @@ def _destupdateobs(repo, clean):
     # replaced changesets: same as divergent except we know there
     # is no conflict
     #
-    # pruned changeset:
-    #                   consider updating to the first non-obsolete parent,
-    #                   similar to what is current done for 'hg prune'
+    # pruned changeset: update to the closest non-obsolete ancestor,
+    #                   similar to what 'hg prune' currently does
 
     if successors:
         # flatten the list here handles both divergent (len > 1)
@@ -77,8 +76,15 @@ def _destupdateobs(repo, clean):
         # get the max revision for the given successors set,
         # i.e. the 'tip' of a set
         node = repo.revs(b'max(%ln)', successors).first()
-        if bookmarks.isactivewdirparent(repo):
-            movemark = repo[b'.'].node()
+    else:
+        p1 = p1.p1()
+        while p1.obsolete():
+            p1 = p1.p1()
+        node = p1.node()
+
+    if node is not None and bookmarks.isactivewdirparent(repo):
+        movemark = repo[b'.'].node()
+
     return node, movemark, None
 
 
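
Note: the new `else:` branch walks first parents until a non-obsolete ancestor is found. A self-contained toy of that walk (the graph data here is made up):

    parents = {b'd': b'c', b'c': b'b', b'b': b'a', b'a': None}
    obsolete = {b'c', b'b'}

    def closest_non_obsolete_ancestor(rev):
        p = parents[rev]
        while p is not None and p in obsolete:
            p = parents[p]
        return p

    assert closest_non_obsolete_ancestor(b'd') == b'a'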
@@ -12,6 +12,7 @@ import contextlib
 import errno
 import os
 import stat
+import uuid
 
 from .i18n import _
 from .pycompat import delattr
@@ -23,6 +24,7 @@ from . import (
     encoding,
     error,
     match as matchmod,
+    node,
     pathutil,
     policy,
     pycompat,
@@ -66,16 +68,6 @@ class rootcache(filecache):
         return obj._join(fname)
 
 
-def _getfsnow(vfs):
-    '''Get "now" timestamp on filesystem'''
-    tmpfd, tmpname = vfs.mkstemp()
-    try:
-        return timestamp.mtime_of(os.fstat(tmpfd))
-    finally:
-        os.close(tmpfd)
-        vfs.unlink(tmpname)
-
-
 def requires_parents_change(func):
     def wrap(self, *args, **kwargs):
         if not self.pendingparentchange():
@@ -109,6 +101,7 @@ class dirstate(object):
         sparsematchfn,
         nodeconstants,
         use_dirstate_v2,
+        use_tracked_hint=False,
     ):
         """Create a new dirstate object.
 
@@ -117,6 +110,7 @@ class dirstate(object):
         the dirstate.
         """
         self._use_dirstate_v2 = use_dirstate_v2
+        self._use_tracked_hint = use_tracked_hint
         self._nodeconstants = nodeconstants
         self._opener = opener
         self._validate = validate
@@ -125,12 +119,15 @@ class dirstate(object):
         # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
         # UNC path pointing to root share (issue4557)
         self._rootdir = pathutil.normasprefix(root)
+        # True is any internal state may be different
         self._dirty = False
-        self._lastnormaltime = timestamp.zero()
+        # True if the set of tracked file may be different
+        self._dirty_tracked_set = False
        self._ui = ui
         self._filecache = {}
         self._parentwriters = 0
         self._filename = b'dirstate'
+        self._filename_th = b'dirstate-tracked-hint'
         self._pendingfilename = b'%s.pending' % self._filename
         self._plchangecallbacks = {}
         self._origpl = None
@@ -332,27 +329,6 @@ class dirstate(object):
             return util.pconvert(path)
         return path
 
-    def __getitem__(self, key):
-        """Return the current state of key (a filename) in the dirstate.
-
-        States are:
-          n  normal
-          m  needs merging
-          r  marked for removal
-          a  marked for addition
-          ?  not tracked
-
-        XXX The "state" is a bit obscure to be in the "public" API. we should
-        consider migrating all user of this to going through the dirstate entry
-        instead.
-        """
-        msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
-        util.nouideprecwarn(msg, b'6.1', stacklevel=2)
-        entry = self._map.get(key)
-        if entry is not None:
-            return entry.state
-        return b'?'
-
     def get_entry(self, path):
         """return a DirstateItem for the associated path"""
         entry = self._map.get(path)
@@ -440,8 +416,8 @@ class dirstate(object):
         for a in ("_map", "_branch", "_ignore"):
             if a in self.__dict__:
                 delattr(self, a)
-        self._lastnormaltime = timestamp.zero()
         self._dirty = False
+        self._dirty_tracked_set = False
         self._parentwriters = 0
         self._origpl = None
 
@@ -462,19 +438,26 @@ class dirstate(object):
         return self._map.copymap
 
     @requires_no_parents_change
-    def set_tracked(self, filename):
+    def set_tracked(self, filename, reset_copy=False):
         """a "public" method for generic code to mark a file as tracked
 
         This function is to be called outside of "update/merge" case. For
         example by a command like `hg add X`.
 
+        if reset_copy is set, any existing copy information will be dropped.
+
         return True the file was previously untracked, False otherwise.
         """
         self._dirty = True
         entry = self._map.get(filename)
         if entry is None or not entry.tracked:
             self._check_new_tracked_filename(filename)
-        return self._map.set_tracked(filename)
+        pre_tracked = self._map.set_tracked(filename)
+        if reset_copy:
+            self._map.copymap.pop(filename, None)
+        if pre_tracked:
+            self._dirty_tracked_set = True
+        return pre_tracked
 
     @requires_no_parents_change
     def set_untracked(self, filename):
@@ -488,24 +471,17 @@ class dirstate(object):
         ret = self._map.set_untracked(filename)
         if ret:
             self._dirty = True
+            self._dirty_tracked_set = True
         return ret
 
     @requires_no_parents_change
-    def set_clean(self, filename, parentfiledata=None):
+    def set_clean(self, filename, parentfiledata):
         """record that the current state of the file on disk is known to be clean"""
         self._dirty = True
-        if parentfiledata:
-            (mode, size, mtime) = parentfiledata
-        else:
-            (mode, size, mtime) = self._get_filedata(filename)
         if not self._map[filename].tracked:
             self._check_new_tracked_filename(filename)
+        (mode, size, mtime) = parentfiledata
         self._map.set_clean(filename, mode, size, mtime)
-        if mtime > self._lastnormaltime:
-            # Remember the most recent modification timeslot for status(),
-            # to make sure we won't miss future size-preserving file content
-            # modifications that happen within the same timeslot.
-            self._lastnormaltime = mtime
 
     @requires_no_parents_change
     def set_possibly_dirty(self, filename):
@@ -544,10 +520,6 @@ class dirstate(object):
         if entry is not None and entry.added:
             return  # avoid dropping copy information (maybe?)
 
-        parentfiledata = None
-        if wc_tracked and p1_tracked:
-            parentfiledata = self._get_filedata(filename)
-
         self._map.reset_state(
             filename,
             wc_tracked,
@@ -555,16 +527,7 @@ class dirstate(object):
             # the underlying reference might have changed, we will have to
             # check it.
             has_meaningful_mtime=False,
-            parentfiledata=parentfiledata,
         )
-        if (
-            parentfiledata is not None
-            and parentfiledata[2] > self._lastnormaltime
-        ):
-            # Remember the most recent modification timeslot for status(),
-            # to make sure we won't miss future size-preserving file content
-            # modifications that happen within the same timeslot.
-            self._lastnormaltime = parentfiledata[2]
 
     @requires_parents_change
     def update_file(
@@ -593,13 +556,13 @@ class dirstate(object):
         # this. The test agrees
 
         self._dirty = True
-
-        need_parent_file_data = (
-            not possibly_dirty and not p2_info and wc_tracked and p1_tracked
-        )
-
-        if need_parent_file_data and parentfiledata is None:
-            parentfiledata = self._get_filedata(filename)
+        old_entry = self._map.get(filename)
+        if old_entry is None:
+            prev_tracked = False
+        else:
+            prev_tracked = old_entry.tracked
+        if prev_tracked != wc_tracked:
+            self._dirty_tracked_set = True
 
         self._map.reset_state(
             filename,
@@ -609,14 +572,6 @@ class dirstate(object):
             has_meaningful_mtime=not possibly_dirty,
             parentfiledata=parentfiledata,
         )
-        if (
-            parentfiledata is not None
-            and parentfiledata[2] > self._lastnormaltime
-        ):
-            # Remember the most recent modification timeslot for status(),
-            # to make sure we won't miss future size-preserving file content
-            # modifications that happen within the same timeslot.
-            self._lastnormaltime = parentfiledata[2]
 
     def _check_new_tracked_filename(self, filename):
         scmutil.checkfilename(filename)
@@ -634,14 +589,6 @@ class dirstate(object):
         msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
         raise error.Abort(msg)
 
-    def _get_filedata(self, filename):
-        """returns"""
-        s = os.lstat(self._join(filename))
-        mode = s.st_mode
-        size = s.st_size
-        mtime = timestamp.mtime_of(s)
-        return (mode, size, mtime)
-
     def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
         if exists is None:
             exists = os.path.lexists(os.path.join(self._root, path))
@@ -720,7 +667,6 @@ class dirstate(object):
 
     def clear(self):
         self._map.clear()
-        self._lastnormaltime = timestamp.zero()
         self._dirty = True
 
     def rebuild(self, parent, allfiles, changedfiles=None):
@@ -728,9 +674,7 @@ class dirstate(object):
             # Rebuild entire dirstate
             to_lookup = allfiles
             to_drop = []
-            lastnormaltime = self._lastnormaltime
             self.clear()
-            self._lastnormaltime = lastnormaltime
         elif len(changedfiles) < 10:
             # Avoid turning allfiles into a set, which can be expensive if it's
             # large.
@@ -777,28 +721,41 @@ class dirstate(object):
         if not self._dirty:
             return
 
-        filename = self._filename
+        write_key = self._use_tracked_hint and self._dirty_tracked_set
         if tr:
-            # 'dirstate.write()' is not only for writing in-memory
-            # changes out, but also for dropping ambiguous timestamp.
-            # delayed writing re-raise "ambiguous timestamp issue".
-            # See also the wiki page below for detail:
-            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
-
-            # record when mtime start to be ambiguous
-            now = _getfsnow(self._opener)
-
             # delay writing in-memory changes out
             tr.addfilegenerator(
-                b'dirstate',
+                b'dirstate-1-main',
                 (self._filename,),
-                lambda f: self._writedirstate(tr, f, now=now),
+                lambda f: self._writedirstate(tr, f),
                 location=b'plain',
+                post_finalize=True,
             )
+            if write_key:
+                tr.addfilegenerator(
+                    b'dirstate-2-key-post',
+                    (self._filename_th,),
+                    lambda f: self._write_tracked_hint(tr, f),
+                    location=b'plain',
+                    post_finalize=True,
+                )
             return
 
-        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
-        self._writedirstate(tr, st)
+        file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
+        with file(self._filename) as f:
+            self._writedirstate(tr, f)
+        if write_key:
+            # we update the key-file after writing to make sure reader have a
+            # key that match the newly written content
+            with file(self._filename_th) as f:
+                self._write_tracked_hint(tr, f)
+
+    def delete_tracked_hint(self):
+        """remove the tracked_hint file
+
+        To be used by format downgrades operation"""
+        self._opener.unlink(self._filename_th)
+        self._use_tracked_hint = False
 
     def addparentchangecallback(self, category, callback):
         """add a callback to be called when the wd parents are changed
@@ -811,7 +768,7 @@ class dirstate(object):
         """
         self._plchangecallbacks[category] = callback
 
-    def _writedirstate(self, tr, st, now=None):
+    def _writedirstate(self, tr, st):
         # notify callbacks about parents change
         if self._origpl is not None and self._origpl != self._pl:
             for c, callback in sorted(
@@ -819,34 +776,13 @@ class dirstate(object):
             ):
                 callback(self, self._origpl, self._pl)
             self._origpl = None
-
-        if now is None:
-            # use the modification time of the newly created temporary file as the
-            # filesystem's notion of 'now'
-            now = timestamp.mtime_of(util.fstat(st))
-
-        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
-        # timestamp of each entries in dirstate, because of 'now > mtime'
-        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
-        if delaywrite > 0:
-            # do we have any files to delay for?
-            for f, e in pycompat.iteritems(self._map):
-                if e.need_delay(now):
-                    import time  # to avoid useless import
-
-                    # rather than sleep n seconds, sleep until the next
-                    # multiple of n seconds
-                    clock = time.time()
-                    start = int(clock) - (int(clock) % delaywrite)
-                    end = start + delaywrite
-                    time.sleep(end - clock)
-                    # trust our estimate that the end is near now
-                    now = timestamp.timestamp((end, 0))
-                    break
-
-        self._map.write(tr, st, now)
-        self._lastnormaltime = timestamp.zero()
-        self._dirty = False
+        self._map.write(tr, st)
+        self._dirty = False
+        self._dirty_tracked_set = False
+
+    def _write_tracked_hint(self, tr, f):
+        key = node.hex(uuid.uuid4().bytes)
+        f.write(b"1\n%s\n" % key)  # 1 is the format version
 
     def _dirignore(self, f):
         if self._ignore(f):
@@ -1243,7 +1179,6 @@ class dirstate(object):
             self._rootdir,
             self._ignorefiles(),
             self._checkexec,
-            self._lastnormaltime,
             bool(list_clean),
             bool(list_ignored),
             bool(list_unknown),
@@ -1335,11 +1270,20 @@ class dirstate(object):
             # Some matchers have yet to be implemented
             use_rust = False
 
+        # Get the time from the filesystem so we can disambiguate files that
+        # appear modified in the present or future.
+        try:
+            mtime_boundary = timestamp.get_fs_now(self._opener)
+        except OSError:
+            # In largefiles or readonly context
+            mtime_boundary = None
+
         if use_rust:
             try:
-                return self._rust_status(
+                res = self._rust_status(
                     match, listclean, listignored, listunknown
                 )
+                return res + (mtime_boundary,)
             except rustmod.FallbackError:
                 pass
 
@@ -1361,7 +1305,6 @@ class dirstate(object):
         checkexec = self._checkexec
         checklink = self._checklink
         copymap = self._map.copymap
-        lastnormaltime = self._lastnormaltime
 
         # We need to do full walks when either
         # - we're listing all clean files, or
@@ -1417,19 +1360,17 @@ class dirstate(object):
             else:
                 madd(fn)
         elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
-            ladd(fn)
-        elif timestamp.mtime_of(st) == lastnormaltime:
-            # fn may have just been marked as normal and it may have
-            # changed in the same second without changing its size.
-            # This can happen if we quickly do multiple commits.
-            # Force lookup, so we don't miss such a racy file change.
+            # There might be a change in the future if for example the
+            # internal clock is off, but this is a case where the issues
+            # the user would face would be a lot worse and there is
+            # nothing we can really do.
             ladd(fn)
         elif listclean:
             cadd(fn)
         status = scmutil.status(
             modified, added, removed, deleted, unknown, ignored, clean
         )
-        return (lookup, status)
+        return (lookup, status, mtime_boundary)
 
     def matches(self, match):
         """
@@ -1477,10 +1418,11 @@ class dirstate(object):
                 # changes written out above, even if dirstate is never
                 # changed after this
                 tr.addfilegenerator(
-                    b'dirstate',
+                    b'dirstate-1-main',
                     (self._filename,),
                     lambda f: self._writedirstate(tr, f),
                     location=b'plain',
+                    post_finalize=True,
                 )
 
             # ensure that pending file written above is unlinked at
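
Note: the hint file written by `_write_tracked_hint` is aimed at outside observers: they re-read the random key and only rescan the tracked set when it changes. A hypothetical reader, consistent with the `b"1\n%s\n"` format above (the function name and layout are illustrative, not hg API):

    import os

    def tracked_set_may_have_changed(repo_root, last_seen_key):
        hint = os.path.join(repo_root, b'.hg', b'dirstate-tracked-hint')
        try:
            with open(hint, 'rb') as f:
                version, key = f.read().split(b'\n')[:2]
        except FileNotFoundError:
            return True, None  # no hint file: assume anything changed
        if version != b'1':
            return True, key  # unknown format version: be conservative
        return key != last_seen_key, key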
@@ -444,13 +444,13 @@ class dirstatemap(_dirstatemapcommon):
         self.__getitem__ = self._map.__getitem__
         self.get = self._map.get
 
-    def write(self, tr, st, now):
+    def write(self, tr, st):
         if self._use_dirstate_v2:
-            packed, meta = v2.pack_dirstate(self._map, self.copymap, now)
+            packed, meta = v2.pack_dirstate(self._map, self.copymap)
             self.write_v2_no_append(tr, st, meta, packed)
         else:
             packed = parsers.pack_dirstate(
-                self._map, self.copymap, self.parents(), now
+                self._map, self.copymap, self.parents()
             )
             st.write(packed)
             st.close()
@@ -655,10 +655,10 @@ if rustmod is not None:
                 self._map
             return self.identity
 
-        def write(self, tr, st, now):
+        def write(self, tr, st):
             if not self._use_dirstate_v2:
                 p1, p2 = self.parents()
-                packed = self._map.write_v1(p1, p2, now)
+                packed = self._map.write_v1(p1, p2)
                 st.write(packed)
                 st.close()
                 self._dirtyparents = False
@@ -666,7 +666,7 @@ if rustmod is not None:
 
             # We can only append to an existing data file if there is one
             can_append = self.docket.uuid is not None
-            packed, meta, append = self._map.write_v2(now, can_append)
+            packed, meta, append = self._map.write_v2(can_append)
             if append:
                 docket = self.docket
                 data_filename = docket.data_filename()
@@ -6,8 +6,11 @@
 from __future__ import absolute_import
 
 import functools
+import os
 import stat
 
+from .. import error
+
 
 rangemask = 0x7FFFFFFF
 
@@ -18,40 +21,45 @@ class timestamp(tuple):
     A Unix timestamp with optional nanoseconds precision,
     modulo 2**31 seconds.
 
-    A 2-tuple containing:
+    A 3-tuple containing:
 
     `truncated_seconds`: seconds since the Unix epoch,
     truncated to its lower 31 bits
 
     `subsecond_nanoseconds`: number of nanoseconds since `truncated_seconds`.
     When this is zero, the sub-second precision is considered unknown.
+
+    `second_ambiguous`: whether this timestamp is still "reliable"
+    (see `reliable_mtime_of`) if we drop its sub-second component.
     """
 
     def __new__(cls, value):
-        truncated_seconds, subsec_nanos = value
-        value = (truncated_seconds & rangemask, subsec_nanos)
+        truncated_seconds, subsec_nanos, second_ambiguous = value
+        value = (truncated_seconds & rangemask, subsec_nanos, second_ambiguous)
         return super(timestamp, cls).__new__(cls, value)
 
     def __eq__(self, other):
-        self_secs, self_subsec_nanos = self
-        other_secs, other_subsec_nanos = other
-        return self_secs == other_secs and (
-            self_subsec_nanos == other_subsec_nanos
-            or self_subsec_nanos == 0
-            or other_subsec_nanos == 0
+        raise error.ProgrammingError(
+            'timestamp should never be compared directly'
         )
 
     def __gt__(self, other):
-        self_secs, self_subsec_nanos = self
-        other_secs, other_subsec_nanos = other
-        if self_secs > other_secs:
-            return True
-        if self_secs < other_secs:
-            return False
-        if self_subsec_nanos == 0 or other_subsec_nanos == 0:
-            # they are considered equal, so not "greater than"
-            return False
-        return self_subsec_nanos > other_subsec_nanos
+        raise error.ProgrammingError(
+            'timestamp should never be compared directly'
+        )
+
+
+def get_fs_now(vfs):
+    """return a timestamp for "now" in the current vfs
+
+    This will raise an exception if no temporary files could be created.
+    """
+    tmpfd, tmpname = vfs.mkstemp()
+    try:
+        return mtime_of(os.fstat(tmpfd))
+    finally:
+        os.close(tmpfd)
+        vfs.unlink(tmpname)
 
 
 def zero():
@@ -84,4 +92,37 @@ def mtime_of(stat_result):
     secs = nanos // billion
     subsec_nanos = nanos % billion
 
-    return timestamp((secs, subsec_nanos))
+    return timestamp((secs, subsec_nanos, False))
+
+
+def reliable_mtime_of(stat_result, present_mtime):
+    """Same as `mtime_of`, but return `None` or a `Timestamp` with
+    `second_ambiguous` set if the date might be ambiguous.
+
+    A modification time is reliable if it is older than "present_time" (or
+    sufficiently in the future).
+
+    Otherwise a concurrent modification might happen with the same mtime.
+    """
+    file_mtime = mtime_of(stat_result)
+    file_second = file_mtime[0]
+    file_ns = file_mtime[1]
+    boundary_second = present_mtime[0]
+    boundary_ns = present_mtime[1]
+    # If the mtime of the ambiguous file is younger (or equal) to the starting
+    # point of the `status` walk, we cannot guarantee that another, racy,
+    # write will not happen right after with the same mtime and we cannot
+    # cache the information.
+    #
+    # However, if the mtime is far in the future, this is likely a mismatch
+    # between the current clock and a previous file system operation. So
+    # mtimes more than one day in the future are considered fine.
+    if boundary_second == file_second:
+        if file_ns and boundary_ns:
+            if file_ns < boundary_ns:
+                return timestamp((file_second, file_ns, True))
+        return None
+    elif boundary_second < file_second < (3600 * 24 + boundary_second):
+        return None
+    else:
+        return file_mtime
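
The decision table in `reliable_mtime_of` is easier to see with concrete values. The sketch below is a minimal, self-contained rendering of the same rule, assuming plain `(seconds, nanoseconds)` tuples instead of the `timestamp` class and an invented `classify` helper name: an mtime in the same second as the start of the status walk is only cacheable when both sides carry sub-second data and the file is strictly older (and then only with the ambiguity flag set); anything in the near future is racy; anything clearly older, or more than a day in the future, is safe.

DAY = 24 * 3600

def classify(file_mtime, walk_start):
    """Mirror of the reliable_mtime_of() decision table shown above.

    Both arguments are (seconds, nanoseconds) pairs; returns a
    (seconds, nanoseconds, second_ambiguous) tuple, or None when the
    mtime must not be cached.
    """
    fsec, fns = file_mtime
    bsec, bns = walk_start
    if bsec == fsec:
        # Same second as the walk start: only reliable if both sides
        # have nanosecond data and the file is strictly older.
        if fns and bns and fns < bns:
            return (fsec, fns, True)  # reliable only with sub-second info
        return None                   # ambiguous, do not cache
    elif bsec < fsec < bsec + DAY:
        return None                   # slightly in the future: racy
    else:
        return (fsec, fns, False)     # clearly older, or clock mismatch

# Same second but older nanoseconds is kept (flagged); same second
# without nanosecond data is discarded; older seconds are always fine.
assert classify((100, 10), (100, 20)) == (100, 10, True)
assert classify((100, 0), (100, 20)) is None
assert classify((99, 0), (100, 20)) == (99, 0, False)
assert classify((100 + 2 * DAY, 0), (100, 20)) == (100 + 2 * DAY, 0, False)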
@@ -174,12 +174,10 @@ class Node(object):
         )
 
 
-def pack_dirstate(map, copy_map, now):
+def pack_dirstate(map, copy_map):
     """
     Pack `map` and `copy_map` into the dirstate v2 binary format and return
     the bytearray.
-    `now` is a timestamp of the current filesystem time used to detect race
-    conditions in writing the dirstate to disk, see inline comment.
 
     The on-disk format expects a tree-like structure where the leaves are
     written first (and sorted per-directory), going up levels until the root
@@ -284,17 +282,6 @@ def pack_dirstate(map, copy_map, now):
         stack.append(current_node)
 
     for index, (path, entry) in enumerate(sorted_map, 1):
-        if entry.need_delay(now):
-            # The file was last modified "simultaneously" with the current
-            # write to dirstate (i.e. within the same second for file-
-            # systems with a granularity of 1 sec). This commonly happens
-            # for at least a couple of files on 'update'.
-            # The user could change the file without changing its size
-            # within the same second. Invalidate the file's mtime in
-            # dirstate, forcing future 'status' calls to compare the
-            # contents of the file if the size is the same. This prevents
-            # mistakenly treating such files as clean.
-            entry.set_possibly_dirty()
         nodes_with_entry_count += 1
         if path in copy_map:
             nodes_with_copy_source_count += 1
@@ -19,6 +19,7 @@ from . import (
     bookmarks,
     branchmap,
     error,
+    obsolete,
     phases,
     pycompat,
     scmutil,
@@ -141,17 +142,6 @@ class outgoing(object):
         self._computecommonmissing()
         return self._missing
 
-    @property
-    def missingheads(self):
-        util.nouideprecwarn(
-            b'outgoing.missingheads never contained what the name suggests and '
-            b'was renamed to outgoing.ancestorsof. check your code for '
-            b'correctness.',
-            b'5.5',
-            stacklevel=2,
-        )
-        return self.ancestorsof
-
 
 def findcommonoutgoing(
     repo, other, onlyheads=None, force=False, commoninc=None, portable=False
@@ -556,12 +546,16 @@ def _postprocessobsolete(pushop, futurec
     if len(localcandidate) == 1:
         return unknownheads | set(candidate_newhs), set()
 
+    obsrevs = obsolete.getrevs(unfi, b'obsolete')
+    futurenonobsolete = frozenset(futurecommon) - obsrevs
+
     # actually process branch replacement
     while localcandidate:
         nh = localcandidate.pop()
+        r = torev(nh)
         current_branch = unfi[nh].branch()
         # run this check early to skip the evaluation of the whole branch
-        if torev(nh) in futurecommon or ispublic(torev(nh)):
+        if ispublic(r) or r not in obsrevs:
             newhs.add(nh)
             continue
 
@@ -583,7 +577,7 @@ def _postprocessobsolete(pushop, futurec
         # * if we have no markers to push to obsolete it.
         if (
             any(ispublic(r) for r in branchrevs)
-            or any(torev(n) in futurecommon for n in branchnodes)
+            or any(torev(n) in futurenonobsolete for n in branchnodes)
             or any(not hasoutmarker(n) for n in branchnodes)
         ):
             newhs.add(nh)
@@ -511,17 +511,21 @@ def trim(s, width, ellipsis=b'', leftsid
     if width <= 0:  # no enough room even for ellipsis
         return ellipsis[: width + len(ellipsis)]
 
+    chars = list(u)
     if leftside:
-        uslice = lambda i: u[i:]
-        concat = lambda s: ellipsis + s
-    else:
-        uslice = lambda i: u[:-i]
-        concat = lambda s: s + ellipsis
-    for i in pycompat.xrange(1, len(u)):
-        usub = uslice(i)
-        if ucolwidth(usub) <= width:
-            return concat(usub.encode(_sysstr(encoding)))
-    return ellipsis  # no enough room for multi-column characters
+        chars.reverse()
+    width_so_far = 0
+    for i, c in enumerate(chars):
+        width_so_far += ucolwidth(c)
+        if width_so_far > width:
+            break
+    chars = chars[:i]
+    if leftside:
+        chars.reverse()
+    u = u''.join(chars).encode(_sysstr(encoding))
+    if leftside:
+        return ellipsis + u
+    return u + ellipsis
 
 
 class normcasespecs(object):
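
The rewritten `trim` walks the characters once and accumulates display columns, instead of re-measuring ever-shorter slices on every iteration. Below is a self-contained approximation of the same single-pass technique; it uses `unicodedata.east_asian_width` as a rough stand-in for Mercurial's `ucolwidth`, works on `str` rather than bytes, and folds the ellipsis budget in explicitly, so it is a sketch of the algorithm rather than the real function.

import unicodedata

def colwidth(c):
    # Wide/fullwidth East Asian characters take two terminal columns.
    return 2 if unicodedata.east_asian_width(c) in ('W', 'F') else 1

def trim_display(u, width, ellipsis='...', leftside=False):
    """Trim `u` to at most `width` terminal columns, one character at a
    time, the same single-pass way as the rewritten trim() above."""
    if sum(map(colwidth, u)) <= width:
        return u  # already fits, no ellipsis needed
    width -= sum(map(colwidth, ellipsis))
    chars = list(u)
    if leftside:
        chars.reverse()  # trim from the left by scanning reversed text
    width_so_far = 0
    i = 0
    for i, c in enumerate(chars):
        width_so_far += colwidth(c)
        if width_so_far > width:
            break
    chars = chars[:i]
    if leftside:
        chars.reverse()
    trimmed = ''.join(chars)
    return ellipsis + trimmed if leftside else trimmed + ellipsis

assert trim_display('hello world', 8) == 'hello...'
assert trim_display('hello world', 8, leftside=True) == '...world'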
@@ -388,6 +388,14 @@ class PatchError(Exception):
     __bytes__ = _tobytes
 
 
+class PatchParseError(PatchError):
+    __bytes__ = _tobytes
+
+
+class PatchApplicationError(PatchError):
+    __bytes__ = _tobytes
+
+
 def getsimilar(symbols, value):
     # type: (Iterable[bytes], bytes) -> List[bytes]
     sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
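
Splitting `PatchError` this way lets callers distinguish a patch that could not be parsed from one that parsed but failed to apply, while any existing `except PatchError` handler keeps working unchanged. A minimal sketch with invented call sites (the classes are redefined standalone here for illustration):

class PatchError(Exception):
    pass

class PatchParseError(PatchError):
    pass

class PatchApplicationError(PatchError):
    pass

def handle(exc):
    try:
        raise exc
    except PatchParseError:
        return 'malformed patch, nothing was applied'
    except PatchApplicationError:
        return 'patch did not apply cleanly'
    except PatchError:
        return 'generic patch failure'

assert handle(PatchParseError('bad hunk header')) == 'malformed patch, nothing was applied'
assert handle(PatchError('boom')) == 'generic patch failure'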
@@ -22,7 +22,6 @@ from . import (
     changegroup,
     discovery,
     error,
-    exchangev2,
     lock as lockmod,
     logexchange,
     narrowspec,
@@ -522,8 +521,16 @@ def _pushdiscovery(pushop):
 
 def _checksubrepostate(pushop):
     """Ensure all outgoing referenced subrepo revisions are present locally"""
+
+    repo = pushop.repo
+
+    # If the repository does not use subrepos, skip the expensive
+    # manifest checks.
+    if not len(repo.file(b'.hgsub')) or not len(repo.file(b'.hgsubstate')):
+        return
+
     for n in pushop.outgoing.missing:
-        ctx = pushop.repo[n]
+        ctx = repo[n]
 
         if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
             for subpath in sorted(ctx.substate):
@@ -1666,21 +1673,17 @@ def pull(
     ):
         add_confirm_callback(repo, pullop)
 
-    # Use the modern wire protocol, if available.
-    if remote.capable(b'command-changesetdata'):
-        exchangev2.pull(pullop)
-    else:
-        # This should ideally be in _pullbundle2(). However, it needs to run
-        # before discovery to avoid extra work.
-        _maybeapplyclonebundle(pullop)
-        streamclone.maybeperformlegacystreamclone(pullop)
-        _pulldiscovery(pullop)
-        if pullop.canusebundle2:
-            _fullpullbundle2(repo, pullop)
-        _pullchangeset(pullop)
-        _pullphase(pullop)
-        _pullbookmarks(pullop)
-        _pullobsolete(pullop)
+    # This should ideally be in _pullbundle2(). However, it needs to run
+    # before discovery to avoid extra work.
+    _maybeapplyclonebundle(pullop)
+    streamclone.maybeperformlegacystreamclone(pullop)
+    _pulldiscovery(pullop)
+    if pullop.canusebundle2:
+        _fullpullbundle2(repo, pullop)
+    _pullchangeset(pullop)
+    _pullphase(pullop)
+    _pullbookmarks(pullop)
+    _pullobsolete(pullop)
 
     # storing remotenames
     if repo.ui.configbool(b'experimental', b'remotenames'):
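
The new guard in `_checksubrepostate` is a cheap-emptiness check: if the `.hgsub` filelog holds no revisions at all, no changeset anywhere can reference subrepos, so the per-changeset manifest walk is skipped entirely. A toy illustration of the pattern, using invented types rather than hg APIs:

class ToyRepo:
    def __init__(self, filelog_sizes, outgoing):
        self._sizes = filelog_sizes  # path -> number of stored revisions
        self.outgoing = outgoing     # nodes that would be pushed

    def filelog_len(self, path):
        return self._sizes.get(path, 0)  # cheap: no manifest reads

def check_subrepo_state(repo, expensive_check):
    # Repository-wide fast path first, per-changeset work only if needed.
    if not repo.filelog_len('.hgsub') or not repo.filelog_len('.hgsubstate'):
        return 0
    return sum(expensive_check(n) for n in repo.outgoing)

calls = []
repo = ToyRepo({}, outgoing=['n1', 'n2'])
check_subrepo_state(repo, lambda n: calls.append(n) or 1)
assert calls == []  # the expensive check never ran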
@@ -282,6 +282,7 @@ def loadall(ui, whitelist=None):
     result = ui.configitems(b"extensions")
     if whitelist is not None:
         result = [(k, v) for (k, v) in result if k in whitelist]
+    result = [(k, v) for (k, v) in result if b':' not in k]
     newindex = len(_order)
     ui.log(
         b'extension',
@@ -290,6 +291,8 @@ def loadall(ui, whitelist=None):
     )
     ui.log(b'extension', b'- processing %d entries\n', len(result))
     with util.timedcm('load all extensions') as stats:
+        default_sub_options = ui.configsuboptions(b"extensions", b"*")[1]
+
         for (name, path) in result:
             if path:
                 if path[0:1] == b'!':
@@ -306,18 +309,32 @@ def loadall(ui, whitelist=None):
         except Exception as inst:
             msg = stringutil.forcebytestr(inst)
             if path:
-                ui.warn(
-                    _(b"*** failed to import extension %s from %s: %s\n")
-                    % (name, path, msg)
+                error_msg = _(
+                    b'failed to import extension "%s" from %s: %s'
                 )
+                error_msg %= (name, path, msg)
             else:
-                ui.warn(
-                    _(b"*** failed to import extension %s: %s\n")
-                    % (name, msg)
-                )
-            if isinstance(inst, error.Hint) and inst.hint:
-                ui.warn(_(b"*** (%s)\n") % inst.hint)
-            ui.traceback()
+                error_msg = _(b'failed to import extension "%s": %s')
+                error_msg %= (name, msg)
+
+            options = default_sub_options.copy()
+            ext_options = ui.configsuboptions(b"extensions", name)[1]
+            options.update(ext_options)
+            if stringutil.parsebool(options.get(b"required", b'no')):
+                hint = None
+                if isinstance(inst, error.Hint) and inst.hint:
+                    hint = inst.hint
+                if hint is None:
+                    hint = _(
+                        b"loading of this extension was required, "
+                        b"see `hg help config.extensions` for details"
+                    )
+                raise error.Abort(error_msg, hint=hint)
+            else:
+                ui.warn((b"*** %s\n") % error_msg)
+                if isinstance(inst, error.Hint) and inst.hint:
+                    ui.warn(_(b"*** (%s)\n") % inst.hint)
+                ui.traceback()
 
         ui.log(
             b'extension',
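
With this change, `[extensions]` entries whose name contains `:` are no longer treated as extensions to load; they are read back as per-extension sub-options, with `*` supplying defaults for every extension. Judging from the code, an entry along the lines of `myext:required = yes` (extension name hypothetical) should turn a failed import of `myext` into a hard abort instead of a warning. A rough standalone model of that parsing, with plain dicts standing in for hg's `ui` object and `parsebool`:

def split_extension_config(items):
    """Split [extensions] items into (extensions to load, sub-options)."""
    to_load = [(k, v) for (k, v) in items if ':' not in k]
    suboptions = {}
    for k, v in items:
        if ':' in k:
            name, subopt = k.split(':', 1)
            suboptions.setdefault(name, {})[subopt] = v
    return to_load, suboptions

def is_required(name, suboptions):
    options = dict(suboptions.get('*', {}))   # defaults for every extension
    options.update(suboptions.get(name, {}))  # per-extension override
    return options.get('required', 'no') in ('yes', 'true', '1', 'on')

items = [('myext', ''), ('myext:required', 'yes'), ('other', '')]
to_load, subs = split_extension_config(items)
assert to_load == [('myext', ''), ('other', '')]
assert is_required('myext', subs) and not is_required('other', subs)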
@@ -97,8 +97,8 @@ class filelog(object):
     def iscensored(self, rev):
         return self._revlog.iscensored(rev)
 
-    def revision(self, node, _df=None, raw=False):
-        return self._revlog.revision(node, _df=_df, raw=raw)
+    def revision(self, node, _df=None):
+        return self._revlog.revision(node, _df=_df)
 
     def rawdata(self, node, _df=None):
         return self._revlog.rawdata(node, _df=_df)
@@ -19,7 +19,6 @@ from .node import (
 )
 from .pycompat import (
     getattr,
-    open,
 )
 
 from . import (
@@ -293,9 +292,9 @@ def _eoltype(data):
     return None  # unknown
 
 
-def _matcheol(file, back):
+def _matcheol(file, backup):
     """Convert EOL markers in a file to match origfile"""
-    tostyle = _eoltype(back.data())  # No repo.wread filters?
+    tostyle = _eoltype(backup.data())  # No repo.wread filters?
     if tostyle:
         data = util.readfile(file)
         style = _eoltype(data)
@@ -306,27 +305,27 @@ def _matcheol(file, back):
 
 
 @internaltool(b'prompt', nomerge)
-def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf):
+def _iprompt(repo, mynode, local, other, base, toolconf):
     """Asks the user which of the local `p1()` or the other `p2()` version to
     keep as the merged version."""
     ui = repo.ui
-    fd = fcd.path()
+    fd = local.fctx.path()
     uipathfn = scmutil.getuipathfn(repo)
 
     # Avoid prompting during an in-memory merge since it doesn't support merge
     # conflicts.
-    if fcd.changectx().isinmemory():
+    if local.fctx.changectx().isinmemory():
         raise error.InMemoryMergeConflictsError(
             b'in-memory merge does not support file conflicts'
         )
 
-    prompts = partextras(labels)
+    prompts = partextras([local.label, other.label])
     prompts[b'fd'] = uipathfn(fd)
     try:
-        if fco.isabsent():
+        if other.fctx.isabsent():
            index = ui.promptchoice(_localchangedotherdeletedmsg % prompts, 2)
            choice = [b'local', b'other', b'unresolved'][index]
-        elif fcd.isabsent():
+        elif local.fctx.isabsent():
            index = ui.promptchoice(_otherchangedlocaldeletedmsg % prompts, 2)
            choice = [b'other', b'local', b'unresolved'][index]
        else:
@@ -347,44 +346,48 @@ def _iprompt(repo, mynode, orig, fcd, fc
             choice = [b'local', b'other', b'unresolved'][index]
 
         if choice == b'other':
-            return _iother(repo, mynode, orig, fcd, fco, fca, toolconf)
+            return _iother(repo, mynode, local, other, base, toolconf)
         elif choice == b'local':
-            return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf)
+            return _ilocal(repo, mynode, local, other, base, toolconf)
         elif choice == b'unresolved':
-            return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf)
+            return _ifail(repo, mynode, local, other, base, toolconf)
     except error.ResponseExpected:
         ui.write(b"\n")
-        return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf)
+        return _ifail(repo, mynode, local, other, base, toolconf)
 
 
 @internaltool(b'local', nomerge)
-def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf):
+def _ilocal(repo, mynode, local, other, base, toolconf):
     """Uses the local `p1()` version of files as the merged version."""
-    return 0, fcd.isabsent()
+    return 0, local.fctx.isabsent()
 
 
 @internaltool(b'other', nomerge)
-def _iother(repo, mynode, orig, fcd, fco, fca, toolconf):
+def _iother(repo, mynode, local, other, base, toolconf):
     """Uses the other `p2()` version of files as the merged version."""
-    if fco.isabsent():
+    if other.fctx.isabsent():
         # local changed, remote deleted -- 'deleted' picked
-        _underlyingfctxifabsent(fcd).remove()
+        _underlyingfctxifabsent(local.fctx).remove()
         deleted = True
     else:
-        _underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
+        _underlyingfctxifabsent(local.fctx).write(
+            other.fctx.data(), other.fctx.flags()
+        )
         deleted = False
     return 0, deleted
 
 
 @internaltool(b'fail', nomerge)
-def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf):
+def _ifail(repo, mynode, local, other, base, toolconf):
     """
     Rather than attempting to merge files that were modified on both
     branches, it marks them as unresolved. The resolve command must be
     used to resolve these conflicts."""
     # for change/delete conflicts write out the changed version, then fail
-    if fcd.isabsent():
-        _underlyingfctxifabsent(fcd).write(fco.data(), fco.flags())
+    if local.fctx.isabsent():
+        _underlyingfctxifabsent(local.fctx).write(
+            other.fctx.data(), other.fctx.flags()
+        )
     return 1, False
 
 
@@ -399,11 +402,18 @@ def _underlyingfctxifabsent(filectx):
     return filectx
 
 
-def _premerge(repo, fcd, fco, fca, toolconf, files, labels=None):
+def _verifytext(input, ui):
+    """verifies that text is non-binary"""
+    if stringutil.binary(input.text()):
+        msg = _(b"%s looks like a binary file.") % input.fctx.path()
+        ui.warn(_(b'warning: %s\n') % msg)
+        raise error.Abort(msg)
+
+
+def _premerge(repo, local, other, base, toolconf):
     tool, toolpath, binary, symlink, scriptfn = toolconf
-    if symlink or fcd.isabsent() or fco.isabsent():
+    if symlink or local.fctx.isabsent() or other.fctx.isabsent():
         return 1
-    unused, unused, unused, back = files
 
     ui = repo.ui
 
@@ -423,26 +433,28 @@ def _premerge(repo, fcd, fco, fca, toolc
 
     if premerge:
         mode = b'merge'
-        if premerge in {b'keep-merge3', b'keep-mergediff'}:
-            if not labels:
-                labels = _defaultconflictlabels
-            if len(labels) < 3:
-                labels.append(b'base')
-            if premerge == b'keep-mergediff':
-                mode = b'mergediff'
-        r = simplemerge.simplemerge(
-            ui, fcd, fca, fco, quiet=True, label=labels, mode=mode
+        if premerge == b'keep-mergediff':
+            mode = b'mergediff'
+        elif premerge == b'keep-merge3':
+            mode = b'merge3'
+        if any(
+            stringutil.binary(input.text()) for input in (local, base, other)
+        ):
+            return 1  # continue merging
+        merged_text, conflicts = simplemerge.simplemerge(
+            local, base, other, mode=mode
         )
-        if not r:
+        if not conflicts or premerge in validkeep:
+            # fcd.flags() already has the merged flags (done in
+            # mergestate.resolve())
+            local.fctx.write(merged_text, local.fctx.flags())
+        if not conflicts:
             ui.debug(b" premerge successful\n")
             return 0
-        if premerge not in validkeep:
-            # restore from backup and try again
-            _restorebackup(fcd, back)
     return 1  # continue merging
 
 
-def _mergecheck(repo, mynode, orig, fcd, fco, fca, toolconf):
+def _mergecheck(repo, mynode, fcd, fco, fca, toolconf):
     tool, toolpath, binary, symlink, scriptfn = toolconf
     uipathfn = scmutil.getuipathfn(repo)
     if symlink:
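
The premerge outcome above reduces to a small decision table: write the merged text back when there are no conflicts, or when one of the "keep" variants asked for the conflict markers to be left in the file; keep merging (return 1) whenever conflicts remain. A sketch with invented names:

VALIDKEEP = {'keep', 'keep-merge3', 'keep-mergediff'}

def premerge_outcome(conflicts, premerge_mode):
    """Return (write_back, continue_merging) for a finished premerge."""
    write_back = (not conflicts) or premerge_mode in VALIDKEEP
    continue_merging = bool(conflicts)
    return write_back, continue_merging

assert premerge_outcome(conflicts=False, premerge_mode='yes') == (True, False)
assert premerge_outcome(conflicts=True, premerge_mode='keep-merge3') == (True, True)
assert premerge_outcome(conflicts=True, premerge_mode='yes') == (False, True)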
@@ -463,7 +475,7 @@ def _mergecheck(repo, mynode, orig, fcd,
         return True
 
 
-def _merge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, mode):
+def _merge(repo, local, other, base, mode):
     """
     Uses the internal non-interactive simple merge algorithm for merging
     files. It will fail if there are any conflicts and leave markers in
@@ -471,8 +483,20 @@ def _merge(repo, mynode, orig, fcd, fco,
     of merge, unless mode equals 'union' which suppresses the markers."""
     ui = repo.ui
 
-    r = simplemerge.simplemerge(ui, fcd, fca, fco, label=labels, mode=mode)
-    return True, r, False
+    try:
+        _verifytext(local, ui)
+        _verifytext(base, ui)
+        _verifytext(other, ui)
+    except error.Abort:
+        return True, True, False
+    else:
+        merged_text, conflicts = simplemerge.simplemerge(
+            local, base, other, mode=mode
+        )
+        # fcd.flags() already has the merged flags (done in
+        # mergestate.resolve())
+        local.fctx.write(merged_text, local.fctx.flags())
+        return True, conflicts, False
 
 
 @internaltool(
@@ -484,14 +508,12 @@ def _merge(repo, mynode, orig, fcd, fco,
     ),
     precheck=_mergecheck,
 )
-def _iunion(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _iunion(repo, mynode, local, other, base, toolconf, backup):
     """
     Uses the internal non-interactive simple merge algorithm for merging
     files. It will use both left and right sides for conflict regions.
     No markers are inserted."""
-    return _merge(
-        repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'union'
-    )
+    return _merge(repo, local, other, base, b'union')
 
 
 @internaltool(
@@ -503,15 +525,13 @@ def _iunion(repo, mynode, orig, fcd, fco
     ),
     precheck=_mergecheck,
 )
-def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _imerge(repo, mynode, local, other, base, toolconf, backup):
     """
     Uses the internal non-interactive simple merge algorithm for merging
     files. It will fail if there are any conflicts and leave markers in
     the partially merged file. Markers will have two sections, one for each side
     of merge."""
-    return _merge(
-        repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'merge'
-    )
+    return _merge(repo, local, other, base, b'merge')
 
 
 @internaltool(
@@ -523,17 +543,13 @@ def _imerge(repo, mynode, orig, fcd, fco
     ),
     precheck=_mergecheck,
 )
-def _imerge3(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _imerge3(repo, mynode, local, other, base, toolconf, backup):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Marker will have three sections, one from each
    side of the merge and one for the base content."""
-    if not labels:
-        labels = _defaultconflictlabels
-    if len(labels) < 3:
-        labels.append(b'base')
-    return _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels)
+    return _merge(repo, local, other, base, b'merge3')
 
 
 @internaltool(
@@ -564,62 +580,30 @@ def _imerge3alwaysgood(*args, **kwargs):
     ),
     precheck=_mergecheck,
 )
-def _imerge_diff(
-    repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None
-):
+def _imerge_diff(repo, mynode, local, other, base, toolconf, backup):
     """
     Uses the internal non-interactive simple merge algorithm for merging
     files. It will fail if there are any conflicts and leave markers in
     the partially merged file. The marker will have two sections, one with the
     content from one side of the merge, and one with a diff from the base
     content to the content on the other side. (experimental)"""
-    if not labels:
-        labels = _defaultconflictlabels
-    if len(labels) < 3:
-        labels.append(b'base')
-    return _merge(
-        repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, b'mergediff'
-    )
-
-
-def _imergeauto(
-    repo,
-    mynode,
-    orig,
-    fcd,
-    fco,
-    fca,
-    toolconf,
-    files,
-    labels=None,
-    localorother=None,
-):
-    """
-    Generic driver for _imergelocal and _imergeother
-    """
-    assert localorother is not None
-    r = simplemerge.simplemerge(
-        repo.ui, fcd, fca, fco, label=labels, localorother=localorother
-    )
-    return True, r
+    return _merge(repo, local, other, base, b'mergediff')
 
 
 @internaltool(b'merge-local', mergeonly, precheck=_mergecheck)
-def _imergelocal(*args, **kwargs):
+def _imergelocal(repo, mynode, local, other, base, toolconf, backup):
     """
     Like :merge, but resolve all conflicts non-interactively in favor
     of the local `p1()` changes."""
-    success, status = _imergeauto(localorother=b'local', *args, **kwargs)
-    return success, status, False
+    return _merge(repo, local, other, base, b'local')
 
 
 @internaltool(b'merge-other', mergeonly, precheck=_mergecheck)
-def _imergeother(*args, **kwargs):
+def _imergeother(repo, mynode, local, other, base, toolconf, backup):
     """
     Like :merge, but resolve all conflicts non-interactively in favor
     of the other `p2()` changes."""
-    success, status = _imergeauto(localorother=b'other', *args, **kwargs)
-    return success, status, False
+    return _merge(repo, local, other, base, b'other')
 
 
 @internaltool(
@@ -631,16 +615,16 @@ def _imergeother(*args, **kwargs):
         b"tool of your choice)\n"
     ),
 )
-def _itagmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _itagmerge(repo, mynode, local, other, base, toolconf, backup):
     """
     Uses the internal tag merge algorithm (experimental).
     """
-    success, status = tagmerge.merge(repo, fcd, fco, fca)
+    success, status = tagmerge.merge(repo, local.fctx, other.fctx, base.fctx)
     return success, status, False
 
 
 @internaltool(b'dump', fullmerge, binary=True, symlink=True)
-def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _idump(repo, mynode, local, other, base, toolconf, backup):
     """
     Creates three versions of the files to merge, containing the
     contents of local, other and base. These files can then be used to
@@ -652,33 +636,31 @@ def _idump(repo, mynode, orig, fcd, fco,
     This implies premerge. Therefore, files aren't dumped, if premerge
     runs successfully. Use :forcedump to forcibly write files out.
     """
-    a = _workingpath(repo, fcd)
-    fd = fcd.path()
+    a = _workingpath(repo, local.fctx)
+    fd = local.fctx.path()
 
     from . import context
 
-    if isinstance(fcd, context.overlayworkingfilectx):
+    if isinstance(local.fctx, context.overlayworkingfilectx):
         raise error.InMemoryMergeConflictsError(
             b'in-memory merge does not support the :dump tool.'
         )
 
-    util.writefile(a + b".local", fcd.decodeddata())
-    repo.wwrite(fd + b".other", fco.data(), fco.flags())
-    repo.wwrite(fd + b".base", fca.data(), fca.flags())
+    util.writefile(a + b".local", local.fctx.decodeddata())
+    repo.wwrite(fd + b".other", other.fctx.data(), other.fctx.flags())
+    repo.wwrite(fd + b".base", base.fctx.data(), base.fctx.flags())
     return False, 1, False
 
 
 @internaltool(b'forcedump', mergeonly, binary=True, symlink=True)
-def _forcedump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _forcedump(repo, mynode, local, other, base, toolconf, backup):
     """
     Creates three versions of the files as same as :dump, but omits premerge.
     """
-    return _idump(
-        repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=labels
-    )
+    return _idump(repo, mynode, local, other, base, toolconf, backup)
 
 
-def _xmergeimm(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+def _xmergeimm(repo, mynode, local, other, base, toolconf, backup):
     # In-memory merge simply raises an exception on all external merge tools,
     # for now.
     #
@@ -746,7 +728,10 @@ def _describemerge(ui, repo, mynode, fcl
     ui.status(t.renderdefault(props))
 
 
-def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels):
+def _xmerge(repo, mynode, local, other, base, toolconf, backup):
+    fcd = local.fctx
+    fco = other.fctx
+    fca = base.fctx
     tool, toolpath, binary, symlink, scriptfn = toolconf
     uipathfn = scmutil.getuipathfn(repo)
     if fcd.isabsent() or fco.isabsent():
@@ -755,20 +740,35 @@ def _xmerge(repo, mynode, orig, fcd, fco
             % (tool, uipathfn(fcd.path()))
         )
         return False, 1, None
-    unused, unused, unused, back = files
     localpath = _workingpath(repo, fcd)
     args = _toolstr(repo.ui, tool, b"args")
 
-    with _maketempfiles(
-        repo, fco, fca, repo.wvfs.join(back.path()), b"$output" in args
-    ) as temppaths:
-        basepath, otherpath, localoutputpath = temppaths
-        outpath = b""
-        mylabel, otherlabel = labels[:2]
-        if len(labels) >= 3:
-            baselabel = labels[2]
-        else:
-            baselabel = b'base'
+    files = [
+        (b"base", fca.path(), fca.decodeddata()),
+        (b"other", fco.path(), fco.decodeddata()),
+    ]
+    outpath = b""
+    if b"$output" in args:
+        # read input from backup, write to original
+        outpath = localpath
+        localoutputpath = backup.path()
+        # Remove the .orig to make syntax-highlighting more likely.
+        if localoutputpath.endswith(b'.orig'):
+            localoutputpath, ext = os.path.splitext(localoutputpath)
+        localdata = util.readfile(localpath)
+        files.append((b"local", localoutputpath, localdata))
+
+    with _maketempfiles(files) as temppaths:
+        basepath, otherpath = temppaths[:2]
+        if len(temppaths) == 3:
+            localpath = temppaths[2]
+
+        def format_label(input):
+            if input.label_detail:
+                return b'%s: %s' % (input.label, input.label_detail)
+            else:
+                return input.label
+
         env = {
             b'HG_FILE': fcd.path(),
             b'HG_MY_NODE': short(mynode),
@@ -777,24 +777,20 @@ def _xmerge(repo, mynode, orig, fcd, fco
             b'HG_MY_ISLINK': b'l' in fcd.flags(),
             b'HG_OTHER_ISLINK': b'l' in fco.flags(),
             b'HG_BASE_ISLINK': b'l' in fca.flags(),
-            b'HG_MY_LABEL': mylabel,
-            b'HG_OTHER_LABEL': otherlabel,
-            b'HG_BASE_LABEL': baselabel,
+            b'HG_MY_LABEL': format_label(local),
+            b'HG_OTHER_LABEL': format_label(other),
+            b'HG_BASE_LABEL': format_label(base),
         }
         ui = repo.ui
 
-        if b"$output" in args:
-            # read input from backup, write to original
-            outpath = localpath
-            localpath = localoutputpath
         replace = {
             b'local': localpath,
             b'base': basepath,
             b'other': otherpath,
             b'output': outpath,
-            b'labellocal': mylabel,
-            b'labelother': otherlabel,
-            b'labelbase': baselabel,
+            b'labellocal': format_label(local),
+            b'labelother': format_label(other),
+            b'labelbase': format_label(base),
         }
         args = util.interpolate(
             br'\$',
@@ -846,40 +842,19 @@ def _xmerge(repo, mynode, orig, fcd, fco
     return True, r, False
 
 
-def _formatconflictmarker(ctx, template, label, pad):
-    """Applies the given template to the ctx, prefixed by the label.
-
-    Pad is the minimum width of the label prefix, so that multiple markers
-    can have aligned templated parts.
-    """
+def _populate_label_detail(input, template):
+    """Applies the given template to the ctx and stores it in the input."""
+    ctx = input.fctx.changectx()
     if ctx.node() is None:
         ctx = ctx.p1()
 
     props = {b'ctx': ctx}
     templateresult = template.renderdefault(props)
-
-    label = (b'%s:' % label).ljust(pad + 1)
-    mark = b'%s %s' % (label, templateresult)
-
-    if mark:
-        mark = mark.splitlines()[0]  # split for safety
-
-    # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
-    return stringutil.ellipsis(mark, 80 - 8)
+    input.label_detail = templateresult.splitlines()[0]  # split for safety
 
 
-_defaultconflictlabels = [b'local', b'other']
-
-
-def _formatlabels(repo, fcd, fco, fca, labels, tool=None):
-    """Formats the given labels using the conflict marker template.
-
-    Returns a list of formatted labels.
-    """
-    cd = fcd.changectx()
-    co = fco.changectx()
-    ca = fca.changectx()
-
+def _populate_label_details(repo, inputs, tool=None):
+    """Populates the label details using the conflict marker template."""
     ui = repo.ui
     template = ui.config(b'command-templates', b'mergemarker')
     if tool is not None:
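
Keeping the base label and its templated detail as two fields on the merge input, instead of pre-baking one formatted string, is what lets the caller render `label: detail` markers for premerge and then reset back to plain labels afterwards. A small sketch, with a simplified `MergeInput` and the template rendering replaced by literal strings:

class MergeInput:
    def __init__(self, label, label_detail=None):
        self.label = label
        self.label_detail = label_detail

def format_label(input):
    # Same shape as the format_label() helper in _xmerge() above.
    if input.label_detail:
        return '%s: %s' % (input.label, input.label_detail)
    return input.label

local = MergeInput('local')
other = MergeInput('other')
# _populate_label_details() would fill these from the
# command-templates.mergemarker template:
local.label_detail = 'working copy'
other.label_detail = '123abc - alice: fix parser'

assert format_label(local) == 'local: working copy'
assert format_label(other) == 'other: 123abc - alice: fix parser'
local.label_detail = None  # reset to basic labels
assert format_label(local) == 'local'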
@@ -890,15 +865,8 @@ def _formatlabels(repo, fcd, fco, fca, l
         ui, template, defaults=templatekw.keywords, resources=tres
     )
 
-    pad = max(len(l) for l in labels)
-
-    newlabels = [
-        _formatconflictmarker(cd, tmpl, labels[0], pad),
-        _formatconflictmarker(co, tmpl, labels[1], pad),
-    ]
-    if len(labels) > 2:
-        newlabels.append(_formatconflictmarker(ca, tmpl, labels[2], pad))
-    return newlabels
+    for input in inputs:
+        _populate_label_detail(input, tmpl)
 
 
 def partextras(labels):
@@ -918,13 +886,7 @@ def partextras(labels):
     }
 
 
-def _restorebackup(fcd, back):
-    # TODO: Add a workingfilectx.write(otherfilectx) path so we can use
-    # util.copy here instead.
-    fcd.write(back.data(), fcd.flags())
-
-
-def _makebackup(repo, ui, wctx, fcd, premerge):
+def _makebackup(repo, ui, fcd):
     """Makes and returns a filectx-like object for ``fcd``'s backup file.
 
     In addition to preserving the user's pre-existing modifications to `fcd`
@@ -932,8 +894,8 @@ def _makebackup(repo, ui, wctx, fcd, pre
     merge changed anything, and determine what line endings the new file should
     have.
 
-    Backups only need to be written once (right before the premerge) since their
-    content doesn't change afterwards.
+    Backups only need to be written once since their content doesn't change
+    afterwards.
     """
     if fcd.isabsent():
         return None
@@ -941,96 +903,47 @@ def _makebackup(repo, ui, wctx, fcd, pre
     # merge -> filemerge). (I suspect the fileset import is the weakest link)
     from . import context
 
-    back = scmutil.backuppath(ui, repo, fcd.path())
-    inworkingdir = back.startswith(repo.wvfs.base) and not back.startswith(
-        repo.vfs.base
-    )
-    if isinstance(fcd, context.overlayworkingfilectx) and inworkingdir:
-        # If the backup file is to be in the working directory, and we're
-        # merging in-memory, we must redirect the backup to the memory context
-        # so we don't disturb the working directory.
-        relpath = back[len(repo.wvfs.base) + 1 :]
-        if premerge:
-            wctx[relpath].write(fcd.data(), fcd.flags())
-        return wctx[relpath]
+    if isinstance(fcd, context.overlayworkingfilectx):
+        # If we're merging in-memory, we're free to put the backup anywhere.
+        fd, backup = pycompat.mkstemp(b'hg-merge-backup')
+        with os.fdopen(fd, 'wb') as f:
+            f.write(fcd.data())
     else:
-        if premerge:
-            # Otherwise, write to wherever path the user specified the backups
-            # should go. We still need to switch based on whether the source is
-            # in-memory so we can use the fast path of ``util.copy`` if both are
-            # on disk.
-            if isinstance(fcd, context.overlayworkingfilectx):
-                util.writefile(back, fcd.data())
-            else:
-                a = _workingpath(repo, fcd)
-                util.copyfile(a, back)
-    # A arbitraryfilectx is returned, so we can run the same functions on
-    # the backup context regardless of where it lives.
-    return context.arbitraryfilectx(back, repo=repo)
+        backup = scmutil.backuppath(ui, repo, fcd.path())
+        a = _workingpath(repo, fcd)
+        util.copyfile(a, backup)
+
+    return context.arbitraryfilectx(backup, repo=repo)
 
 
 @contextlib.contextmanager
-def _maketempfiles(repo, fco, fca, localpath, uselocalpath):
-    """Writes out `fco` and `fca` as temporary files, and (if uselocalpath)
-    copies `localpath` to another temporary file, so an external merge tool may
-    use them.
+def _maketempfiles(files):
+    """Creates a temporary file for each (prefix, path, data) tuple in `files`,
+    so an external merge tool may use them.
     """
-    tmproot = None
-    tmprootprefix = repo.ui.config(b'experimental', b'mergetempdirprefix')
-    if tmprootprefix:
-        tmproot = pycompat.mkdtemp(prefix=tmprootprefix)
+    tmproot = pycompat.mkdtemp(prefix=b'hgmerge-')
 
-    def maketempfrompath(prefix, path):
+    def maketempfrompath(prefix, path, data):
         fullbase, ext = os.path.splitext(path)
         pre = b"%s~%s" % (os.path.basename(fullbase), prefix)
-        if tmproot:
-            name = os.path.join(tmproot, pre)
-            if ext:
-                name += ext
-            f = open(name, "wb")
-        else:
-            fd, name = pycompat.mkstemp(prefix=pre + b'.', suffix=ext)
-            f = os.fdopen(fd, "wb")
-        return f, name
-
-    def tempfromcontext(prefix, ctx):
-        f, name = maketempfrompath(prefix, ctx.path())
-        data = repo.wwritedata(ctx.path(), ctx.data())
-        f.write(data)
-        f.close()
+        name = os.path.join(tmproot, pre)
+        if ext:
+            name += ext
+        util.writefile(name, data)
         return name
 
-    b = tempfromcontext(b"base", fca)
-    c = tempfromcontext(b"other", fco)
-    d = localpath
-    if uselocalpath:
-        # We start off with this being the backup filename, so remove the .orig
-        # to make syntax-highlighting more likely.
-        if d.endswith(b'.orig'):
-            d, _ = os.path.splitext(d)
-        f, d = maketempfrompath(b"local", d)
-        with open(localpath, b'rb') as src:
-            f.write(src.read())
-        f.close()
-
+    temp_files = []
+    for prefix, path, data in files:
+        temp_files.append(maketempfrompath(prefix, path, data))
     try:
-        yield b, c, d
+        yield temp_files
     finally:
-        if tmproot:
-            shutil.rmtree(tmproot)
-        else:
-            util.unlink(b)
-            util.unlink(c)
-            # if not uselocalpath, d is the 'orig'/backup file which we
-            # shouldn't delete.
-            if d and uselocalpath:
-                util.unlink(d)
+        shutil.rmtree(tmproot)
 
 
-def _filemerge(premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
+def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
     """perform a 3-way merge in the working directory
 
-    premerge = whether this is a premerge
     mynode = parent node before merge
     orig = original local filename before merge
     fco = other file context
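
The simplified `_maketempfiles` always uses one private temporary directory and removes it wholesale, instead of tracking per-file cleanup for a mix of directory-based and `mkstemp`-based files. Below is a runnable sketch of the same pattern, substituting the standard library for hg's `pycompat`/`util` wrappers:

import contextlib
import os
import shutil
import tempfile

@contextlib.contextmanager
def make_temp_files(files):
    """Create one temp file per (prefix, path, data) tuple under a single
    temp directory, yield their paths, and remove everything at exit."""
    tmproot = tempfile.mkdtemp(prefix='hgmerge-')
    try:
        names = []
        for prefix, path, data in files:
            base, ext = os.path.splitext(os.path.basename(path))
            # e.g. 'a.txt' with prefix 'base' becomes 'a~base.txt'
            name = os.path.join(tmproot, '%s~%s%s' % (base, prefix, ext))
            with open(name, 'wb') as f:
                f.write(data)
            names.append(name)
        yield names
    finally:
        shutil.rmtree(tmproot)  # one cleanup path for every file

with make_temp_files([('base', 'a.txt', b'1\n'), ('other', 'a.txt', b'2\n')]) as paths:
    assert all(os.path.exists(p) for p in paths)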
@@ -1039,10 +952,6 b' def _filemerge(premerge, repo, wctx, myn' | |||||
1039 |
|
952 | |||
1040 | Returns whether the merge is complete, the return value of the merge, and |
|
953 | Returns whether the merge is complete, the return value of the merge, and | |
1041 | a boolean indicating whether the file was deleted from disk.""" |
|
954 | a boolean indicating whether the file was deleted from disk.""" | |
1042 |
|
||||
1043 | if not fco.cmp(fcd): # files identical? |
|
|||
1044 | return True, None, False |
|
|||
1045 |
|
||||
1046 | ui = repo.ui |
|
955 | ui = repo.ui | |
1047 | fd = fcd.path() |
|
956 | fd = fcd.path() | |
1048 | uipathfn = scmutil.getuipathfn(repo) |
|
957 | uipathfn = scmutil.getuipathfn(repo) | |
@@ -1098,32 +1007,43 b' def _filemerge(premerge, repo, wctx, myn' | |||||
1098 |
|
1007 | |||
1099 | toolconf = tool, toolpath, binary, symlink, scriptfn |
|
1008 | toolconf = tool, toolpath, binary, symlink, scriptfn | |
1100 |
|
1009 | |||
|
1010 | if not labels: | |||
|
+        labels = [b'local', b'other']
+    if len(labels) < 3:
+        labels.append(b'base')
+    local = simplemerge.MergeInput(fcd, labels[0])
+    other = simplemerge.MergeInput(fco, labels[1])
+    base = simplemerge.MergeInput(fca, labels[2])
     if mergetype == nomerge:
-        r, deleted = func(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
-        return True, r, deleted
+        return func(
+            repo,
+            mynode,
+            local,
+            other,
+            base,
+            toolconf,
+        )
 
-    if premerge:
-        if orig != fco.path():
-            ui.status(
-                _(b"merging %s and %s to %s\n")
-                % (uipathfn(orig), uipathfn(fco.path()), fduipath)
-            )
-        else:
-            ui.status(_(b"merging %s\n") % fduipath)
+    if orig != fco.path():
+        ui.status(
+            _(b"merging %s and %s to %s\n")
+            % (uipathfn(orig), uipathfn(fco.path()), fduipath)
+        )
+    else:
+        ui.status(_(b"merging %s\n") % fduipath)
 
     ui.debug(b"my %s other %s ancestor %s\n" % (fcd, fco, fca))
 
     if precheck and not precheck(repo, mynode, fcd, fco, fca, toolconf):
         if onfailure:
             if wctx.isinmemory():
                 raise error.InMemoryMergeConflictsError(
                     b'in-memory merge does not support merge conflicts'
                 )
             ui.warn(onfailure % fduipath)
-        return True, 1, False
+        return 1, False
 
-    back = _makebackup(repo, ui, wctx, fcd, premerge)
-    files = (None, None, None, back)
+    backup = _makebackup(repo, ui, fcd)
     r = 1
     try:
         internalmarkerstyle = ui.config(b'ui', b'mergemarkers')
@@ -1132,51 +1052,53 b' def _filemerge(premerge, repo, wctx, myn'
         else:
             markerstyle = internalmarkerstyle
 
-        if not labels:
-            labels = _defaultconflictlabels
-        formattedlabels = labels
-        if markerstyle != b'basic':
-            formattedlabels = _formatlabels(
-                repo, fcd, fco, fca, labels, tool=tool
-            )
-
-        if premerge and mergetype == fullmerge:
+        if mergetype == fullmerge:
             # conflict markers generated by premerge will use 'detailed'
             # settings if either ui.mergemarkers or the tool's mergemarkers
             # setting is 'detailed'. This way tools can have basic labels in
             # space-constrained areas of the UI, but still get full information
             # in conflict markers if premerge is 'keep' or 'keep-merge3'.
-            premergelabels = labels
             labeltool = None
             if markerstyle != b'basic':
                 # respect 'tool's mergemarkertemplate (which defaults to
                 # command-templates.mergemarker)
                 labeltool = tool
             if internalmarkerstyle != b'basic' or markerstyle != b'basic':
-                premergelabels = _formatlabels(
-                    repo, fcd, fco, fca, premergelabels, tool=labeltool
+                _populate_label_details(
+                    repo, [local, other, base], tool=labeltool
                 )
 
             r = _premerge(
-                repo, fcd, fco, fca, toolconf, files, labels=premergelabels
+                repo,
+                local,
+                other,
+                base,
+                toolconf,
             )
-            # complete if premerge successful (r is 0)
-            return not r, r, False
+            # we're done if premerge was successful (r is 0)
+            if not r:
+                return r, False
+
+            # Reset to basic labels
+            local.label_detail = None
+            other.label_detail = None
+            base.label_detail = None
+
+            if markerstyle != b'basic':
+                _populate_label_details(repo, [local, other, base], tool=tool)
 
         needcheck, r, deleted = func(
             repo,
             mynode,
-            orig,
-            fcd,
-            fco,
-            fca,
+            local,
+            other,
+            base,
             toolconf,
-            files,
-            labels=formattedlabels,
+            backup,
         )
 
         if needcheck:
-            r = _check(repo, r, ui, tool, fcd, files)
+            r = _check(repo, r, ui, tool, fcd, backup)
 
         if r:
             if onfailure:
@@ -1189,10 +1111,10 b' def _filemerge(premerge, repo, wctx, myn'
                 ui.warn(onfailure % fduipath)
                 _onfilemergefailure(ui)
 
-        return True, r, deleted
+        return r, deleted
     finally:
-        if not r and back is not None:
-            back.remove()
+        if not r and backup is not None:
+            backup.remove()
 
 
 def _haltmerge():
@@ -1225,10 +1147,9 b' def hasconflictmarkers(data):'
     )
 
 
-def _check(repo, r, ui, tool, fcd, files):
+def _check(repo, r, ui, tool, fcd, backup):
     fd = fcd.path()
     uipathfn = scmutil.getuipathfn(repo)
-    unused, unused, unused, back = files
 
     if not r and (
         _toolbool(ui, tool, b"checkconflicts")
@@ -1255,7 +1176,7 b' def _check(repo, r, ui, tool, fcd, files'
             or b'changed' in _toollist(ui, tool, b"check")
         )
     ):
-        if back is not None and not fcd.cmp(back):
+        if backup is not None and not fcd.cmp(backup):
             if ui.promptchoice(
                 _(
                     b" output file %s appears unchanged\n"
@@ -1267,8 +1188,8 b' def _check(repo, r, ui, tool, fcd, files'
             ):
                 r = 1
 
-    if back is not None and _toolbool(ui, tool, b"fixeol"):
-        _matcheol(_workingpath(repo, fcd), back)
+    if backup is not None and _toolbool(ui, tool, b"fixeol"):
+        _matcheol(_workingpath(repo, fcd), backup)
 
     return r
 
@@ -1277,18 +1198,6 b' def _workingpath(repo, ctx):'
     return repo.wjoin(ctx.path())
 
 
-def premerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
-    return _filemerge(
-        True, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
-    )
-
-
-def filemerge(repo, wctx, mynode, orig, fcd, fco, fca, labels=None):
-    return _filemerge(
-        False, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
-    )
-
-
 def loadinternalmerge(ui, extname, registrarobj):
     """Load internal merge tool from specified registrarobj"""
     for name, func in pycompat.iteritems(registrarobj._table):
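Taken together, the filemerge.py hunks collapse the old two-phase premerge()/filemerge() pair into a single entry point, and thread simplemerge.MergeInput objects plus a single backup file through the tool functions. A sketch of the caller-visible effect, assuming the surviving function keeps the filemerge name and the (r, deleted) return pair shown above; merge_one_file is a hypothetical wrapper, not Mercurial code:

    def merge_one_file(repo, wctx, mynode, orig, fcd, fco, fca):
        # One call now covers the premerge attempt and, when needed, the
        # real merge-tool invocation; r is non-zero while conflicts remain.
        r, deleted = filemerge(repo, wctx, mynode, orig, fcd, fco, fca)
        if r:
            # conflict markers were left in fcd's file; resolve and retry
            pass
        return r, deleted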
@@ -513,13 +513,18 b' effect and style see :hg:`help color`.'
 ``update.check``
     Determines what level of checking :hg:`update` will perform before moving
     to a destination revision. Valid values are ``abort``, ``none``,
-    ``linear``, and ``noconflict``. ``abort`` always fails if the working
-    directory has uncommitted changes. ``none`` performs no checking, and may
-    result in a merge with uncommitted changes. ``linear`` allows any update
-    as long as it follows a straight line in the revision history, and may
-    trigger a merge with uncommitted changes. ``noconflict`` will allow any
-    update which would not trigger a merge with uncommitted changes, if any
-    are present.
+    ``linear``, and ``noconflict``.
+
+    - ``abort`` always fails if the working directory has uncommitted changes.
+
+    - ``none`` performs no checking, and may result in a merge with uncommitted changes.
+
+    - ``linear`` allows any update as long as it follows a straight line in the
+      revision history, and may trigger a merge with uncommitted changes.
+
+    - ``noconflict`` will allow any update which would not trigger a merge with
+      uncommitted changes, if any are present.
+
     (default: ``linear``)
 
 ``update.requiredest``
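For a concrete illustration: the setting lives in the ``commands`` section of an hgrc, so a user who wants the strictest behavior that still permits clean updates might use:

   [commands]
   update.check = noconflict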
@@ -850,6 +855,24 b' Example for ``~/.hgrc``::'
   # (this extension will get loaded from the file specified)
   myfeature = ~/.hgext/myfeature.py
 
+If an extension fails to load, a warning will be issued, and Mercurial will
+proceed. To enforce that an extension must be loaded, one can set the `required`
+suboption in the config::
+
+  [extensions]
+  myfeature = ~/.hgext/myfeature.py
+  myfeature:required = yes
+
+To debug an extension loading issue, one can add `--traceback` to the
+Mercurial invocation.
+
+A default setting can be set using the special `*` extension key::
+
+  [extensions]
+  *:required = yes
+  myfeature = ~/.hgext/myfeature.py
+  rebase=
+
 
 ``format``
 ----------
@@ -921,6 +944,38 b' https://www.mercurial-scm.org/wiki/Missi'
 
 For a more comprehensive guide, see :hg:`help internals.dirstate-v2`.
 
+``use-dirstate-tracked-hint``
+    Enable or disable the writing of a "tracked key" file alongside the
+    dirstate. (default: disabled)
+
+    That "tracked-hint" can help external automations detect changes to the
+    set of tracked files (i.e. the result of `hg files` or `hg status -macd`).
+
+    The tracked-hint is written in a new `.hg/dirstate-tracked-hint` file,
+    which contains two lines:
+
+    - the first line is the file version (currently: 1),
+    - the second line contains the "tracked-hint".
+
+    That file is written right after the dirstate is written.
+
+    The tracked-hint changes whenever the set of files tracked in the dirstate
+    changes. The general idea is:
+
+    - if the hint is identical, the set of tracked files SHOULD be identical,
+    - if the hint is different, the set of tracked files MIGHT be different.
+
+    The "hint is identical" case uses `SHOULD` because the dirstate and the
+    hint file are two distinct files that cannot be read or written to in an
+    atomic way. If the key is identical, nothing guarantees that the dirstate
+    is not updated right after the hint file. This is considered a negligible
+    limitation for the intended use case. It is actually possible to prevent
+    this race by taking the repository lock during read operations.
+
+    There are two ways to use this feature (a consumer sketch follows this
+    hunk):
+
+    1) monitoring changes to `.hg/dirstate-tracked-hint`: if the file changes,
+       the tracked set might have changed;
+
+    2) storing the value and comparing it to a later value.
+
 ``use-persistent-nodemap``
     Enable or disable the "persistent-nodemap" feature which improves
     performance if the Rust extensions are available.
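A minimal consumer sketch for the second usage pattern above, assuming nothing beyond the two-line layout (version, then hint) that the added help text describes; read_tracked_hint is a hypothetical helper name, not part of Mercurial:

    import os

    def read_tracked_hint(repo_root):
        # Layout per the help text above: line 1 is the format version
        # (currently 1), line 2 is the opaque hint value.
        path = os.path.join(repo_root, ".hg", "dirstate-tracked-hint")
        with open(path, "rb") as f:
            version, hint = f.read().splitlines()[:2]
        if version != b"1":
            raise ValueError("unknown tracked-hint version: %r" % version)
        return hint

    # An identical hint means the tracked set SHOULD be unchanged; a
    # different hint means it MIGHT have changed and should be re-listed.
    before = read_tracked_hint("/path/to/repo")
    # ... some time later ...
    if read_tracked_hint("/path/to/repo") != before:
        pass  # re-run `hg files` (or equivalent) to refresh the tracked set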
@@ -975,7 +1030,7 b' https://www.mercurial-scm.org/wiki/Missi'
 
     Introduced in Mercurial 5.7.
 
-    Disabled by default.
+    Enabled by default in Mercurial 6.1.
 
 ``usestore``
     Enable or disable the "store" repository format which improves
@@ -332,95 +332,6 b' part of the response payload and not par'
 after responses. In other words, the length of the response contains the
 trailing ``\n``.
 
-Clients supporting version 2 of the SSH transport send a line beginning
-with ``upgrade`` before the ``hello`` and ``between`` commands. The line
-(which isn't a well-formed command line because it doesn't consist of a
-single command name) serves to both communicate the client's intent to
-switch to transport version 2 (transports are version 1 by default) as
-well as to advertise the client's transport-level capabilities so the
-server may satisfy that request immediately.
-
-The upgrade line has the form:
-
-    upgrade <token> <transport capabilities>
-
-That is the literal string ``upgrade`` followed by a space, followed by
-a randomly generated string, followed by a space, followed by a string
-denoting the client's transport capabilities.
-
-The token can be anything. However, a random UUID is recommended. (Use
-of version 4 UUIDs is recommended because version 1 UUIDs can leak the
-client's MAC address.)
-
-The transport capabilities string is a URL/percent encoded string
-containing key-value pairs defining the client's transport-level
-capabilities. The following capabilities are defined:
-
-proto
-   A comma-delimited list of transport protocol versions the client
-   supports. e.g. ``ssh-v2``.
-
-If the server does not recognize the ``upgrade`` line, it should issue
-an empty response and continue processing the ``hello`` and ``between``
-commands. Here is an example handshake between a version 2 aware client
-and a non version 2 aware server:
-
-   c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2
-   c: hello\n
-   c: between\n
-   c: pairs 81\n
-   c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
-   s: 0\n
-   s: 324\n
-   s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
-   s: 1\n
-   s: \n
-
-(The initial ``0\n`` line from the server indicates an empty response to
-the unknown ``upgrade ..`` command/line.)
-
-If the server recognizes the ``upgrade`` line and is willing to satisfy that
-upgrade request, it replies with a payload of the following form:
-
-   upgraded <token> <transport name>\n
-
-This line is the literal string ``upgraded``, a space, the token that was
-specified by the client in its ``upgrade ...`` request line, a space, and the
-name of the transport protocol that was chosen by the server. The transport
-name MUST match one of the names the client specified in the ``proto`` field
-of its ``upgrade ...`` request line.
-
-If a server issues an ``upgraded`` response, it MUST also read and ignore
-the lines associated with the ``hello`` and ``between`` command requests
-that were issued by the client. It is assumed that the negotiated transport
-will respond with equivalent requested information following the transport
-handshake.
-
-All data following the ``\n`` terminating the ``upgraded`` line is the
-domain of the negotiated transport. It is common for the data immediately
-following to contain additional metadata about the state of the transport and
-the server. However, this isn't strictly speaking part of the transport
-handshake and isn't covered by this section.
-
-Here is an example handshake between a version 2 aware client and a version
-2 aware server:
-
-   c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2
-   c: hello\n
-   c: between\n
-   c: pairs 81\n
-   c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
-   s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n
-   s: <additional transport specific data>
-
-The client-issued token that is echoed in the response provides a more
-resilient mechanism for differentiating *banner* output from Mercurial
-output. In version 1, properly formatted banner output could get confused
-for Mercurial server output. By submitting a randomly generated token
-that is then present in the response, the client can look for that token
-in response lines and have reasonable certainty that the line did not
-originate from a *banner* message.
-
 SSH Version 1 Transport
 -----------------------
 
@@ -488,31 +399,6 b' If the server announces support for the '
 should issue a ``protocaps`` command after the initial handshake to announce
 its own capabilities. The client capabilities are persistent.
 
-SSH Version 2 Transport
------------------------
-
-**Experimental and under development**
-
-Version 2 of the SSH transport behaves identically to version 1 of the SSH
-transport with the exception of handshake semantics. See above for how
-version 2 of the SSH transport is negotiated.
-
-Immediately following the ``upgraded`` line signaling a switch to version
-2 of the SSH protocol, the server automatically sends additional details
-about the capabilities of the remote server. This has the form:
-
-   <integer length of value>\n
-   capabilities: ...\n
-
-e.g.
-
-   s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n
-   s: 240\n
-   s: capabilities: known getbundle batch ...\n
-
-Following capabilities advertisement, the peers communicate using version
-1 of the SSH transport.
-
 Capabilities
 ============
 
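With the upgrade dance removed, the only remaining handshake is the version 1 exchange, which the deleted example above already contained as its fallback path; stripped of the ``upgrade`` line and the empty ``0\n`` response, it reads:

   c: hello\n
   c: between\n
   c: pairs 81\n
   c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
   s: 324\n
   s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
   s: 1\n
   s: \n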
@@ -1,8 +1,10 b''
 Mercurial accepts several notations for identifying one or more files
 at a time.
 
-By default, Mercurial treats filenames as shell-style extended glob
-patterns.
+By default, Mercurial treats filenames verbatim without pattern
+matching, relative to the current working directory. Note that your
+system shell might perform pattern matching of its own before passing
+filenames into Mercurial.
 
 Alternate pattern notations must be specified explicitly.
 
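The shell caveat matters in practice: quote a pattern whenever Mercurial, rather than the shell, should interpret it. For example (illustrative):

   $ hg status 'glob:**.py'   # quoted: Mercurial expands the pattern
   $ hg status *.py           # unquoted: the shell may expand it first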
@@ -132,13 +132,6 b' def addbranchrevs(lrepo, other, branches'
     return revs, revs[0]
 
 
-def parseurl(path, branches=None):
-    '''parse url#branch, returning (url, (branch, branches))'''
-    msg = b'parseurl(...) moved to mercurial.utils.urlutil'
-    util.nouideprecwarn(msg, b'6.0', stacklevel=2)
-    return urlutil.parseurl(path, branches=branches)
-
-
 schemes = {
     b'bundle': bundlerepo,
     b'union': unionrepo,
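Callers migrate to the relocated helper; a sketch of the replacement call site, with an illustrative URL (the return shape is taken from the removed docstring):

    from mercurial.utils import urlutil

    # returns (url, (branch, branches)), per the removed shim's docstring
    url, branchinfo = urlutil.parseurl(b'https://example.com/repo#stable')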
@@ -366,17 +366,6 b' class hgweb(object):'
         # replace it.
         res.headers[b'Content-Security-Policy'] = rctx.csp
 
-        # /api/* is reserved for various API implementations. Dispatch
-        # accordingly. But URL paths can conflict with subrepos and virtual
-        # repos in hgwebdir. So until we have a workaround for this, only
-        # expose the URLs if the feature is enabled.
-        apienabled = rctx.repo.ui.configbool(b'experimental', b'web.apiserver')
-        if apienabled and req.dispatchparts and req.dispatchparts[0] == b'api':
-            wireprotoserver.handlewsgiapirequest(
-                rctx, req, res, self.check_perm
-            )
-            return res.sendresponse()
-
         handled = wireprotoserver.handlewsgirequest(
             rctx, req, res, self.check_perm
         )
@@ -519,6 +519,7 b" rev = webcommand(b'rev')(changeset)"
 
 
 def decodepath(path):
+    # type: (bytes) -> bytes
     """Hook for mapping a path in the repository to a path in the
     working copy.
 
@@ -616,7 +617,9 b' def manifest(web):'
         yield {
             b"parity": next(parity),
             b"path": path,
+            # pytype: disable=wrong-arg-types
             b"emptydirs": b"/".join(emptydirs),
+            # pytype: enable=wrong-arg-types
             b"basename": d,
         }
 
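Both hunks add checker-only annotations: a comment-style type signature and a pytype disable/enable pair that silences one check for just the enclosed lines. The pair scopes like this (a generic sketch; wrong-arg-types is the check name used above):

    parts = [b"a", b"b"]
    # pytype: disable=wrong-arg-types
    value = b"/".join(parts)  # only the lines between the pair are exempt
    # pytype: enable=wrong-arg-types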
@@ -13,7 +13,6 b' import io'
 import os
 import socket
 import struct
-import weakref
 
 from .i18n import _
 from .pycompat import getattr
@@ -25,21 +24,9 b' from . import ('
     statichttprepo,
     url as urlmod,
     util,
-    wireprotoframing,
-    wireprototypes,
     wireprotov1peer,
-    wireprotov2peer,
-    wireprotov2server,
 )
-from .interfaces import (
-    repository,
-    util as interfaceutil,
-)
-from .utils import (
-    cborutil,
-    stringutil,
-    urlutil,
-)
+from .utils import urlutil
 
 httplib = util.httplib
 urlerr = util.urlerr
@@ -331,9 +318,7 b' class RedirectedRepoError(error.RepoErro'
         self.respurl = respurl
 
 
-def parsev1commandresponse(
-    ui, baseurl, requrl, qs, resp, compressible, allowcbor=False
-):
+def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible):
     # record the url we got redirected to
     redirected = False
     respurl = pycompat.bytesurl(resp.geturl())
@@ -376,17 +361,6 b' def parsev1commandresponse('
         try:
             subtype = proto.split(b'-', 1)[1]
 
-            # Unless we end up supporting CBOR in the legacy wire protocol,
-            # this should ONLY be encountered for the initial capabilities
-            # request during handshake.
-            if subtype == b'cbor':
-                if allowcbor:
-                    return respurl, proto, resp
-                else:
-                    raise error.RepoError(
-                        _(b'unexpected CBOR response from server')
-                    )
-
             version_info = tuple([int(n) for n in subtype.split(b'.')])
         except ValueError:
             raise error.RepoError(
@@ -564,85 +538,6 b' class httppeer(wireprotov1peer.wirepeer)'
         raise exception
 
 
-def sendv2request(
-    ui, opener, requestbuilder, apiurl, permission, requests, redirect
-):
-    wireprotoframing.populatestreamencoders()
-
-    uiencoders = ui.configlist(b'experimental', b'httppeer.v2-encoder-order')
-
-    if uiencoders:
-        encoders = []
-
-        for encoder in uiencoders:
-            if encoder not in wireprotoframing.STREAM_ENCODERS:
-                ui.warn(
-                    _(
-                        b'wire protocol version 2 encoder referenced in '
-                        b'config (%s) is not known; ignoring\n'
-                    )
-                    % encoder
-                )
-            else:
-                encoders.append(encoder)
-
-    else:
-        encoders = wireprotoframing.STREAM_ENCODERS_ORDER
-
-    reactor = wireprotoframing.clientreactor(
-        ui,
-        hasmultiplesend=False,
-        buffersends=True,
-        clientcontentencoders=encoders,
-    )
-
-    handler = wireprotov2peer.clienthandler(
-        ui, reactor, opener=opener, requestbuilder=requestbuilder
-    )
-
-    url = b'%s/%s' % (apiurl, permission)
-
-    if len(requests) > 1:
-        url += b'/multirequest'
-    else:
-        url += b'/%s' % requests[0][0]
-
-    ui.debug(b'sending %d commands\n' % len(requests))
-    for command, args, f in requests:
-        ui.debug(
-            b'sending command %s: %s\n'
-            % (command, stringutil.pprint(args, indent=2))
-        )
-        assert not list(
-            handler.callcommand(command, args, f, redirect=redirect)
-        )
-
-    # TODO stream this.
-    body = b''.join(map(bytes, handler.flushcommands()))
-
-    # TODO modify user-agent to reflect v2
-    headers = {
-        'Accept': wireprotov2server.FRAMINGTYPE,
-        'Content-Type': wireprotov2server.FRAMINGTYPE,
-    }
-
-    req = requestbuilder(pycompat.strurl(url), body, headers)
-    req.add_unredirected_header('Content-Length', '%d' % len(body))
-
-    try:
-        res = opener.open(req)
-    except urlerr.httperror as e:
-        if e.code == 401:
-            raise error.Abort(_(b'authorization failed'))
-
-        raise
-    except httplib.HTTPException as e:
-        ui.traceback()
-        raise IOError(None, e)
-
-    return handler, res
-
-
 class queuedcommandfuture(pycompat.futures.Future):
     """Wraps result() on command futures to trigger submission on call."""
 
@@ -657,302 +552,6 b' class queuedcommandfuture(pycompat.futur'
         return self.result(timeout)
 
 
-@interfaceutil.implementer(repository.ipeercommandexecutor)
-class httpv2executor(object):
-    def __init__(
-        self, ui, opener, requestbuilder, apiurl, descriptor, redirect
-    ):
-        self._ui = ui
-        self._opener = opener
-        self._requestbuilder = requestbuilder
-        self._apiurl = apiurl
-        self._descriptor = descriptor
-        self._redirect = redirect
-        self._sent = False
-        self._closed = False
-        self._neededpermissions = set()
-        self._calls = []
-        self._futures = weakref.WeakSet()
-        self._responseexecutor = None
-        self._responsef = None
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exctype, excvalue, exctb):
-        self.close()
-
-    def callcommand(self, command, args):
-        if self._sent:
-            raise error.ProgrammingError(
-                b'callcommand() cannot be used after commands are sent'
-            )
-
-        if self._closed:
-            raise error.ProgrammingError(
-                b'callcommand() cannot be used after close()'
-            )
-
-        # The service advertises which commands are available. So if we attempt
-        # to call an unknown command or pass an unknown argument, we can screen
-        # for this.
-        if command not in self._descriptor[b'commands']:
-            raise error.ProgrammingError(
-                b'wire protocol command %s is not available' % command
-            )
-
-        cmdinfo = self._descriptor[b'commands'][command]
-        unknownargs = set(args.keys()) - set(cmdinfo.get(b'args', {}))
-
-        if unknownargs:
-            raise error.ProgrammingError(
-                b'wire protocol command %s does not accept argument: %s'
-                % (command, b', '.join(sorted(unknownargs)))
-            )
-
-        self._neededpermissions |= set(cmdinfo[b'permissions'])
-
-        # TODO we /could/ also validate types here, since the API descriptor
-        # includes types...
-
-        f = pycompat.futures.Future()
-
-        # Monkeypatch it so result() triggers sendcommands(), otherwise result()
-        # could deadlock.
-        f.__class__ = queuedcommandfuture
-        f._peerexecutor = self
-
-        self._futures.add(f)
-        self._calls.append((command, args, f))
-
-        return f
-
-    def sendcommands(self):
-        if self._sent:
-            return
-
-        if not self._calls:
-            return
-
-        self._sent = True
-
-        # Unhack any future types so caller sees a clean type and so we
-        # break reference cycle.
-        for f in self._futures:
-            if isinstance(f, queuedcommandfuture):
-                f.__class__ = pycompat.futures.Future
-                f._peerexecutor = None
-
-        # Mark the future as running and filter out cancelled futures.
-        calls = [
-            (command, args, f)
-            for command, args, f in self._calls
-            if f.set_running_or_notify_cancel()
-        ]
-
-        # Clear out references, prevent improper object usage.
-        self._calls = None
-
-        if not calls:
-            return
-
-        permissions = set(self._neededpermissions)
-
-        if b'push' in permissions and b'pull' in permissions:
-            permissions.remove(b'pull')
-
-        if len(permissions) > 1:
-            raise error.RepoError(
-                _(b'cannot make request requiring multiple permissions: %s')
-                % _(b', ').join(sorted(permissions))
-            )
-
-        permission = {
-            b'push': b'rw',
-            b'pull': b'ro',
-        }[permissions.pop()]
-
-        handler, resp = sendv2request(
-            self._ui,
-            self._opener,
-            self._requestbuilder,
-            self._apiurl,
-            permission,
-            calls,
-            self._redirect,
-        )
-
-        # TODO we probably want to validate the HTTP code, media type, etc.
-
-        self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
-        self._responsef = self._responseexecutor.submit(
-            self._handleresponse, handler, resp
-        )
-
-    def close(self):
-        if self._closed:
-            return
-
-        self.sendcommands()
-
-        self._closed = True
-
-        if not self._responsef:
-            return
-
-        # TODO ^C here may not result in immediate program termination.
-
-        try:
-            self._responsef.result()
-        finally:
-            self._responseexecutor.shutdown(wait=True)
-            self._responsef = None
-            self._responseexecutor = None
-
-        # If any of our futures are still in progress, mark them as
-        # errored, otherwise a result() could wait indefinitely.
-        for f in self._futures:
-            if not f.done():
-                f.set_exception(
-                    error.ResponseError(_(b'unfulfilled command response'))
-                )
-
-        self._futures = None
-
-    def _handleresponse(self, handler, resp):
-        # Called in a thread to read the response.
-
-        while handler.readdata(resp):
-            pass
-
-
-@interfaceutil.implementer(repository.ipeerv2)
-class httpv2peer(object):
-
-    limitedarguments = False
-
-    def __init__(
-        self, ui, repourl, apipath, opener, requestbuilder, apidescriptor
-    ):
-        self.ui = ui
-        self.apidescriptor = apidescriptor
-
-        if repourl.endswith(b'/'):
-            repourl = repourl[:-1]
-
-        self._url = repourl
-        self._apipath = apipath
-        self._apiurl = b'%s/%s' % (repourl, apipath)
-        self._opener = opener
-        self._requestbuilder = requestbuilder
-
-        self._redirect = wireprotov2peer.supportedredirects(ui, apidescriptor)
-
-    # Start of ipeerconnection.
-
-    def url(self):
-        return self._url
-
-    def local(self):
-        return None
-
-    def peer(self):
-        return self
-
-    def canpush(self):
-        # TODO change once implemented.
-        return False
-
-    def close(self):
-        self.ui.note(
-            _(
-                b'(sent %d HTTP requests and %d bytes; '
-                b'received %d bytes in responses)\n'
-            )
-            % (
-                self._opener.requestscount,
-                self._opener.sentbytescount,
-                self._opener.receivedbytescount,
-            )
-        )
-
-    # End of ipeerconnection.
-
-    # Start of ipeercapabilities.
-
-    def capable(self, name):
-        # The capabilities used internally historically map to capabilities
-        # advertised from the "capabilities" wire protocol command. However,
-        # version 2 of that command works differently.
-
-        # Maps to commands that are available.
-        if name in (
-            b'branchmap',
-            b'getbundle',
-            b'known',
-            b'lookup',
-            b'pushkey',
-        ):
-            return True
-
-        # Other concepts.
-        if name in (b'bundle2',):
-            return True
-
-        # Alias command-* to presence of command of that name.
-        if name.startswith(b'command-'):
-            return name[len(b'command-') :] in self.apidescriptor[b'commands']
-
-        return False
-
-    def requirecap(self, name, purpose):
-        if self.capable(name):
-            return
-
-        raise error.CapabilityError(
-            _(
-                b'cannot %s; client or remote repository does not support the '
-                b'\'%s\' capability'
-            )
-            % (purpose, name)
-        )
-
-    # End of ipeercapabilities.
-
-    def _call(self, name, **args):
-        with self.commandexecutor() as e:
-            return e.callcommand(name, args).result()
-
-    def commandexecutor(self):
-        return httpv2executor(
-            self.ui,
-            self._opener,
-            self._requestbuilder,
-            self._apiurl,
-            self.apidescriptor,
-            self._redirect,
-        )
-
-
-# Registry of API service names to metadata about peers that handle it.
-#
-# The following keys are meaningful:
-#
-# init
-#    Callable receiving (ui, repourl, servicepath, opener, requestbuilder,
-#    apidescriptor) to create a peer.
-#
-# priority
-#    Integer priority for the service. If we could choose from multiple
-#    services, we choose the one with the highest priority.
-API_PEERS = {
-    wireprototypes.HTTP_WIREPROTO_V2: {
-        b'init': httpv2peer,
-        b'priority': 50,
-    },
-}
-
-
 def performhandshake(ui, url, opener, requestbuilder):
     # The handshake is a request to the capabilities command.
 
@@ -963,28 +562,6 b' def performhandshake(ui, url, opener, re'
 
     args = {}
 
-    # The client advertises support for newer protocols by adding an
-    # X-HgUpgrade-* header with a list of supported APIs and an
-    # X-HgProto-* header advertising which serializing formats it supports.
-    # We only support the HTTP version 2 transport and CBOR responses for
-    # now.
-    advertisev2 = ui.configbool(b'experimental', b'httppeer.advertise-v2')
-
-    if advertisev2:
-        args[b'headers'] = {
-            'X-HgProto-1': 'cbor',
-        }
-
-        args[b'headers'].update(
-            encodevalueinheaders(
-                b' '.join(sorted(API_PEERS)),
-                b'X-HgUpgrade',
-                # We don't know the header limit this early.
-                # So make it small.
-                1024,
-            )
-        )
-
     req, requrl, qs = makev1commandrequest(
         ui, requestbuilder, caps, capable, url, b'capabilities', args
     )
@@ -1004,7 +581,7 b' def performhandshake(ui, url, opener, re'
     # redirect that drops the query string to "just work."
     try:
         respurl, ct, resp = parsev1commandresponse(
-            ui, url, requrl, qs, resp, compressible=False, allowcbor=advertisev2
+            ui, url, requrl, qs, resp, compressible=False
        )
    except RedirectedRepoError as e:
        req, requrl, qs = makev1commandrequest(
@@ -1012,7 +589,7 b' def performhandshake(ui, url, opener, re'
        )
        resp = sendrequest(ui, opener, req)
        respurl, ct, resp = parsev1commandresponse(
-            ui, url, requrl, qs, resp, compressible=False, allowcbor=advertisev2
+            ui, url, requrl, qs, resp, compressible=False
        )
 
    try:
@@ -1023,29 +600,7 b' def performhandshake(ui, url, opener, re'
     if not ct.startswith(b'application/mercurial-'):
         raise error.ProgrammingError(b'unexpected content-type: %s' % ct)
 
-    if advertisev2:
-        if ct == b'application/mercurial-cbor':
-            try:
-                info = cborutil.decodeall(rawdata)[0]
-            except cborutil.CBORDecodeError:
-                raise error.Abort(
-                    _(b'error decoding CBOR from remote server'),
-                    hint=_(
-                        b'try again and consider contacting '
-                        b'the server operator'
-                    ),
-                )
-
-        # We got a legacy response. That's fine.
-        elif ct in (b'application/mercurial-0.1', b'application/mercurial-0.2'):
-            info = {b'v1capabilities': set(rawdata.split())}
-
-        else:
-            raise error.RepoError(
-                _(b'unexpected response type from server: %s') % ct
-            )
-    else:
-        info = {b'v1capabilities': set(rawdata.split())}
+    info = {b'v1capabilities': set(rawdata.split())}
 
     return respurl, info
 
@@ -1073,29 +628,6 b' def makepeer(ui, path, opener=None, requ'
 
     respurl, info = performhandshake(ui, url, opener, requestbuilder)
 
-    # Given the intersection of APIs that both we and the server support,
-    # sort by their advertised priority and pick the first one.
-    #
-    # TODO consider making this request-based and interface driven. For
-    # example, the caller could say "I want a peer that does X." It's quite
-    # possible that not all peers would do that. Since we know the service
-    # capabilities, we could filter out services not meeting the
-    # requirements. Possibly by consulting the interfaces defined by the
-    # peer type.
-    apipeerchoices = set(info.get(b'apis', {}).keys()) & set(API_PEERS.keys())
-
-    preferredchoices = sorted(
-        apipeerchoices, key=lambda x: API_PEERS[x][b'priority'], reverse=True
-    )
-
-    for service in preferredchoices:
-        apipath = b'%s/%s' % (info[b'apibase'].rstrip(b'/'), service)
-
-        return API_PEERS[service][b'init'](
-            ui, respurl, apipath, opener, requestbuilder, info[b'apis'][service]
-        )
-
-    # Failed to construct an API peer. Fall back to legacy.
     return httppeer(
         ui, path, respurl, opener, requestbuilder, info[b'v1capabilities']
     )
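After this removal the HTTP peer always performs the version 1 handshake seen in performhandshake(): one request to the ``capabilities`` command whose space-separated body becomes ``info[b'v1capabilities']``. On the wire that exchange looks roughly like this (headers trimmed, capability list shortened and illustrative):

   GET /repo?cmd=capabilities HTTP/1.1

   HTTP/1.1 200 OK
   Content-Type: application/mercurial-0.1

   lookup branchmap pushkey known getbundle unbundle=HG10GZ,HG10BZ,HG10UN ...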
@@ -66,17 +66,6 b' class idirstate(interfaceutil.Interface)'
     def pathto(f, cwd=None):
         pass
 
-    def __getitem__(key):
-        """Return the current state of key (a filename) in the dirstate.
-
-        States are:
-          n  normal
-          m  needs merging
-          r  marked for removal
-          a  marked for addition
-          ?  not tracked
-        """
-
     def __contains__(key):
         """Check if bytestring `key` is known to the dirstate."""
 
@@ -1278,7 +1278,7 b' class imanifeststorage(interfaceutil.Int'
     def linkrev(rev):
         """Obtain the changeset revision number a revision is linked to."""
 
-    def revision(node, _df=None, raw=False):
+    def revision(node, _df=None):
         """Obtain fulltext data for a node."""
 
     def rawdata(node, _df=None):
@@ -1495,13 +1495,6 b' class ilocalrepositorymain(interfaceutil'
         """null revision for the hash function used by the repository."""
     )
 
-    supportedformats = interfaceutil.Attribute(
-        """Set of requirements that apply to stream clone.
-
-        This is actually a class attribute and is shared among all instances.
-        """
-    )
-
     supported = interfaceutil.Attribute(
         """Set of requirements that this repo is capable of opening."""
     )
@@ -1794,7 +1787,7 b' class ilocalrepositorymain(interfaceutil'
         DANGEROUS.
         """
 
-    def updatecaches(tr=None, full=False):
+    def updatecaches(tr=None, full=False, caches=None):
         """Warm repo caches."""
 
     def invalidatecaches():
@@ -1,4 +1,5 b''
 # localrepo.py - read/write repository class for mercurial
+# coding: utf-8
 #
 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
 #
@@ -931,7 +932,7 b' def gathersupportedrequirements(ui):'
         if engine.available() and engine.revlogheader():
             supported.add(b'exp-compression-%s' % name)
             if engine.name() == b'zstd':
-                supported.add(b'revlog-compression-zstd')
+                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
 
     return supported
 
@@ -1273,32 +1274,26 b' class localrepository(object):'
     used.
     """
 
-    # obsolete experimental requirements:
-    #  - manifestv2: An experimental new manifest format that allowed
-    #    for stem compression of long paths. Experiment ended up not
-    #    being successful (repository sizes went up due to worse delta
-    #    chains), and the code was deleted in 4.6.
-    supportedformats = {
+    _basesupported = {
+        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
+        requirementsmod.CHANGELOGV2_REQUIREMENT,
+        requirementsmod.COPIESSDC_REQUIREMENT,
+        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
+        requirementsmod.DIRSTATE_V2_REQUIREMENT,
+        requirementsmod.DOTENCODE_REQUIREMENT,
+        requirementsmod.FNCACHE_REQUIREMENT,
+        requirementsmod.GENERALDELTA_REQUIREMENT,
+        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
+        requirementsmod.NODEMAP_REQUIREMENT,
+        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
         requirementsmod.REVLOGV1_REQUIREMENT,
-        requirementsmod.GENERALDELTA_REQUIREMENT,
-        requirementsmod.TREEMANIFEST_REQUIREMENT,
-        requirementsmod.COPIESSDC_REQUIREMENT,
         requirementsmod.REVLOGV2_REQUIREMENT,
-        requirementsmod.CHANGELOGV2_REQUIREMENT,
+        requirementsmod.SHARED_REQUIREMENT,
+        requirementsmod.SHARESAFE_REQUIREMENT,
+        requirementsmod.SPARSE_REQUIREMENT,
         requirementsmod.SPARSEREVLOG_REQUIREMENT,
-        requirementsmod.NODEMAP_REQUIREMENT,
-        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
-        requirementsmod.SHARESAFE_REQUIREMENT,
-        requirementsmod.DIRSTATE_V2_REQUIREMENT,
-    }
-    _basesupported = supportedformats | {
         requirementsmod.STORE_REQUIREMENT,
-        requirementsmod.FNCACHE_REQUIREMENT,
-        requirementsmod.SHARED_REQUIREMENT,
-        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
-        requirementsmod.DOTENCODE_REQUIREMENT,
-        requirementsmod.SPARSE_REQUIREMENT,
-        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
+        requirementsmod.TREEMANIFEST_REQUIREMENT,
     }
 
     # list of prefix for file which can be written without 'wlock'
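For context, ``_basesupported`` is checked against the repository's ``.hg/requires`` file, which stores one requirement token per line; a typical modern repository's file might read (illustrative contents):

   dotencode
   fncache
   generaldelta
   revlogv1
   sparserevlog
   store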
@@ -1748,7 +1743,9 b' class localrepository(object):'
         """Extension point for wrapping the dirstate per-repo."""
         sparsematchfn = lambda: sparse.matcher(self)
         v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
+        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
         use_dirstate_v2 = v2_req in self.requirements
+        use_tracked_hint = th in self.requirements
 
         return dirstate.dirstate(
             self.vfs,
@@ -1758,6 +1755,7 b' class localrepository(object):'
             sparsematchfn,
             self.nodeconstants,
             use_dirstate_v2,
+            use_tracked_hint=use_tracked_hint,
         )
 
     def _dirstatevalidate(self, node):
@@ -3551,6 +3549,10 b' def clone_requirements(ui, createopts, s'
     depends on the configuration
     """
     target_requirements = set()
+    if not srcrepo.requirements:
+        # this is a legacy revlog "v0" repository, we cannot do anything fancy
+        # with it.
+        return target_requirements
     createopts = defaultcreateopts(ui, createopts=createopts)
     for r in newreporequirements(ui, createopts):
         if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
@@ -3568,16 +3570,6 b' def newreporequirements(ui, createopts):'
     Extensions can wrap this function to specify custom requirements for
     new repositories.
     """
-    # If the repo is being created from a shared repository, we copy
-    # its requirements.
-    if b'sharedrepo' in createopts:
-        requirements = set(createopts[b'sharedrepo'].requirements)
-        if createopts.get(b'sharedrelative'):
-            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
-        else:
-            requirements.add(requirementsmod.SHARED_REQUIREMENT)
-
-        return requirements
 
     if b'backend' not in createopts:
         raise error.ProgrammingError(
@@ -3663,7 +3655,7 b' def newreporequirements(ui, createopts):'
         requirements.add(b'lfs')
 
     if ui.configbool(b'format', b'bookmarks-in-store'):
-        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
+        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
 
     if ui.configbool(b'format', b'use-persistent-nodemap'):
         requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
@@ -3673,6 +3665,45 b' def newreporequirements(ui, createopts):'
     if ui.configbool(b'format', b'use-share-safe'):
         requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
 
+    # if we are creating a share-repo¹ we have to handle requirements
+    # differently.
+    #
+    # [1] (i.e. reusing the store from another repository, just having a
+    # working copy)
+    if b'sharedrepo' in createopts:
+        source_requirements = set(createopts[b'sharedrepo'].requirements)
+
+        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
+            # share to an old school repository, we have to copy the
+            # requirements and hope for the best.
+            requirements = source_requirements
+        else:
+            # We have control on the working copy only, so "copy" the non
+            # working copy part over, ignoring previous logic.
+            to_drop = set()
+            for req in requirements:
+                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
+                    continue
+                if req in source_requirements:
+                    continue
+                to_drop.add(req)
+            requirements -= to_drop
+            requirements |= source_requirements
+
+        if createopts.get(b'sharedrelative'):
+            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
+        else:
+            requirements.add(requirementsmod.SHARED_REQUIREMENT)
+
+    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
+        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
+        msg = _(b"ignoring unknown tracked key version: %d\n")
+        hint = _(
+            b"see `hg help config.format.use-dirstate-tracked-hint-version`"
+        )
+        if version != 1:
+            ui.warn(msg % version, hint=hint)
+        else:
+            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
+
     return requirements
 
 
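The configbool/configint pair read above corresponds to an hgrc knob; enabling the hint at repo-creation time would look like this (the explicit version line is shown for clarity, and 1 is the only value the code accepts):

   [format]
   use-dirstate-tracked-hint = yes
   use-dirstate-tracked-hint.version = 1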
@@ -3685,7 +3716,7 @@ def checkrequirementscompat(ui, requirem
     dropped = set()
 
     if requirementsmod.STORE_REQUIREMENT not in requirements:
-        if
+        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
             ui.warn(
                 _(
                     b'ignoring enabled \'format.bookmarks-in-store\' config '
@@ -3693,7 +3724,7 @@ def checkrequirementscompat(ui, requirem
                     b'\'format.usestore\' config\n'
                 )
             )
-            dropped.add(
+            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
 
     if (
         requirementsmod.SHARED_REQUIREMENT in requirements
@@ -3707,13 +3738,13 @@ def checkrequirementscompat(ui, requirem
         )
 
     if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
-        ui.warn(
-            _(
+        if ui.hasconfig(b'format', b'use-share-safe'):
+            msg = _(
                 b"ignoring enabled 'format.use-share-safe' config because "
                 b"it is incompatible with disabled 'format.usestore'"
                 b" config\n"
             )
-        )
+            ui.warn(msg)
         dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
 
     return dropped
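The `ui.hasconfig` guard changes when the warning fires: only users who explicitly set `format.use-share-safe` get told it is being ignored, while a mere default stays silent. A small sketch of that warn-only-if-explicitly-set pattern, with a stand-in Config class rather than Mercurial's real ui object:

# Illustrative only: Config stands in for ui; it remembers which keys the
# user actually set, so defaults never trigger the mismatch warning.
class Config:
    def __init__(self, explicit):
        self._explicit = explicit

    def hasconfig(self, key):
        return key in self._explicit

def drop_share_safe(config, requirements, warn):
    dropped = set()
    if b'share-safe' in requirements:
        if config.hasconfig(b'format.use-share-safe'):
            # the user asked for it explicitly; the mismatch deserves a
            # warning, a silently-applied default does not
            warn(b"ignoring 'format.use-share-safe' config\n")
        dropped.add(b'share-safe')
    return dropped

assert drop_share_safe(Config(set()), {b'share-safe'}, print) == {b'share-safe'}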
@@ -62,9 +62,9 @@ def getlimit(opts):
         try:
             limit = int(limit)
         except ValueError:
-            raise error.Abort(_(b'limit must be a positive integer'))
+            raise error.InputError(_(b'limit must be a positive integer'))
         if limit <= 0:
-            raise error.Abort(_(b'limit must be positive'))
+            raise error.InputError(_(b'limit must be positive'))
     else:
         limit = None
     return limit
@@ -831,7 +831,7 @@ def _makematcher(repo, revs, wopts):
             # take the slow path.
             found = slowpath = True
         if not found:
-            raise error.Abort(
+            raise error.StateError(
                 _(
                     b'cannot follow file not in any of the specified '
                     b'revisions: "%s"'
@@ -847,7 +847,7 @@ def _makematcher(repo, revs, wopts):
                     slowpath = True
                     continue
                 else:
-                    raise error.Abort(
+                    raise error.StateError(
                         _(
                             b'cannot follow file not in parent '
                             b'revision: "%s"'
@@ -858,7 +858,7 @@ def _makematcher(repo, revs, wopts):
                 if not filelog:
                     # A file exists in wdir but not in history, which means
                     # the file isn't committed yet.
-                    raise error.Abort(
+                    raise error.StateError(
                         _(b'cannot follow nonexistent file: "%s"') % f
                     )
             else:
@@ -1108,11 +1108,13 @@ def _parselinerangeopt(repo, opts):
        try:
            pat, linerange = pat.rsplit(b',', 1)
        except ValueError:
-            raise error.Abort(_(b'malformatted line-range pattern %s') % pat)
+            raise error.InputError(
+                _(b'malformatted line-range pattern %s') % pat
+            )
        try:
            fromline, toline = map(int, linerange.split(b':'))
        except ValueError:
-            raise error.Abort(_(b"invalid line range for %s") % pat)
+            raise error.InputError(_(b"invalid line range for %s") % pat)
        msg = _(b"line range pattern '%s' must match exactly one file") % pat
        fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
        linerangebyfname.append(
@@ -1136,7 +1138,7 @@ def getlinerangerevs(repo, userrevs, opt
     linerangesbyrev = {}
     for fname, (fromline, toline) in _parselinerangeopt(repo, opts):
         if fname not in wctx:
-            raise error.Abort(
+            raise error.StateError(
                 _(b'cannot follow file not in parent revision: "%s"') % fname
             )
         fctx = wctx.filectx(fname)
@@ -1271,7 +1273,7 @@ def displayrevs(ui, repo, revs, displaye
 def checkunsupportedgraphflags(pats, opts):
     for op in [b"newest_first"]:
         if op in opts and opts[op]:
-            raise error.Abort(
+            raise error.InputError(
                 _(b"-G/--graph option is incompatible with --%s")
                 % op.replace(b"_", b"-")
             )
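These logcmdutil changes are part of a broader conversion from the catch-all error.Abort to more specific exception classes, so the command dispatcher can map each failure kind to its own exit code. A hedged sketch of that taxonomy (class names follow the diff; the exit-code values mirror Mercurial's detailed-exit-code mode as far as this series shows, so treat them as illustrative):

# Subclassing a common base keeps existing `except Abort` handlers working
# while letting the dispatcher choose distinct exit codes per failure kind.
class Abort(Exception):
    exit_code = 255  # generic failure

class InputError(Abort):
    exit_code = 10   # the user passed bad arguments, revsets, patterns...

class StateError(Abort):
    exit_code = 20   # the repository/working copy is in a state the
                     # command refuses to operate on

def dispatch(func):
    try:
        func()
        return 0
    except Abort as err:  # still catches both subclasses
        print('abort: %s' % err)
        return err.exit_code

def bad_limit():
    raise InputError('limit must be positive')

assert dispatch(bad_limit) == 10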
@@ -1819,8 +1819,8 @@ class manifestrevlog(object):
     def checksize(self):
         return self._revlog.checksize()
 
-    def revision(self, node, _df=None, raw=False):
-        return self._revlog.revision(node, _df=_df, raw=raw)
+    def revision(self, node, _df=None):
+        return self._revlog.revision(node, _df=_df)
 
     def rawdata(self, node, _df=None):
         return self._revlog.rawdata(node, _df=_df)
@@ -84,7 +84,7 @@ class diffopts(object):
         try:
             self.context = int(self.context)
         except ValueError:
-            raise error.Abort(
+            raise error.InputError(
                 _(b'diff context lines count must be an integer, not %r')
                 % pycompat.bytestr(self.context)
             )
@@ -41,10 +41,9 @@ def _getcheckunknownconfig(repo, section
     valid = [b'abort', b'ignore', b'warn']
     if config not in valid:
         validstr = b', '.join([b"'" + v + b"'" for v in valid])
-        raise error.ConfigError(
-            _(b"%s.%s not valid ('%s' is none of %s)")
-            % (section, name, config, validstr)
-        )
+        msg = _(b"%s.%s not valid ('%s' is none of %s)")
+        msg %= (section, name, config, validstr)
+        raise error.ConfigError(msg)
     return config
 
 
@@ -337,10 +336,9 @@ def _checkcollision(repo, wmf, mresult):
     for f in pmmf:
         fold = util.normcase(f)
         if fold in foldmap:
-            raise error.StateError(
-                _(b"case-folding collision between %s and %s")
-                % (f, foldmap[fold])
-            )
+            msg = _(b"case-folding collision between %s and %s")
+            msg %= (f, foldmap[fold])
+            raise error.StateError(msg)
         foldmap[fold] = f
 
     # check case-folding of directories
@@ -348,10 +346,9 @@ def _checkcollision(repo, wmf, mresult):
     for fold, f in sorted(foldmap.items()):
         if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
             # the folded prefix matches but actual casing is different
-            raise error.StateError(
-                _(b"case-folding collision between %s and directory of %s")
-                % (lastfull, f)
-            )
+            msg = _(b"case-folding collision between %s and directory of %s")
+            msg %= (lastfull, f)
+            raise error.StateError(msg)
         foldprefix = fold + b'/'
         unfoldprefix = f + b'/'
         lastfull = f
@@ -491,7 +488,7 @@ def checkpathconflicts(repo, wctx, mctx,
                 mresult.addfile(
                     p,
                     mergestatemod.ACTION_PATH_CONFLICT,
-                    (pnew, mergestatemod.ACTION_REMOVE),
+                    (pnew, b'r'),
                     b'path conflict',
                 )
                 remoteconflicts.remove(p)
@@ -512,17 +509,6 @@ def _filternarrowactions(narrowmatch, br
     Raise an exception if the merge cannot be completed because the repo is
     narrowed.
     """
-    # TODO: handle with nonconflicttypes
-    nonconflicttypes = {
-        mergestatemod.ACTION_ADD,
-        mergestatemod.ACTION_ADD_MODIFIED,
-        mergestatemod.ACTION_CREATED,
-        mergestatemod.ACTION_CREATED_MERGE,
-        mergestatemod.ACTION_FORGET,
-        mergestatemod.ACTION_GET,
-        mergestatemod.ACTION_REMOVE,
-        mergestatemod.ACTION_EXEC,
-    }
     # We mutate the items in the dict during iteration, so iterate
     # over a copy.
     for f, action in mresult.filemap():
@@ -530,21 +516,25 @@ def _filternarrowactions(narrowmatch, br
             pass
         elif not branchmerge:
             mresult.removefile(f)  # just updating, ignore changes outside clone
-        elif action[0] in mergestatemod.NO_OP_ACTIONS:
+        elif action[0].no_op:
             mresult.removefile(f)  # merge does not affect file
-        elif action[0] in nonconflicttypes:
-            raise error.Abort(
-                _(
-                    b'merge affects file \'%s\' outside narrow, '
-                    b'which is not yet supported'
-                )
-                % f,
-                hint=_(b'merging in the other direction may work'),
-            )
+        elif action[0].narrow_safe:
+            if not f.endswith(b'/'):
+                mresult.removefile(f)  # merge won't affect on-disk files
+
+                mresult.addcommitinfo(
+                    f, b'outside-narrow-merge-action', action[0].changes
+                )
+            else:  # TODO: handle the tree case
+                msg = _(
+                    b'merge affects file \'%s\' outside narrow, '
+                    b'which is not yet supported'
+                )
+                hint = _(b'merging in the other direction may work')
+                raise error.Abort(msg % f, hint=hint)
         else:
-            raise error.Abort(
-                _(b'conflict in file \'%s\' is outside narrow clone') % f
-            )
+            msg = _(b'conflict in file \'%s\' is outside narrow clone')
+            raise error.StateError(msg % f)
 
 
 class mergeresult(object):
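The `_filternarrowactions` rewrite replaces membership tests against hand-maintained tuples with attribute lookups on the action objects themselves. A compact sketch of that attribute-driven dispatch, with an Action stand-in for mergestatemod.MergeAction and illustrative callbacks:

# Each action now carries its own classification, so the filter reads the
# flags instead of consulting separate NO_OP/nonconflict collections.
class Action:
    def __init__(self, short, no_op=False, narrow_safe=False):
        self.short = short
        self.no_op = no_op
        self.narrow_safe = narrow_safe

KEEP = Action(b'k', no_op=True)
GET = Action(b'g', narrow_safe=True)
MERGE = Action(b'm')

def filter_outside_narrow(action, path, drop, record):
    if action.no_op:
        drop(path)            # merge does not affect the file at all
    elif action.narrow_safe:
        drop(path)            # no on-disk effect outside the narrow set...
        record(path, action)  # ...but remember it for commit metadata
    else:
        raise RuntimeError('conflict outside narrow clone: %r' % path)

dropped = []
filter_outside_narrow(KEEP, b'outside/file', dropped.append, lambda p, a: None)
assert dropped == [b'outside/file']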
@@ -705,7 +695,7 @@ class mergeresult(object):
                     mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
                 )
                 and self._actionmapping[a]
-                and a not in mergestatemod.NO_OP_ACTIONS
+                and not a.no_op
             ):
                 return True
 
@@ -1207,7 +1197,7 @@ def calculateupdates(
 
     for f, a in mresult1.filemap(sort=True):
         m, args, msg = a
-        repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
+        repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m.__bytes__()))
         if f in fbids:
             d = fbids[f]
             if m in d:
@@ -1228,13 +1218,15 @@ def calculateupdates(
             repo.ui.debug(b" list of bids for %s:\n" % f)
             for m, l in sorted(bids.items()):
                 for _f, args, msg in l:
-                    repo.ui.debug(b'   %s -> %s\n' % (msg, m))
+                    repo.ui.debug(b'   %s -> %s\n' % (msg, m.__bytes__()))
             # bids is a mapping from action method to a list of actions
             # Consensus?
             if len(bids) == 1:  # all bids are the same kind of method
                 m, l = list(bids.items())[0]
                 if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
-                    repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
+                    repo.ui.note(
+                        _(b" %s: consensus for %s\n") % (f, m.__bytes__())
+                    )
                     mresult.addfile(f, *l[0])
                     continue
             # If keep is an option, just do it.
@@ -1292,11 +1284,12 @@ def calculateupdates(
             repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
             for m, l in sorted(bids.items()):
                 for _f, args, msg in l:
-                    repo.ui.note(b'  %s -> %s\n' % (msg, m))
+                    repo.ui.note(b'  %s -> %s\n' % (msg, m.__bytes__()))
             # Pick random action. TODO: Instead, prompt user when resolving
             m, l = list(bids.items())[0]
             repo.ui.warn(
-                _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
+                _(b' %s: ambiguous merge - picked %s action\n')
+                % (f, m.__bytes__())
             )
             mresult.addfile(f, *l[0])
             continue
@@ -1404,6 +1397,34 @@ def batchget(repo, mctx, wctx, wantfiled
                 atomictemp=atomictemp,
             )
             if wantfiledata:
+                # XXX note that there is a race window between the time we
+                # write the clean data into the file and the time we stat it.
+                # So another writing process meddling with the file content
+                # right after we wrote it could cause bad stat data to be
+                # gathered.
+                #
+                # There are two pieces of data we gather here:
+                # - the mode:
+                #     That we actually just wrote, so we should not need to
+                #     read it from disk (except that not all modes might have
+                #     survived the disk round-trip, which is another issue:
+                #     we should not depend on this).
+                # - the mtime:
+                #     On systems that support nanosecond precision, the mtime
+                #     could be accurate enough to tell two writes apart.
+                #     However, gathering it in a racy way makes the mtime we
+                #     gather "unreliable".
+                #
+                # (note: we get the size from the data we write, which is sane)
+                #
+                # So in theory the data returned here is fully racy, but in
+                # practice "it works mostly fine".
+                #
+                # Do not be surprised if you end up reading this while looking
+                # for the causes of some buggy status. Feel free to improve
+                # this in the future, but we cannot simply stop gathering the
+                # information. Otherwise `hg status` calls made after a large
+                # `hg update` would have to redo a similar amount of work to
+                # restore and compare all file contents.
                 s = wfctx.lstat()
                 mode = s.st_mode
                 mtime = timestamp.mtime_of(s)
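The race the new comment describes is easy to reproduce: with coarse mtime granularity, two different contents written within the same timestamp tick produce indistinguishable stat data. A self-contained illustration (plain stdlib, nothing Mercurial-specific):

# With one-second mtime granularity, a same-size rewrite made immediately
# after the first write often leaves (mtime, size) unchanged even though
# the content differs, which is exactly the ambiguity discussed above.
import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)

with open(path, 'wb') as f:
    f.write(b'first')
st1 = os.stat(path)

with open(path, 'wb') as f:
    f.write(b'fresh')  # same size, written almost immediately after
st2 = os.stat(path)

# Truncated to whole seconds (as dirstate-v1 stores them), the two states
# usually compare equal despite the content change.
same = (int(st1.st_mtime), st1.st_size) == (int(st2.st_mtime), st2.st_size)
print('ambiguous:', same)
os.unlink(path)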
@@ -1495,7 +1516,8 @@ def applyupdates(
         # mergestate so that it can be reused on commit
         ms.addcommitinfo(f, op)
 
-    numupdates = mresult.len() - mresult.len(mergestatemod.NO_OP_ACTIONS)
+    num_no_op = mresult.len(mergestatemod.MergeAction.NO_OP_ACTIONS)
+    numupdates = mresult.len() - num_no_op
     progress = repo.ui.makeprogress(
         _(b'updating'), unit=_(b'files'), total=numupdates
     )
@@ -1599,9 +1621,9 @@ def applyupdates(
         progress.increment(item=f)
 
     # keep (noop, just log it)
-    for a in mergestatemod.NO_OP_ACTIONS:
+    for a in mergestatemod.MergeAction.NO_OP_ACTIONS:
         for f, args, msg in mresult.getactions((a,), sort=True):
-            repo.ui.debug(b" %s: %s -> %s\n" % (f, msg, a))
+            repo.ui.debug(b" %s: %s -> %s\n" % (f, msg, a.__bytes__()))
             # no progress
 
     # directory rename, move local
@@ -1690,10 +1712,8 @@ def applyupdates(
     )
 
     try:
-        # premerge
-        tocomplete = []
-        for f, args, msg in mergeactions:
-            repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
+        for f, args, msg in mergeactions:
+            repo.ui.debug(b" %s: %s -> m\n" % (f, msg))
             ms.addcommitinfo(f, {b'merged': b'yes'})
             progress.increment(item=f)
             if f == b'.hgsubstate':  # subrepo states need updating
@@ -1702,16 +1722,6 @@ def applyupdates(
                 )
                 continue
             wctx[f].audit()
-            complete, r = ms.preresolve(f, wctx)
-            if not complete:
-                numupdates += 1
-                tocomplete.append((f, args, msg))
-
-        # merge
-        for f, args, msg in tocomplete:
-            repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
-            ms.addcommitinfo(f, {b'merged': b'yes'})
-            progress.increment(item=f, total=numupdates)
             ms.resolve(f, wctx)
 
     except error.InterventionRequired:
@@ -1823,7 +1833,7 @@ def _update(
     If false, merging with an ancestor (fast-forward) is only allowed
     between different named branches. This flag is used by rebase extension
     as a temporary fix and should be avoided in general.
-    labels = labels to use for
+    labels = labels to use for local, other, and base
     mergeforce = whether the merge was run with 'merge --force' (deprecated): if
         this is True, then 'force' should be True as well.
 
@@ -1875,22 +1885,11 @@ def _update(
     # updatecheck='abort' to better support some of these callers.
     if updatecheck is None:
         updatecheck = UPDATECHECK_LINEAR
-    if updatecheck not in (
-        UPDATECHECK_NONE,
-        UPDATECHECK_LINEAR,
-        UPDATECHECK_NO_CONFLICT,
-    ):
-        raise ValueError(
-            r'Invalid updatecheck %r (can accept %r)'
-            % (
-                updatecheck,
-                (
-                    UPDATECHECK_NONE,
-                    UPDATECHECK_LINEAR,
-                    UPDATECHECK_NO_CONFLICT,
-                ),
-            )
-        )
+    okay = (UPDATECHECK_NONE, UPDATECHECK_LINEAR, UPDATECHECK_NO_CONFLICT)
+    if updatecheck not in okay:
+        msg = r'Invalid updatecheck %r (can accept %r)'
+        msg %= (updatecheck, okay)
+        raise ValueError(msg)
     if wc is not None and wc.isinmemory():
         maybe_wlock = util.nullcontextmanager()
     else:
@@ -1919,29 +1918,22 @@ def _update(
                 raise error.StateError(_(b"outstanding uncommitted merge"))
             ms = wc.mergestate()
             if ms.unresolvedcount():
-                raise error.StateError(
-                    _(b"outstanding merge conflicts"),
-                    hint=_(b"use 'hg resolve' to resolve"),
-                )
+                msg = _(b"outstanding merge conflicts")
+                hint = _(b"use 'hg resolve' to resolve")
+                raise error.StateError(msg, hint=hint)
         if branchmerge:
+            m_a = _(b"merging with a working directory ancestor has no effect")
             if pas == [p2]:
-                raise error.Abort(
-                    _(
-                        b"merging with a working directory ancestor"
-                        b" has no effect"
-                    )
-                )
+                raise error.Abort(m_a)
             elif pas == [p1]:
                 if not mergeancestor and wc.branch() == p2.branch():
-                    raise error.Abort(
-                        _(b"nothing to merge"),
-                        hint=_(b"use 'hg update' or check 'hg heads'"),
-                    )
+                    msg = _(b"nothing to merge")
+                    hint = _(b"use 'hg update' or check 'hg heads'")
+                    raise error.Abort(msg, hint=hint)
             if not force and (wc.files() or wc.deleted()):
-                raise error.StateError(
-                    _(b"uncommitted changes"),
-                    hint=_(b"use 'hg status' to list changes"),
-                )
+                msg = _(b"uncommitted changes")
+                hint = _(b"use 'hg status' to list changes")
+                raise error.StateError(msg, hint=hint)
             if not wc.isinmemory():
                 for s in sorted(wc.substate):
                     wc.sub(s).bailifchanged()
@@ -2144,6 +2136,71 @@ def _update(
         mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
     )
     with repo.dirstate.parentchange():
+        ### Filter Filedata
+        #
+        # We gathered "cache" information for the clean files while updating
+        # them: mtime, size and mode.
+        #
+        # At the time this comment is written, there are various issues with
+        # how we gather the `mode` and `mtime` information (see the comment
+        # in `batchget`).
+        #
+        # We are going to smooth one of these issues here: mtime ambiguity.
+        #
+        # i.e. even if the mtime gathered during `batchget` was correct[1],
+        # a change happening right after it could change the content while
+        # keeping the same mtime[2].
+        #
+        # When we reach the current code, the "on disk" part of the update
+        # operation is finished. We still assume that no other process raced
+        # that "on disk" part, but we want to at least prevent later file
+        # changes from altering the content of the file right after the
+        # update operation, so quickly that the same mtime is recorded for
+        # the operation.
+        # To prevent such ambiguity from happening, we will only keep the
+        # "file data" for files with an mtime that is strictly in the past,
+        # i.e. whose mtime is strictly lower than the current time.
+        #
+        # This protects us from race conditions caused by operations that
+        # could run right after this one, especially other Mercurial
+        # operations that could be waiting for the wlock to touch file
+        # contents and the dirstate.
+        #
+        # In an ideal world, we could only get reliable information in
+        # `getfiledata` (from `getbatch`), however the current approach has
+        # been a successful compromise for many years.
+        #
+        # At the time this comment is written, not using any "cache" file
+        # data at all here would not be viable, as it would result in a very
+        # large amount of work (equivalent to the previous `hg update`
+        # during the next status after an update).
+        #
+        # [1] the current code cannot guarantee that the `mtime` and `mode`
+        # are correct, but the result is "okay in practice" (see the comment
+        # in `batchget`).
+        #
+        # [2] using nanosecond precision can greatly help here because it
+        # makes the "different write with same mtime" issue virtually
+        # vanish. However, dirstate v1 cannot store such precision, and a
+        # bunch of python runtimes, operating systems and filesystems do not
+        # provide us with such precision, so we have to operate as if it
+        # wasn't available.
+        if getfiledata:
+            ambiguous_mtime = {}
+            now = timestamp.get_fs_now(repo.vfs)
+            if now is None:
+                # we can't write to the FS, so we won't actually update
+                # the dirstate content anyway, no need to put cache
+                # information.
+                getfiledata = None
+            else:
+                now_sec = now[0]
+                for f, m in pycompat.iteritems(getfiledata):
+                    if m is not None and m[2][0] >= now_sec:
+                        ambiguous_mtime[f] = (m[0], m[1], None)
+                for f, m in pycompat.iteritems(ambiguous_mtime):
+                    getfiledata[f] = m
+
         repo.setparents(fp1, fp2)
         mergestatemod.recordupdates(
             repo, mresult.actionsdict, branchmerge, getfiledata
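The mtime-boundary filter added above can be boiled down to a few lines: any cached entry whose mtime second is not strictly in the past loses its mtime, so a later `status` re-checks that file by content. A minimal sketch; the `(mode, size, mtime_tuple)` layout mirrors the getfiledata values in the diff, everything else is illustrative:

# Cache entries with an mtime at or after "now" are ambiguous: a write in
# the same second could change content without changing the mtime. Strip
# the mtime (keep mode and size) so they get re-validated later.
import time

def drop_ambiguous_mtimes(filedata):
    now_sec = int(time.time())
    for path, (mode, size, mtime) in list(filedata.items()):
        if mtime is not None and mtime[0] >= now_sec:
            filedata[path] = (mode, size, None)
    return filedata

cache = {b'a.txt': (0o644, 5, (int(time.time()), 0))}
drop_ambiguous_mtimes(cache)
print(cache)  # the entry survives, with its mtime invalidated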
@@ -2199,7 +2256,7 @@ def update(ctx, updatecheck=None, wc=Non
         ctx.rev(),
         branchmerge=False,
         force=False,
-        labels=[b'working copy', b'destination'],
+        labels=[b'working copy', b'destination', b'working copy parent'],
         updatecheck=updatecheck,
         wc=wc,
     )
@@ -2311,9 +2368,8 @@ def graft(
 def back_out(ctx, parent=None, wc=None):
     if parent is None:
         if ctx.p2() is not None:
-            raise error.ProgrammingError(
-                b"must specify parent of merge commit to back out"
-            )
+            msg = b"must specify parent of merge commit to back out"
+            raise error.ProgrammingError(msg)
         parent = ctx.p1()
     return _update(
         ctx.repo(),
@@ -2386,13 +2442,13 @@ def purge(
 
     if confirm:
         nb_ignored = len(status.ignored)
-        nb_unkown = len(status.unknown)
-        if nb_unkown and nb_ignored:
-            msg = _(b"permanently delete %d unkown and %d ignored files?")
-            msg %= (nb_unkown, nb_ignored)
-        elif nb_unkown:
-            msg = _(b"permanently delete %d unkown files?")
-            msg %= nb_unkown
+        nb_unknown = len(status.unknown)
+        if nb_unknown and nb_ignored:
+            msg = _(b"permanently delete %d unknown and %d ignored files?")
+            msg %= (nb_unknown, nb_ignored)
+        elif nb_unknown:
+            msg = _(b"permanently delete %d unknown files?")
+            msg %= nb_unknown
         elif nb_ignored:
             msg = _(b"permanently delete %d ignored files?")
             msg %= nb_ignored
@@ -4,6 +4,7 @@ import collections
 import errno
 import shutil
 import struct
+import weakref
 
 from .i18n import _
 from .node import (
@@ -97,36 +98,102 @@ LEGACY_MERGE_DRIVER_STATE = b'm'
 # This record was released in 3.7 and usage was removed in 5.6
 LEGACY_MERGE_DRIVER_MERGE = b'D'
 
+CHANGE_ADDED = b'added'
+CHANGE_REMOVED = b'removed'
+CHANGE_MODIFIED = b'modified'
 
-ACTION_FORGET = b'f'
-ACTION_REMOVE = b'r'
-ACTION_ADD = b'a'
-ACTION_GET = b'g'
-ACTION_PATH_CONFLICT = b'p'
-ACTION_PATH_CONFLICT_RESOLVE = b'pr'
-ACTION_ADD_MODIFIED = b'am'
-ACTION_CREATED = b'c'
-ACTION_DELETED_CHANGED = b'dc'
-ACTION_CHANGED_DELETED = b'cd'
-ACTION_MERGE = b'm'
-ACTION_LOCAL_DIR_RENAME_GET = b'dg'
-ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
-ACTION_KEEP = b'k'
+
+class MergeAction(object):
+    """represent an "action" merge needs to take for a given file
+
+    Attributes:
+
+    _short: internal representation used to identify each action
+
+    no_op: True if the action does not affect the file content or tracking
+    status
+
+    narrow_safe:
+        True if the action can be safely used for a file outside of the
+        narrow set
+
+    changes:
+        The types of changes that this action involves. This is a work in
+        progress and not all actions have one yet. In addition, some require
+        user changes and cannot be fully decided. The values currently
+        available are:
+
+        - ADDED: the file is new in both parents
+        - REMOVED: the file existed in one parent and is getting removed
+        - MODIFIED: the file existed in at least one parent and is getting
+          changed
+    """
+
+    ALL_ACTIONS = weakref.WeakSet()
+    NO_OP_ACTIONS = weakref.WeakSet()
+
+    def __init__(self, short, no_op=False, narrow_safe=False, changes=None):
+        self._short = short
+        self.ALL_ACTIONS.add(self)
+        self.no_op = no_op
+        if self.no_op:
+            self.NO_OP_ACTIONS.add(self)
+        self.narrow_safe = narrow_safe
+        self.changes = changes
+
+    def __hash__(self):
+        return hash(self._short)
+
+    def __repr__(self):
+        return 'MergeAction<%s>' % self._short.decode('ascii')
+
+    def __bytes__(self):
+        return self._short
+
+    def __eq__(self, other):
+        if other is None:
+            return False
+        assert isinstance(other, MergeAction)
+        return self._short == other._short
+
+    def __lt__(self, other):
+        return self._short < other._short
+
+
+ACTION_FORGET = MergeAction(b'f', narrow_safe=True, changes=CHANGE_REMOVED)
+ACTION_REMOVE = MergeAction(b'r', narrow_safe=True, changes=CHANGE_REMOVED)
+ACTION_ADD = MergeAction(b'a', narrow_safe=True, changes=CHANGE_ADDED)
+ACTION_GET = MergeAction(b'g', narrow_safe=True, changes=CHANGE_MODIFIED)
+ACTION_PATH_CONFLICT = MergeAction(b'p')
+ACTION_PATH_CONFLICT_RESOLVE = MergeAction(b'pr')
+ACTION_ADD_MODIFIED = MergeAction(
+    b'am', narrow_safe=True, changes=CHANGE_ADDED
+)  # not 100% sure about the changes value here
+ACTION_CREATED = MergeAction(b'c', narrow_safe=True, changes=CHANGE_ADDED)
+ACTION_DELETED_CHANGED = MergeAction(b'dc')
+ACTION_CHANGED_DELETED = MergeAction(b'cd')
+ACTION_MERGE = MergeAction(b'm')
+ACTION_LOCAL_DIR_RENAME_GET = MergeAction(b'dg')
+ACTION_DIR_RENAME_MOVE_LOCAL = MergeAction(b'dm')
+ACTION_KEEP = MergeAction(b'k', no_op=True)
 # the file was absent on local side before merge and we should
 # keep it absent (absent means file not present, it can be a result
 # of file deletion, rename etc.)
-ACTION_KEEP_ABSENT = b'ka'
+ACTION_KEEP_ABSENT = MergeAction(b'ka', no_op=True)
 # the file is absent on the ancestor and remote side of the merge
 # hence this file is new and we should keep it
-ACTION_KEEP_NEW = b'kn'
-ACTION_EXEC = b'e'
-ACTION_CREATED_MERGE = b'cm'
+ACTION_KEEP_NEW = MergeAction(b'kn', no_op=True)
+ACTION_EXEC = MergeAction(b'e', narrow_safe=True, changes=CHANGE_MODIFIED)
+ACTION_CREATED_MERGE = MergeAction(
+    b'cm', narrow_safe=True, changes=CHANGE_ADDED
+)
 
-# actions which are no op
-NO_OP_ACTIONS = (
-    ACTION_KEEP,
-    ACTION_KEEP_ABSENT,
-    ACTION_KEEP_NEW,
-)
+
+# Used by convert to detect situations it does not like; not sure what the
+# exact criteria is
+CONVERT_MERGE_ACTIONS = (
+    ACTION_MERGE,
+    ACTION_DIR_RENAME_MOVE_LOCAL,
+    ACTION_CHANGED_DELETED,
+    ACTION_DELETED_CHANGED,
+)
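A quick usage sketch for the MergeAction class introduced above: instances register themselves in class-level WeakSets, so iterating NO_OP_ACTIONS replaces the old hand-maintained tuple. It also shows why callers in merge.py now write `m.__bytes__()` explicitly: formatting an object into a bytestring with `b'%s' %` would not call `__bytes__` on Python 3, so the conversion has to be spelled out.

# Trimmed-down re-statement of the pattern, runnable on its own.
import weakref

class MergeAction(object):
    ALL_ACTIONS = weakref.WeakSet()
    NO_OP_ACTIONS = weakref.WeakSet()

    def __init__(self, short, no_op=False):
        self._short = short
        self.ALL_ACTIONS.add(self)  # every instance self-registers
        self.no_op = no_op
        if no_op:
            self.NO_OP_ACTIONS.add(self)

    def __bytes__(self):
        return self._short

KEEP = MergeAction(b'k', no_op=True)
MERGE = MergeAction(b'm')

assert KEEP in MergeAction.NO_OP_ACTIONS
assert {bytes(a) for a in MergeAction.ALL_ACTIONS} == {b'k', b'm'}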
@@ -313,16 +380,15 @@ class _mergestate_base(object):
         """return extras stored with the mergestate for the given filename"""
         return self._stateextras[filename]
 
-    def _resolve(self, preresolve, dfile, wctx):
-        """r
-        Returns whether the merge was completed and the return value of merge
-        obtained from filemerge._filemerge().
-        """
+    def resolve(self, dfile, wctx):
+        """run merge process for dfile
+
+        Returns the exit code of the merge."""
         if self[dfile] in (
             MERGE_RECORD_RESOLVED,
             LEGACY_RECORD_DRIVER_RESOLVED,
         ):
-            return True, 0
+            return 0
         stateentry = self._state[dfile]
         state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
         octx = self._repo[self._other]
@@ -341,84 +407,63 @@ class _mergestate_base(object):
         fla = fca.flags()
         if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
             if fca.rev() == nullrev and flags != flo:
-                if preresolve:
-                    self._repo.ui.warn(
-                        _(
-                            b'warning: cannot merge flags for %s '
-                            b'without common ancestor - keeping local flags\n'
-                        )
-                        % afile
-                    )
+                self._repo.ui.warn(
+                    _(
+                        b'warning: cannot merge flags for %s '
+                        b'without common ancestor - keeping local flags\n'
+                    )
+                    % afile
+                )
             elif flags == fla:
                 flags = flo
-        if preresolve:
-            # restore local
-            if localkey != self._repo.nodeconstants.nullhex:
-                self._restore_backup(wctx[dfile], localkey, flags)
-            else:
-                wctx[dfile].remove(ignoremissing=True)
-            complete, merge_ret, deleted = filemerge.premerge(
-                self._repo,
-                wctx,
-                self._local,
-                lfile,
-                fcd,
-                fco,
-                fca,
-                labels=self._labels,
-            )
-        else:
-            complete, merge_ret, deleted = filemerge.filemerge(
-                self._repo,
-                wctx,
-                self._local,
-                lfile,
-                fcd,
-                fco,
-                fca,
-                labels=self._labels,
-            )
-        if merge_ret is None:
+
+        # restore local
+        if localkey != self._repo.nodeconstants.nullhex:
+            self._restore_backup(wctx[dfile], localkey, flags)
+        else:
+            wctx[dfile].remove(ignoremissing=True)
+
+        if not fco.cmp(fcd):  # files identical?
             # If return value of merge is None, then there are no real conflict
             del self._state[dfile]
+            self._results[dfile] = None, None
             self._dirty = True
-        elif not merge_ret:
+            return None
+
+        merge_ret, deleted = filemerge.filemerge(
+            self._repo,
+            wctx,
+            self._local,
+            lfile,
+            fcd,
+            fco,
+            fca,
+            labels=self._labels,
+        )
+
+        if not merge_ret:
             self.mark(dfile, MERGE_RECORD_RESOLVED)
 
-        if complete:
-            action = None
-            if deleted:
-                if fcd.isabsent():
-                    # dc: local picked. Need to drop if present, which may
-                    # happen on re-resolves.
-                    action = ACTION_FORGET
-                else:
-                    # cd: remote picked (or otherwise deleted)
-                    action = ACTION_REMOVE
-            else:
-                if fcd.isabsent():  # dc: remote picked
-                    action = ACTION_GET
-                elif fco.isabsent():  # cd: local picked
-                    if dfile in self.localctx:
-                        action = ACTION_ADD_MODIFIED
-                    else:
-                        action = ACTION_ADD
-                # else: regular merges (no action necessary)
-            self._results[dfile] = merge_ret, action
-
-        return complete, merge_ret
-
-    def preresolve(self, dfile, wctx):
-        """run premerge process for dfile
-
-        Returns whether the merge is complete, and the exit code."""
-        return self._resolve(True, dfile, wctx)
-
-    def resolve(self, dfile, wctx):
-        """run merge process (assuming premerge was run) for dfile
-
-        Returns the exit code of the merge."""
-        return self._resolve(False, dfile, wctx)[1]
+        action = None
+        if deleted:
+            if fcd.isabsent():
+                # dc: local picked. Need to drop if present, which may
+                # happen on re-resolves.
+                action = ACTION_FORGET
+            else:
+                # cd: remote picked (or otherwise deleted)
+                action = ACTION_REMOVE
+        else:
+            if fcd.isabsent():  # dc: remote picked
+                action = ACTION_GET
+            elif fco.isabsent():  # cd: local picked
+                if dfile in self.localctx:
+                    action = ACTION_ADD_MODIFIED
+                else:
+                    action = ACTION_ADD
+            # else: regular merges (no action necessary)
+        self._results[dfile] = merge_ret, action
+
+        return merge_ret
 
     def counts(self):
         """return counts for updated, merged and removed files in this
@@ -109,23 +109,24 @@ def validatepatterns(pats):
     and patterns that are loaded from sources that use the internal,
     prefixed pattern representation (but can't necessarily be fully trusted).
     """
-    if not isinstance(pats, set):
-        raise error.ProgrammingError(
-            b'narrow patterns should be a set; got %r' % pats
-        )
+    with util.timedcm('narrowspec.validatepatterns(pats size=%d)', len(pats)):
+        if not isinstance(pats, set):
+            raise error.ProgrammingError(
+                b'narrow patterns should be a set; got %r' % pats
+            )
 
-    for pat in pats:
-        if not pat.startswith(VALID_PREFIXES):
-            # Use a Mercurial exception because this can happen due to user
-            # bugs (e.g. manually updating spec file).
-            raise error.Abort(
-                _(b'invalid prefix on narrow pattern: %s') % pat,
-                hint=_(
-                    b'narrow patterns must begin with one of '
-                    b'the following: %s'
-                )
-                % b', '.join(VALID_PREFIXES),
-            )
+        for pat in pats:
+            if not pat.startswith(VALID_PREFIXES):
+                # Use a Mercurial exception because this can happen due to
+                # user bugs (e.g. manually updating spec file).
+                raise error.Abort(
+                    _(b'invalid prefix on narrow pattern: %s') % pat,
+                    hint=_(
+                        b'narrow patterns must begin with one of '
+                        b'the following: %s'
+                    )
+                    % b', '.join(VALID_PREFIXES),
+                )
 
 
 def format(includes, excludes):
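Wrapping validatepatterns in util.timedcm makes the cost of pattern validation visible in traces. A generic sketch of such a timing context manager (the real mercurial.util.timedcm also feeds Mercurial's tracing machinery; this stripped-down version only measures wall-clock time):

# A timedcm-style context manager: label may contain %-placeholders that
# are filled from the extra arguments, mirroring the call site above.
import contextlib
import time

@contextlib.contextmanager
def timedcm(label, *args):
    start = time.perf_counter()
    try:
        yield
    finally:
        elapsed = time.perf_counter() - start
        print('%s: %.6fs' % (label % args if args else label, elapsed))

with timedcm('narrowspec.validatepatterns(pats size=%d)', 3):
    for pat in (b'path:a', b'path:b', b'rootfilesin:c'):
        assert pat.startswith((b'path:', b'rootfilesin:'))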
@@ -323,7 +324,7 @@ def updateworkingcopy(repo, assumeclean=
     removedmatch = matchmod.differencematcher(oldmatch, newmatch)
 
     ds = repo.dirstate
-    lookup, status = ds.status(
+    lookup, status, _mtime_boundary = ds.status(
         removedmatch, subrepos=[], ignored=True, clean=True, unknown=True
     )
     trackeddirty = status.modified + status.added
@@ -73,10 +73,6 @@ import errno
 import struct
 
 from .i18n import _
-from .node import (
-    bin,
-    hex,
-)
 from .pycompat import getattr
 from .node import (
     bin,
@@ -579,6 +575,12 @@ class obsstore(object):
         return len(self._all)
 
     def __nonzero__(self):
+        from . import statichttprepo
+
+        if isinstance(self.repo, statichttprepo.statichttprepository):
+            # If repo is accessed via static HTTP, then we can't use os.stat()
+            # to just peek at the file size.
+            return len(self._data) > 1
         if not self._cached('_all'):
             try:
                 return self.svfs.stat(b'obsstore').st_size > 1
@@ -944,8 +946,7 @@ def _computeobsoleteset(repo):
     getnode = repo.changelog.node
     notpublic = _mutablerevs(repo)
     isobs = repo.obsstore.successors.__contains__
-    obs = frozenset(r for r in notpublic if isobs(getnode(r)))
-    return obs
+    return frozenset(r for r in notpublic if isobs(getnode(r)))
 
 
 @cachefor(b'orphan')
@@ -963,14 +964,14 @@ def _computeorphanset(repo):
             if p in obsolete or p in unstable:
                 unstable.add(r)
                 break
-    return unstable
+    return frozenset(unstable)
 
 
 @cachefor(b'suspended')
 def _computesuspendedset(repo):
     """the set of obsolete parents with non obsolete descendants"""
     suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
-    return
+    return frozenset(r for r in getrevs(repo, b'obsolete') if r in suspended)
 
 
 @cachefor(b'extinct')
@@ -1002,7 +1003,7 @@ def _computephasedivergentset(repo):
                 # we have a public predecessor
                 bumped.add(rev)
                 break  # Next draft!
-    return bumped
+    return frozenset(bumped)
 
 
 @cachefor(b'contentdivergent')
@@ -1029,7 +1030,7 @@ def _computecontentdivergentset(repo):
                 divergent.add(rev)
                 break
             toprocess.update(obsstore.predecessors.get(prec, ()))
-    return divergent
+    return frozenset(divergent)
 
 
 def makefoldid(relation, user):
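The switch to frozenset returns matters because these computed revision sets are memoized (via @cachefor) and shared between callers: handing out a mutable set would let one caller silently corrupt every later read of the cache. A tiny illustration of the idea, with a hand-rolled cache standing in for the real decorator:

# Why frozenset for cached sets: the first computation is stored and
# returned to every subsequent caller, so it must be immutable.
_cache = {}

def obsolete_revs(repo_id, compute):
    if repo_id not in _cache:
        _cache[repo_id] = frozenset(compute())
    return _cache[repo_id]

revs = obsolete_revs('repo1', lambda: {1, 2, 3})
try:
    revs.add(4)  # a plain set here would poison the shared cache
except AttributeError:
    print('cached set is immutable, as intended')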
@@ -218,7 +218,7 @@ def exclusivemarkers(repo, nodes):
 
     or
 
-    # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
+    # (A0 rewritten as AX; AX rewritten as A1; AX is unknown locally)
     #
     # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
 
@@ -55,6 +55,8 @@ wordsplitter = re.compile(
 )
 
 PatchError = error.PatchError
+PatchParseError = error.PatchParseError
+PatchApplicationError = error.PatchApplicationError
 
 # public functions
 
@@ -107,7 +109,9 @@ def split(stream):
     def mimesplit(stream, cur):
         def msgfp(m):
             fp = stringio()
+            # pytype: disable=wrong-arg-types
             g = mail.Generator(fp, mangle_from_=False)
+            # pytype: enable=wrong-arg-types
             g.flatten(m)
             fp.seek(0)
             return fp
@@ -553,7 +557,9 @@ class workingbackend(fsbackend):
         if not self.repo.dirstate.get_entry(fname).any_tracked and self.exists(
             fname
         ):
-            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)
+            raise PatchApplicationError(
+                _(b'cannot patch %s: file is not tracked') % fname
+            )
 
     def setfile(self, fname, data, mode, copysource):
         self._checkknown(fname)
@@ -637,7 +643,9 @@ class repobackend(abstractbackend):
 
     def _checkknown(self, fname):
         if fname not in self.ctx:
-            raise PatchError(_(b'cannot patch %s: file is not tracked') % fname)
+            raise PatchApplicationError(
+                _(b'cannot patch %s: file is not tracked') % fname
+            )
 
     def getfile(self, fname):
         try:
@@ -793,7 +801,7 @@ class patchfile(object):
 
     def apply(self, h):
         if not h.complete():
-            raise PatchError(
+            raise PatchParseError(
                 _(b"bad hunk #%d %s (%d %d %d %d)")
                 % (h.number, h.desc, len(h.a), h.lena, len(h.b), h.lenb)
             )
@@ -1388,7 +1396,7 @@ class hunk(object):
     def read_unified_hunk(self, lr):
         m = unidesc.match(self.desc)
         if not m:
-            raise PatchError(_(b"bad hunk #%d") % self.number)
+            raise PatchParseError(_(b"bad hunk #%d") % self.number)
         self.starta, self.lena, self.startb, self.lenb = m.groups()
         if self.lena is None:
             self.lena = 1
@@ -1405,7 +1413,7 @@ class hunk(object):
                 lr, self.hunk, self.lena, self.lenb, self.a, self.b
             )
         except error.ParseError as e:
-            raise PatchError(_(b"bad hunk #%d: %s") % (self.number, e))
+            raise PatchParseError(_(b"bad hunk #%d: %s") % (self.number, e))
         # if we hit eof before finishing out the hunk, the last line will
         # be zero length. Let's try to fix it up.
         while len(self.hunk[-1]) == 0:
@@ -1420,7 +1428,7 @@ class hunk(object):
         self.desc = lr.readline()
         m = contextdesc.match(self.desc)
         if not m:
-            raise PatchError(_(b"bad hunk #%d") % self.number)
+            raise PatchParseError(_(b"bad hunk #%d") % self.number)
         self.starta, aend = m.groups()
         self.starta = int(self.starta)
         if aend is None:
@@ -1440,7 +1448,7 @@ class hunk(object):
             elif l.startswith(b' '):
                 u = b' ' + s
             else:
-                raise PatchError(
+                raise PatchParseError(
                     _(b"bad hunk #%d old text line %d") % (self.number, x)
                 )
             self.a.append(u)
@@ -1454,7 +1462,7 @@ class hunk(object):
         l = lr.readline()
         m = contextdesc.match(l)
         if not m:
-            raise PatchError(_(b"bad hunk #%d") % self.number)
+            raise PatchParseError(_(b"bad hunk #%d") % self.number)
         self.startb, bend = m.groups()
         self.startb = int(self.startb)
         if bend is None:
@@ -1487,7 +1495,7 @@ class hunk(object):
                     lr.push(l)
                     break
             else:
-                raise PatchError(
+                raise PatchParseError(
                     _(b"bad hunk #%d old text line %d") % (self.number, x)
                 )
             self.b.append(s)
@@ -1601,7 +1609,7 @@ class binhunk(object):
         while True:
             line = getline(lr, self.hunk)
             if not line:
-                raise PatchError(
+                raise PatchParseError(
                     _(b'could not extract "%s" binary data') % self._fname
                 )
             if line.startswith(b'literal '):
@@ -1622,14 +1630,14 @@ class binhunk(object):
             try:
                 dec.append(util.b85decode(line[1:])[:l])
             except ValueError as e:
-                raise PatchError(
+                raise PatchParseError(
                     _(b'could not decode "%s" binary patch: %s')
                     % (self._fname, stringutil.forcebytestr(e))
                 )
             line = getline(lr, self.hunk)
         text = zlib.decompress(b''.join(dec))
         if len(text) != size:
-            raise PatchError(
+            raise PatchParseError(
                 _(b'"%s" length is %d bytes, should be %d')
                 % (self._fname, len(text), size)
             )
@@ -1847,7 +1855,7 @@ def parsepatch(originalchunks, maxcontex
             try:
                 p.transitions[state][newstate](p, data)
             except KeyError:
-                raise PatchError(
+                raise PatchParseError(
                     b'unhandled transition: %s -> %s' % (state, newstate)
                 )
             state = newstate
@@ -1874,7 +1882,7 @@ def pathtransform(path, strip, prefix):
     ('a//b/', 'd/e/c')
     >>> pathtransform(b'a/b/c', 3, b'')
     Traceback (most recent call last):
    PatchApplicationError: unable to strip away 1 of 3 dirs from a/b/c
    """
    pathlen = len(path)
    i = 0
@@ -1884,7 +1892,7 @@ def pathtransform(path, strip, prefix):
     while count > 0:
         i = path.find(b'/', i)
         if i == -1:
-            raise PatchError(
+            raise PatchApplicationError(
                 _(b"unable to strip away %d of %d dirs from %s")
                 % (count, strip, path)
             )
@@ -1947,7 +1955,7 @@ def makepatchmeta(backend, afile_orig, b
     elif not nulla:
         fname = afile
     else:
-        raise PatchError(_(b"undefined source and destination files"))
+        raise PatchParseError(_(b"undefined source and destination files"))
 
     gp = patchmeta(fname)
     if create:
@@ -2097,7 +2105,7 @@ def iterhunks(fp):
                             gp.copy(),
                         )
                     if not gitpatches:
-                        raise PatchError(
+                        raise PatchParseError(
                             _(b'failed to synchronize metadata for "%s"') % afile[2:]
|
2109 | _(b'failed to synchronize metadata for "%s"') % afile[2:] | |
2102 | ) |
|
2110 | ) | |
2103 | newfile = True |
|
2111 | newfile = True | |
@@ -2193,7 +2201,7 b' def applybindelta(binchunk, data):' | |||||
2193 | out += binchunk[i:offset_end] |
|
2201 | out += binchunk[i:offset_end] | |
2194 | i += cmd |
|
2202 | i += cmd | |
2195 | else: |
|
2203 | else: | |
2196 | raise PatchError(_(b'unexpected delta opcode 0')) |
|
2204 | raise PatchApplicationError(_(b'unexpected delta opcode 0')) | |
2197 | return out |
|
2205 | return out | |
2198 |
|
2206 | |||
2199 |
|
2207 | |||
@@ -2270,7 +2278,7 b' def _applydiff(' | |||||
2270 | data, mode = store.getfile(gp.oldpath)[:2] |
|
2278 | data, mode = store.getfile(gp.oldpath)[:2] | |
2271 | if data is None: |
|
2279 | if data is None: | |
2272 | # This means that the old path does not exist |
|
2280 | # This means that the old path does not exist | |
2273 | raise PatchError( |
|
2281 | raise PatchApplicationError( | |
2274 | _(b"source file '%s' does not exist") % gp.oldpath |
|
2282 | _(b"source file '%s' does not exist") % gp.oldpath | |
2275 | ) |
|
2283 | ) | |
2276 | if gp.mode: |
|
2284 | if gp.mode: | |
@@ -2283,7 +2291,7 b' def _applydiff(' | |||||
2283 | if gp.op in (b'ADD', b'RENAME', b'COPY') and backend.exists( |
|
2291 | if gp.op in (b'ADD', b'RENAME', b'COPY') and backend.exists( | |
2284 | gp.path |
|
2292 | gp.path | |
2285 | ): |
|
2293 | ): | |
2286 | raise PatchError( |
|
2294 | raise PatchApplicationError( | |
2287 | _( |
|
2295 | _( | |
2288 | b"cannot create %s: destination " |
|
2296 | b"cannot create %s: destination " | |
2289 | b"already exists" |
|
2297 | b"already exists" | |
@@ -2365,7 +2373,7 b' def _externalpatch(ui, repo, patcher, pa' | |||||
2365 | scmutil.marktouched(repo, files, similarity) |
|
2373 | scmutil.marktouched(repo, files, similarity) | |
2366 | code = fp.close() |
|
2374 | code = fp.close() | |
2367 | if code: |
|
2375 | if code: | |
2368 | raise PatchError( |
|
2376 | raise PatchApplicationError( | |
2369 | _(b"patch command failed: %s") % procutil.explainexit(code) |
|
2377 | _(b"patch command failed: %s") % procutil.explainexit(code) | |
2370 | ) |
|
2378 | ) | |
2371 | return fuzz |
|
2379 | return fuzz | |
@@ -2397,7 +2405,7 b' def patchbackend(' | |||||
2397 | files.update(backend.close()) |
|
2405 | files.update(backend.close()) | |
2398 | store.close() |
|
2406 | store.close() | |
2399 | if ret < 0: |
|
2407 | if ret < 0: | |
2400 | raise PatchError(_(b'patch failed to apply')) |
|
2408 | raise PatchApplicationError(_(b'patch failed to apply')) | |
2401 | return ret > 0 |
|
2409 | return ret > 0 | |
2402 |
|
2410 | |||
2403 |
|
2411 |
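Note on the hunks above: the catch-all PatchError is split into a parse-time error and an apply-time error. A minimal sketch of the presumed class hierarchy — the names come from the diff, the base-class relationship is an assumption that would keep existing `except PatchError` call sites working:

class PatchError(Exception):
    """Base class for patch failures (kept so existing handlers still match)."""


class PatchParseError(PatchError):
    """The patch itself is malformed: bad hunk, undecodable binary data, ..."""


class PatchApplicationError(PatchError):
    """The patch is well-formed but cannot be applied to this repository."""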
@@ -79,20 +79,24 @@ class pathauditor(object):
            return
        # AIX ignores "/" at end of path, others raise EISDIR.
        if util.endswithsep(path):
-           raise error.Abort(_(b"path ends in directory separator: %s") % path)
+           raise error.InputError(
+               _(b"path ends in directory separator: %s") % path
+           )
        parts = util.splitpath(path)
        if (
            os.path.splitdrive(path)[0]
            or _lowerclean(parts[0]) in (b'.hg', b'.hg.', b'')
            or pycompat.ospardir in parts
        ):
-           raise error.Abort(_(b"path contains illegal component: %s") % path)
+           raise error.InputError(
+               _(b"path contains illegal component: %s") % path
+           )
        # Windows shortname aliases
        for p in parts:
            if b"~" in p:
                first, last = p.split(b"~", 1)
                if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
-                   raise error.Abort(
+                   raise error.InputError(
                        _(b"path contains illegal component: %s") % path
                    )
        if b'.hg' in _lowerclean(path):
@@ -101,7 +105,7 @@ class pathauditor(object):
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
-                   raise error.Abort(
+                   raise error.InputError(
                        _(b"path '%s' is inside nested repo %r")
                        % (path, pycompat.bytestr(base))
                    )
@@ -104,6 +104,7 @@ class DirstateItem(object):
    _mtime_ns = attr.ib()
    _fallback_exec = attr.ib()
    _fallback_symlink = attr.ib()
+   _mtime_second_ambiguous = attr.ib()

    def __init__(
        self,
@@ -127,24 +128,27 @@ class DirstateItem(object):
        self._size = None
        self._mtime_s = None
        self._mtime_ns = None
+       self._mtime_second_ambiguous = False
        if parentfiledata is None:
            has_meaningful_mtime = False
            has_meaningful_data = False
+       elif parentfiledata[2] is None:
+           has_meaningful_mtime = False
        if has_meaningful_data:
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
        if has_meaningful_mtime:
-           self._mtime_s, self._mtime_ns = parentfiledata[2]
+           (
+               self._mtime_s,
+               self._mtime_ns,
+               self._mtime_second_ambiguous,
+           ) = parentfiledata[2]

    @classmethod
    def from_v2_data(cls, flags, size, mtime_s, mtime_ns):
        """Build a new DirstateItem object from V2 data"""
        has_mode_size = bool(flags & DIRSTATE_V2_HAS_MODE_AND_SIZE)
        has_meaningful_mtime = bool(flags & DIRSTATE_V2_HAS_MTIME)
-       if flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS:
-           # The current code is not able to do the more subtle comparison
-           # that MTIME_SECOND_AMBIGUOUS requires, so we ignore the mtime.
-           has_meaningful_mtime = False
        mode = None

        if flags & DIRSTATE_V2_EXPECTED_STATE_IS_MODIFIED:
@@ -171,13 +175,15 @@ class DirstateItem(object):
            mode |= stat.S_IFLNK
        else:
            mode |= stat.S_IFREG
+
+       second_ambiguous = flags & DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS
        return cls(
            wc_tracked=bool(flags & DIRSTATE_V2_WDIR_TRACKED),
            p1_tracked=bool(flags & DIRSTATE_V2_P1_TRACKED),
            p2_info=bool(flags & DIRSTATE_V2_P2_INFO),
            has_meaningful_data=has_mode_size,
            has_meaningful_mtime=has_meaningful_mtime,
-           parentfiledata=(mode, size, (mtime_s, mtime_ns)),
+           parentfiledata=(mode, size, (mtime_s, mtime_ns, second_ambiguous)),
            fallback_exec=fallback_exec,
            fallback_symlink=fallback_symlink,
        )
@@ -214,13 +220,13 @@ class DirstateItem(object):
                wc_tracked=True,
                p1_tracked=True,
                has_meaningful_mtime=False,
-               parentfiledata=(mode, size, (42, 0)),
+               parentfiledata=(mode, size, (42, 0, False)),
            )
        else:
            return cls(
                wc_tracked=True,
                p1_tracked=True,
-               parentfiledata=(mode, size, (mtime, 0)),
+               parentfiledata=(mode, size, (mtime, 0, False)),
            )
        else:
            raise RuntimeError(b'unknown state: %s' % state)
@@ -246,7 +252,7 @@ class DirstateItem(object):
        self._p1_tracked = True
        self._mode = mode
        self._size = size
-       self._mtime_s, self._mtime_ns = mtime
+       self._mtime_s, self._mtime_ns, self._mtime_second_ambiguous = mtime

    def set_tracked(self):
        """mark a file as tracked in the working copy
@@ -301,10 +307,22 @@ class DirstateItem(object):
        if self_sec is None:
            return False
        self_ns = self._mtime_ns
-       other_sec, other_ns = other_mtime
-       return self_sec == other_sec and (
-           self_ns == other_ns or self_ns == 0 or other_ns == 0
-       )
+       other_sec, other_ns, second_ambiguous = other_mtime
+       if self_sec != other_sec:
+           # the seconds differ: these mtimes are definitely not equal
+           return False
+       elif other_ns == 0 or self_ns == 0:
+           # at least one side has no nano-second information
+           if self._mtime_second_ambiguous:
+               # We cannot trust the mtime in this case
+               return False
+           else:
+               # the "seconds" value was reliable on its own, we are good to go
+               return True
+       else:
+           # We have nano-second information, let us use it!
+           return self_ns == other_ns

    @property
    def state(self):
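The comparison above is the subtle part of this change. A standalone sketch of the same decision table (illustrative, not project code; the `(seconds, nanoseconds, second_ambiguous)` tuple shape is taken from the diff):

def mtime_likely_equal(self_sec, self_ns, self_ambiguous, other_mtime):
    other_sec, other_ns, _second_ambiguous = other_mtime
    if self_sec != other_sec:
        return False  # different seconds: definitely not equal
    if self_ns == 0 or other_ns == 0:
        # one side lacks sub-second precision; the seconds match is only
        # trustworthy when the stored mtime was not flagged as ambiguous
        return not self_ambiguous
    return self_ns == other_ns  # both sides have nanoseconds: compare them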
@@ -463,6 +481,8 @@ class DirstateItem(object):
            flags |= DIRSTATE_V2_MODE_IS_SYMLINK
        if self._mtime_s is not None:
            flags |= DIRSTATE_V2_HAS_MTIME
+           if self._mtime_second_ambiguous:
+               flags |= DIRSTATE_V2_MTIME_SECOND_AMBIGUOUS

        if self._fallback_exec is not None:
            flags |= DIRSTATE_V2_HAS_FALLBACK_EXEC
@@ -531,13 +551,11 @@ class DirstateItem(object):
            return AMBIGUOUS_TIME
        elif not self._p1_tracked:
            return AMBIGUOUS_TIME
+       elif self._mtime_second_ambiguous:
+           return AMBIGUOUS_TIME
        else:
            return self._mtime_s

-   def need_delay(self, now):
-       """True if the stored mtime would be ambiguous with the current time"""
-       return self.v1_state() == b'n' and self._mtime_s == now[0]
-

def gettype(q):
    return int(q & 0xFFFF)
@@ -566,18 +584,13 @@ class BaseIndexObject(object):
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
+       revlog_constants.RANK_UNKNOWN,
    )

    @util.propertycache
    def entry_size(self):
        return self.index_format.size

-   @property
-   def nodemap(self):
-       msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
-       util.nouideprecwarn(msg, b'5.3', stacklevel=2)
-       return self._nodemap
-
    @util.propertycache
    def _nodemap(self):
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
@@ -629,7 +642,7 @@ class BaseIndexObject(object):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
-           raise IndexError
+           raise IndexError(i)

    def __getitem__(self, i):
        if i == -1:
@@ -653,6 +666,7 @@ class BaseIndexObject(object):
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
+           revlog_constants.RANK_UNKNOWN,
        )
        return r

@@ -785,9 +799,14 @@ class InlinedIndexObject(BaseIndexObject
        return self._offsets[i]


-def parse_index2(data, inline, revlogv2=False):
+def parse_index2(data, inline, format=revlog_constants.REVLOGV1):
+   if format == revlog_constants.CHANGELOGV2:
+       return parse_index_cl_v2(data)
    if not inline:
-       cls = IndexObject2 if revlogv2 else IndexObject
+       if format == revlog_constants.REVLOGV2:
+           cls = IndexObject2
+       else:
+           cls = IndexObject
        return cls(data), None
    cls = InlinedIndexObject
    return cls(data, inline), (0, data)
@@ -835,7 +854,7 @@ class IndexObject2(IndexObject):
        entry = data[:10]
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
-       return entry + (data_comp, sidedata_comp)
+       return entry + (data_comp, sidedata_comp, revlog_constants.RANK_UNKNOWN)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
@@ -860,20 +879,53 @@ class IndexObject2(IndexObject):
class IndexChangelogV2(IndexObject2):
    index_format = revlog_constants.INDEX_ENTRY_CL_V2

+   null_item = (
+       IndexObject2.null_item[: revlog_constants.ENTRY_RANK]
+       + (0,)  # rank of null is 0
+       + IndexObject2.null_item[revlog_constants.ENTRY_RANK :]
+   )
+
    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
-       entry = items[:3] + (rev, rev) + items[3:8]
-       data_comp = items[8] & 3
-       sidedata_comp = (items[8] >> 2) & 3
-       return entry + (data_comp, sidedata_comp)
+       return (
+           items[revlog_constants.INDEX_ENTRY_V2_IDX_OFFSET],
+           items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSED_LENGTH],
+           items[revlog_constants.INDEX_ENTRY_V2_IDX_UNCOMPRESSED_LENGTH],
+           rev,
+           rev,
+           items[revlog_constants.INDEX_ENTRY_V2_IDX_PARENT_1],
+           items[revlog_constants.INDEX_ENTRY_V2_IDX_PARENT_2],
+           items[revlog_constants.INDEX_ENTRY_V2_IDX_NODEID],
+           items[revlog_constants.INDEX_ENTRY_V2_IDX_SIDEDATA_OFFSET],
+           items[
+               revlog_constants.INDEX_ENTRY_V2_IDX_SIDEDATA_COMPRESSED_LENGTH
+           ],
+           items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSION_MODE] & 3,
+           (items[revlog_constants.INDEX_ENTRY_V2_IDX_COMPRESSION_MODE] >> 2)
+           & 3,
+           items[revlog_constants.INDEX_ENTRY_V2_IDX_RANK],
+       )

    def _pack_entry(self, rev, entry):
-       assert entry[3] == rev, entry[3]
-       assert entry[4] == rev, entry[4]
-       data = entry[:3] + entry[5:10]
-       data_comp = entry[10] & 3
-       sidedata_comp = (entry[11] & 3) << 2
-       data += (data_comp | sidedata_comp,)
+
+       base = entry[revlog_constants.ENTRY_DELTA_BASE]
+       link_rev = entry[revlog_constants.ENTRY_LINK_REV]
+       assert base == rev, (base, rev)
+       assert link_rev == rev, (link_rev, rev)
+       data = (
+           entry[revlog_constants.ENTRY_DATA_OFFSET],
+           entry[revlog_constants.ENTRY_DATA_COMPRESSED_LENGTH],
+           entry[revlog_constants.ENTRY_DATA_UNCOMPRESSED_LENGTH],
+           entry[revlog_constants.ENTRY_PARENT_1],
+           entry[revlog_constants.ENTRY_PARENT_2],
+           entry[revlog_constants.ENTRY_NODE_ID],
+           entry[revlog_constants.ENTRY_SIDEDATA_OFFSET],
+           entry[revlog_constants.ENTRY_SIDEDATA_COMPRESSED_LENGTH],
+           entry[revlog_constants.ENTRY_DATA_COMPRESSION_MODE] & 3
+           | (entry[revlog_constants.ENTRY_SIDEDATA_COMPRESSION_MODE] & 3)
+           << 2,
+           entry[revlog_constants.ENTRY_RANK],
+       )
        return self.index_format.pack(*data)


@@ -903,23 +955,11 @@ def parse_dirstate(dmap, copymap, st):
    return parents


-def pack_dirstate(dmap, copymap, pl, now):
+def pack_dirstate(dmap, copymap, pl):
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
-       if e.need_delay(now):
-           # The file was last modified "simultaneously" with the current
-           # write to dirstate (i.e. within the same second for file-
-           # systems with a granularity of 1 sec). This commonly happens
-           # for at least a couple of files on 'update'.
-           # The user could change the file without changing its size
-           # within the same second. Invalidate the file's mtime in
-           # dirstate, forcing future 'status' calls to compare the
-           # contents of the file if the size is the same. This prevents
-           # mistakenly treating such files as clean.
-           e.set_possibly_dirty()
-
        if f in copymap:
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
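The deleted need_delay() branch guarded a classic race: a file written in the same second as the dirstate can be modified again, with the same size, within that second, and a pure (size, mtime) check would then miss the edit. A toy illustration of the condition the new second_ambiguous flag records at write time (illustrative code only, not part of the module):

def mtime_is_reliable(file_mtime_s, dirstate_write_s):
    # A same-second write makes a later same-second edit invisible to a
    # size+mtime comparison, so the stored mtime must be marked ambiguous
    # instead of being trusted.
    return file_mtime_s != dirstate_write_s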
@@ -7,11 +7,18 @@

from __future__ import absolute_import

+# obsolete experimental requirements:
+# - manifestv2: An experimental new manifest format that allowed
+#   for stem compression of long paths. The experiment ended up not
+#   being successful (repository sizes went up due to worse delta
+#   chains), and the code was deleted in 4.6.
+
GENERALDELTA_REQUIREMENT = b'generaldelta'
DOTENCODE_REQUIREMENT = b'dotencode'
STORE_REQUIREMENT = b'store'
FNCACHE_REQUIREMENT = b'fncache'

+DIRSTATE_TRACKED_HINT_V1 = b'dirstate-tracked-key-v1'
DIRSTATE_V2_REQUIREMENT = b'dirstate-v2'

# When narrowing is finalized and no longer subject to format changes,
@@ -30,6 +37,9 @@ TREEMANIFEST_REQUIREMENT = b'treemanifes

REVLOGV1_REQUIREMENT = b'revlogv1'

+# allow using ZSTD as compression engine for revlog content
+REVLOG_COMPRESSION_ZSTD = b'revlog-compression-zstd'
+
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
CHANGELOGV2_REQUIREMENT = b'exp-changelog-v2'
@@ -66,6 +76,10 @@ RELATIVE_SHARED_REQUIREMENT = b'relshare
# `.hg/store/requires` are present.
SHARESAFE_REQUIREMENT = b'share-safe'

+# Bookmarks must be stored in the `store` part of the repository and will be
+# shared across shares
+BOOKMARKS_IN_STORE_REQUIREMENT = b'bookmarksinstore'
+
# List of requirements which are working directory specific
# These requirements cannot be shared between repositories if they
# share the same store
@@ -83,5 +97,25 @@ WORKING_DIR_REQUIREMENTS = {
    SHARED_REQUIREMENT,
    RELATIVE_SHARED_REQUIREMENT,
    SHARESAFE_REQUIREMENT,
+   DIRSTATE_TRACKED_HINT_V1,
    DIRSTATE_V2_REQUIREMENT,
}
+
+# List of requirements that impact "stream-clone" (and hardlink clone) and
+# cannot be changed in such cases.
+#
+# Requirements not in this list are safe to alter during stream-clone.
+#
+# note: this list is inherited from previous code and may miss some relevant
+# requirements while containing some irrelevant ones.
+STREAM_FIXED_REQUIREMENTS = {
+   BOOKMARKS_IN_STORE_REQUIREMENT,
+   CHANGELOGV2_REQUIREMENT,
+   COPIESSDC_REQUIREMENT,
+   GENERALDELTA_REQUIREMENT,
+   INTERNAL_PHASE_REQUIREMENT,
+   REVLOG_COMPRESSION_ZSTD,
+   REVLOGV1_REQUIREMENT,
+   REVLOGV2_REQUIREMENT,
+   SPARSEREVLOG_REQUIREMENT,
+   TREEMANIFEST_REQUIREMENT,
+}
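A sketch of how a stream-clone safety check could consume the new set (the helper name is hypothetical; only STREAM_FIXED_REQUIREMENTS itself comes from the diff):

def stream_clone_compatible(source_reqs, dest_reqs):
    # Requirements outside STREAM_FIXED_REQUIREMENTS may differ between the
    # two sides of a stream clone; the fixed ones must match exactly.
    changed = set(source_reqs) ^ set(dest_reqs)
    return not (changed & STREAM_FIXED_REQUIREMENTS)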
@@ -40,11 +40,13 @@ from .revlogutils.constants import (
    COMP_MODE_DEFAULT,
    COMP_MODE_INLINE,
    COMP_MODE_PLAIN,
+   ENTRY_RANK,
    FEATURES_BY_VERSION,
    FLAG_GENERALDELTA,
    FLAG_INLINE_DATA,
    INDEX_HEADER,
    KIND_CHANGELOG,
+   RANK_UNKNOWN,
    REVLOGV0,
    REVLOGV1,
    REVLOGV1_FLAGS,
@@ -101,6 +103,7 @@ from .utils import (
REVLOGV0
REVLOGV1
REVLOGV2
+CHANGELOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
@@ -199,16 +202,13 @@ def parse_index_v1(data, inline):

def parse_index_v2(data, inline):
    # call the C implementation to parse the index data
-   index, cache = parsers.parse_index2(data, inline, revlogv2=True)
+   index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
    return index, cache


def parse_index_cl_v2(data, inline):
    # call the C implementation to parse the index data
-   assert not inline
-   from .pure.parsers import parse_index_cl_v2
-
-   index, cache = parse_index_cl_v2(data)
+   index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
    return index, cache


@@ -741,21 +741,6 @@ class revlog(object):
        """iterate over all rev in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)

-   @property
-   def nodemap(self):
-       msg = (
-           b"revlog.nodemap is deprecated, "
-           b"use revlog.index.[has_node|rev|get_rev]"
-       )
-       util.nouideprecwarn(msg, b'5.3', stacklevel=2)
-       return self.index.nodemap
-
-   @property
-   def _nodecache(self):
-       msg = b"revlog._nodecache is deprecated, use revlog.index.nodemap"
-       util.nouideprecwarn(msg, b'5.3', stacklevel=2)
-       return self.index.nodemap
-
    def hasnode(self, node):
        try:
            self.rev(node)
@@ -870,7 +855,23 @@ class revlog(object):
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

-       return len(self.revision(rev, raw=False))
+       return len(self.revision(rev))
+
+   def fast_rank(self, rev):
+       """Return the rank of a revision if already known, or None otherwise.
+
+       The rank of a revision is the size of the sub-graph it defines as a
+       head. Equivalently, the rank of a revision `r` is the size of the set
+       `ancestors(r)`, `r` included.
+
+       This method returns the rank retrieved from the revlog in constant
+       time. It makes no attempt at computing unknown values for versions of
+       the revlog which do not persist the rank.
+       """
+       rank = self.index[rev][ENTRY_RANK]
+       if rank == RANK_UNKNOWN:
+           return None
+       return rank

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
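For contrast with the constant-time lookup above, here is a naive O(|ancestors|) computation of the same quantity (illustrative only; parentrevs() is the standard revlog accessor and -1 is nullrev):

def slow_rank(rlog, rev):
    # rank(rev) = |ancestors(rev)| with rev itself included, null excluded
    seen = set()
    stack = [rev]
    while stack:
        r = stack.pop()
        if r == -1 or r in seen:
            continue
        seen.add(r)
        stack.extend(rlog.parentrevs(r))
    return len(seen)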
@@ -1776,33 +1777,13 @@ class revlog(object):

        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))

-   def _processflags(self, text, flags, operation, raw=False):
-       """deprecated entry point to access flag processors"""
-       msg = b'_processflag(...) use the specialized variant'
-       util.nouideprecwarn(msg, b'5.2', stacklevel=2)
-       if raw:
-           return text, flagutil.processflagsraw(self, text, flags)
-       elif operation == b'read':
-           return flagutil.processflagsread(self, text, flags)
-       else:  # write operation
-           return flagutil.processflagswrite(self, text, flags)
-
-   def revision(self, nodeorrev, _df=None, raw=False):
+   def revision(self, nodeorrev, _df=None):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
-       raw - an optional argument specifying if the revision data is to be
-       treated as raw data when applying flag transforms. 'raw' should be set
-       to True when generating changegroups or in debug commands.
        """
-       if raw:
-           msg = (
-               b'revlog.revision(..., raw=True) is deprecated, '
-               b'use revlog.rawdata(...)'
-           )
-           util.nouideprecwarn(msg, b'5.2', stacklevel=2)
-           return self._revisiondata(nodeorrev, _df, raw=raw)
+       return self._revisiondata(nodeorrev, _df)

    def sidedata(self, nodeorrev, _df=None):
        """a map of extra data related to the changeset but not part of the hash
@@ -2479,6 +2460,19 @@ class revlog(object):
        # than ones we manually add.
        sidedata_offset = 0

+       rank = RANK_UNKNOWN
+       if self._format_version == CHANGELOGV2:
+           if (p1r, p2r) == (nullrev, nullrev):
+               rank = 1
+           elif p1r != nullrev and p2r == nullrev:
+               rank = 1 + self.fast_rank(p1r)
+           elif p1r == nullrev and p2r != nullrev:
+               rank = 1 + self.fast_rank(p2r)
+           else:  # merge node
+               pmin, pmax = sorted((p1r, p2r))
+               rank = 1 + self.fast_rank(pmax)
+               rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))
+
        e = revlogutils.entry(
            flags=flags,
            data_offset=offset,
@@ -2493,6 +2487,7 @@ class revlog(object):
            sidedata_offset=sidedata_offset,
            sidedata_compressed_length=len(serialized_sidedata),
            sidedata_compression_mode=sidedata_compression_mode,
+           rank=rank,
        )

        self.index.append(e)
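The merge branch above computes rank(merge) = 1 + rank(pmax) + |ancestors(pmin) \ ancestors(pmax)|, where the last term is exactly what findmissingrevs([pmax], [pmin]) enumerates. A worked case with hypothetical numbers:

rank_pmax = 5  # pmax plus its four ancestors
missing = 2    # revs reachable from pmin but not from pmax
rank_merge = 1 + rank_pmax + missing  # the merge node itself counts once
assert rank_merge == 8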
@@ -12,6 +12,7 @@ from ..interfaces import repository

# See mercurial.revlogutils.constants for doc
COMP_MODE_INLINE = 2
+RANK_UNKNOWN = -1


def offset_type(offset, type):
@@ -34,6 +35,7 @@ def entry(
    sidedata_offset=0,
    sidedata_compressed_length=0,
    sidedata_compression_mode=COMP_MODE_INLINE,
+   rank=RANK_UNKNOWN,
):
    """Build one entry from symbolic name

@@ -56,6 +58,7 @@ def entry(
        sidedata_compressed_length,
        data_compression_mode,
        sidedata_compression_mode,
+       rank,
    )

@@ -103,6 +103,17 @@ ENTRY_DATA_COMPRESSION_MODE = 10
# (see "COMP_MODE_*" constants for details)
ENTRY_SIDEDATA_COMPRESSION_MODE = 11

+# [12] Revision rank:
+#      The number of revisions under this one.
+#
+#      Formally this is defined as: rank(X) = len(ancestors(X) + X)
+#
+#      If rank == -1, then we do not have this information available.
+#      Only `null` has a rank of 0.
+ENTRY_RANK = 12
+
+RANK_UNKNOWN = -1
+
### main revlog header

# We cannot rely on Struct.format: it is inconsistent for python <=3.6 versus above
@@ -181,9 +192,20 @@ assert INDEX_ENTRY_V2.size == 32 * 3, IN
# 8 bytes: sidedata offset
# 4 bytes: sidedata compressed length
# 1 byte: compression mode (2 lower bits are data_compression_mode)
-# 27 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
-INDEX_ENTRY_CL_V2 = struct.Struct(b">Qiiii20s12xQiB27x")
-assert INDEX_ENTRY_CL_V2.size == 32 * 3, INDEX_ENTRY_V2.size
+# 4 bytes: changeset rank (i.e. `len(::REV)`)
+# 23 bytes: Padding to align to 96 bytes (see RevlogV2Plan wiki page)
+INDEX_ENTRY_CL_V2 = struct.Struct(b">Qiiii20s12xQiBi23x")
+assert INDEX_ENTRY_CL_V2.size == 32 * 3, INDEX_ENTRY_CL_V2.size
+INDEX_ENTRY_V2_IDX_OFFSET = 0
+INDEX_ENTRY_V2_IDX_COMPRESSED_LENGTH = 1
+INDEX_ENTRY_V2_IDX_UNCOMPRESSED_LENGTH = 2
+INDEX_ENTRY_V2_IDX_PARENT_1 = 3
+INDEX_ENTRY_V2_IDX_PARENT_2 = 4
+INDEX_ENTRY_V2_IDX_NODEID = 5
+INDEX_ENTRY_V2_IDX_SIDEDATA_OFFSET = 6
+INDEX_ENTRY_V2_IDX_SIDEDATA_COMPRESSED_LENGTH = 7
+INDEX_ENTRY_V2_IDX_COMPRESSION_MODE = 8
+INDEX_ENTRY_V2_IDX_RANK = 9

# revlog index flags

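The new 96-byte layout can be checked directly from the struct format (standard library only):

import struct

entry = struct.Struct(b">Qiiii20s12xQiBi23x")
# Q(8) + 4*i(16) + 20s(20) + 12x(12) + Q(8) + i(4) + B(1) + i(4) + 23x(23) = 96
assert entry.size == 96 == 32 * 3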
@@ -526,7 +526,7 @@ def _textfromdelta(fh, revlog, baserev, 
    else:
        # deltabase is rawtext before changed by flag processors, which is
        # equivalent to non-raw text
-       basetext = revlog.revision(baserev, _df=fh, raw=False)
+       basetext = revlog.revision(baserev, _df=fh)
    fulltext = mdiff.patch(basetext, delta)

    try:
@@ -32,6 +32,7 @@ REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS

+# Keep this in sync with REVIDX_KNOWN_FLAGS in rust/hg-core/src/revlog/revlog.rs
REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)

# Store flag processors (cf. 'addflagprocessor()' to register)
@@ -16,6 +16,7 @@ from ..node import hex

from .. import (
    error,
+   requirements,
    util,
)
from . import docket as docket_mod
@@ -34,6 +35,19 @@ def test_race_hook_1():
    pass


+def post_stream_cleanup(repo):
+   """The stream clone might need to remove some files if the persistent
+   nodemap was dropped while stream cloning
+   """
+   if requirements.REVLOGV1_REQUIREMENT not in repo.requirements:
+       return
+   if requirements.NODEMAP_REQUIREMENT in repo.requirements:
+       return
+   unfi = repo.unfiltered()
+   delete_nodemap(None, unfi, unfi.changelog)
+   delete_nodemap(None, repo, unfi.manifestlog._rootstore._revlog)
+
+
def persisted_data(revlog):
    """read the nodemap for a revlog from disk"""
    if revlog._nodemap_file is None:
@@ -144,10 +158,12 @@ def update_persistent_nodemap(revlog):

def delete_nodemap(tr, repo, revlog):
    """Delete nodemap data on disk for a given revlog"""
-   if revlog._nodemap_file is None:
-       msg = "calling persist nodemap on a revlog without the feature enabled"
-       raise error.ProgrammingError(msg)
-   repo.svfs.tryunlink(revlog._nodemap_file)
+   prefix = revlog.radix
+   pattern = re.compile(br"(^|/)%s(-[0-9a-f]+\.nd|\.n(\.a)?)$" % prefix)
+   dirpath = revlog.opener.dirname(revlog._indexfile)
+   for f in revlog.opener.listdir(dirpath):
+       if pattern.match(f):
+           repo.svfs.tryunlink(f)


def persist_nodemap(tr, revlog, pending=False, force=False):
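To see which store files the new pattern removes, here is a quick check for a revlog with radix "00changelog" (the filenames are illustrative examples):

import re

pattern = re.compile(br"(^|/)00changelog(-[0-9a-f]+\.nd|\.n(\.a)?)$")
assert pattern.match(b"00changelog.n")            # nodemap docket
assert pattern.match(b"00changelog.n.a")          # pending docket
assert pattern.match(b"00changelog-1234abcd.nd")  # nodemap data file
assert not pattern.match(b"00changelog.i")        # the index itself stays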
@@ -47,12 +47,6 @@ class revlogoldindex(list):
            node_id=sha1nodeconstants.nullid,
        )

-   @property
-   def nodemap(self):
-       msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
-       util.nouideprecwarn(msg, b'5.3', stacklevel=2)
-       return self._nodemap
-
    @util.propertycache
    def _nodemap(self):
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: node.nullrev})
@@ -180,6 +180,8 @@ def callcatch(ui, func):
            )
        )
    except error.RepoError as inst:
+       if isinstance(inst, error.RepoLookupError):
+           detailed_exit_code = 10
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
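A paraphrase of what the added branch does (not a copy of callcatch; 10 is Mercurial's detailed exit code for input errors, used here on the assumption that a lookup failure is bad user input):

def classify(inst):
    # RepoLookupError usually means the user named a revision or repository
    # that does not exist, hence the "input error" detailed exit code.
    if isinstance(inst, error.RepoLookupError):
        return 10
    return -1  # keep the generic abort code for other RepoErrors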
@@ -341,13 +343,13 @@ class casecollisionauditor(object):
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
-               raise error.Abort(msg)
+               raise error.StateError(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)


-def filteredhash(repo, maxrev):
+def filteredhash(repo, maxrev, needobsolete=False):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
@@ -356,22 +358,31 @@ def filteredhash(repo, maxrev):
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

-   This function hashes all the revs filtered from the view and returns
-   that SHA-1 digest.
+   This function hashes all the revs filtered from the view (and, optionally,
+   all obsolete revs) up to maxrev and returns that SHA-1 digest.
    """
    cl = repo.changelog
-   if not cl.filteredrevs:
-       return None
-   key = cl._filteredrevs_hashcache.get(maxrev)
-   if not key:
-       revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
+   if needobsolete:
+       obsrevs = obsolete.getrevs(repo, b'obsolete')
+       if not cl.filteredrevs and not obsrevs:
+           return None
+       key = (maxrev, hash(cl.filteredrevs), hash(obsrevs))
+   else:
+       if not cl.filteredrevs:
+           return None
+       key = maxrev
+       obsrevs = frozenset()
+
+   result = cl._filteredrevs_hashcache.get(key)
+   if not result:
+       revs = sorted(r for r in cl.filteredrevs | obsrevs if r <= maxrev)
        if revs:
            s = hashutil.sha1()
            for rev in revs:
                s.update(b'%d;' % rev)
-           key = s.digest()
-   cl._filteredrevs_hashcache[maxrev] = key
-   return key
+           result = s.digest()
+       cl._filteredrevs_hashcache[key] = result
+   return result


def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
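A sketch of the two cache-key shapes used above (illustrative values):

maxrev = 1200
filtered = frozenset({7, 9})
obsrevs = frozenset({11})

key_plain = maxrev                                 # needobsolete=False
key_obs = (maxrev, hash(filtered), hash(obsrevs))  # needobsolete=True
# Folding the sets' hashes into the key keeps a cached digest from being
# reused when the filtered or obsolete sets change for the same maxrev.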
@@ -2195,6 +2206,9 @@ def unhidehashlikerevs(repo, specs, hidd

    returns a repo object with the required changesets unhidden
    """
+   if not specs:
+       return repo
+
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
@@ -1000,7 +1000,11 @@ def _rebaserestoredcommit(
            stats = merge.graft(
                repo,
                shelvectx,
-               labels=[b'working-copy', b'shelve'],
+               labels=[
+                   b'working-copy',
+                   b'shelved change',
+                   b'parent of shelved change',
+               ],
                keepconflictparent=True,
            )
            if stats.unresolvedcount:
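With three labels, a conflict region rendered with base markers would look roughly like this (illustrative file content; the `|||||||` section only appears with marker styles that emit the base, such as :merge3):

<<<<<<< working-copy
line as it is in the working copy
||||||| parent of shelved change
line as it was when the change was shelved
=======
line as recorded in the shelved change
>>>>>>> shelved change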
@@ -19,20 +19,14 @@

from __future__ import absolute_import

from .i18n import _
-from .node import nullrev
from . import (
    error,
    mdiff,
    pycompat,
-   util,
)
from .utils import stringutil


-class CantReprocessAndShowBase(Exception):
-   pass
-
-
def intersect(ra, rb):
    """Given two ranges return the range where they intersect or None.

@@ -89,72 +83,6 @@ class Merge3Text(object):
        self.a = a
        self.b = b

-   def merge_lines(
-       self,
-       name_a=None,
-       name_b=None,
-       name_base=None,
-       start_marker=b'<<<<<<<',
-       mid_marker=b'=======',
-       end_marker=b'>>>>>>>',
-       base_marker=None,
-       localorother=None,
-       minimize=False,
-   ):
-       """Return merge in cvs-like form."""
-       self.conflicts = False
-       newline = b'\n'
-       if len(self.a) > 0:
-           if self.a[0].endswith(b'\r\n'):
-               newline = b'\r\n'
-           elif self.a[0].endswith(b'\r'):
-               newline = b'\r'
-       if name_a and start_marker:
-           start_marker = start_marker + b' ' + name_a
-       if name_b and end_marker:
-           end_marker = end_marker + b' ' + name_b
-       if name_base and base_marker:
-           base_marker = base_marker + b' ' + name_base
-       merge_regions = self.merge_regions()
-       if minimize:
-           merge_regions = self.minimize(merge_regions)
-       for t in merge_regions:
-           what = t[0]
-           if what == b'unchanged':
-               for i in range(t[1], t[2]):
-                   yield self.base[i]
-           elif what == b'a' or what == b'same':
-               for i in range(t[1], t[2]):
-                   yield self.a[i]
-           elif what == b'b':
-               for i in range(t[1], t[2]):
-                   yield self.b[i]
-           elif what == b'conflict':
-               if localorother == b'local':
-                   for i in range(t[3], t[4]):
-                       yield self.a[i]
-               elif localorother == b'other':
-                   for i in range(t[5], t[6]):
-                       yield self.b[i]
-               else:
-                   self.conflicts = True
-                   if start_marker is not None:
-                       yield start_marker + newline
-                   for i in range(t[3], t[4]):
-                       yield self.a[i]
-                   if base_marker is not None:
-                       yield base_marker + newline
-                   for i in range(t[1], t[2]):
-                       yield self.base[i]
-                   if mid_marker is not None:
-                       yield mid_marker + newline
-                   for i in range(t[5], t[6]):
-                       yield self.b[i]
-                   if end_marker is not None:
-                       yield end_marker + newline
-           else:
-               raise ValueError(what)
-
    def merge_groups(self):
        """Yield sequence of line groups. Each one is a tuple:

@@ -170,7 +98,7 @@ class Merge3Text(object):
        'b', lines
            Lines taken from b

-       'conflict', base_lines, a_lines, b_lines
+       'conflict', (base_lines, a_lines, b_lines)
            Lines from base were changed to either a or b and conflict.
        """
        for t in self.merge_regions():
@@ -184,9 +112,11 @@ class Merge3Text(object):
            elif what == b'conflict':
                yield (
                    what,
-                   self.base[t[1] : t[2]],
-                   self.a[t[3] : t[4]],
-                   self.b[t[5] : t[6]],
+                   (
+                       self.base[t[1] : t[2]],
+                       self.a[t[3] : t[4]],
+                       self.b[t[5] : t[6]],
+                   ),
                )
            else:
                raise ValueError(what)
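Since conflict groups now carry one nested tuple instead of three positional members, consumers unpack them like this (minimal sketch; m3 is assumed to be a Merge3Text instance and render_conflict a hypothetical helper):

def flatten(m3):
    out = []
    for what, lines in m3.merge_groups():
        if what == b'conflict':
            base_lines, a_lines, b_lines = lines  # one nested tuple now
            out.extend(render_conflict(base_lines, a_lines, b_lines))
        else:
            out.extend(lines)
    return out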
@@ -280,67 +210,6 b' class Merge3Text(object):' | |||||
280 | ia = aend |
|
210 | ia = aend | |
281 | ib = bend |
|
211 | ib = bend | |
282 |
|
212 | |||
283 | def minimize(self, merge_regions): |
|
|||
284 | """Trim conflict regions of lines where A and B sides match. |
|
|||
285 |
|
||||
286 | Lines where both A and B have made the same changes at the beginning |
|
|||
287 | or the end of each merge region are eliminated from the conflict |
|
|||
288 | region and are instead considered the same. |
|
|||
289 | """ |
|
|||
290 | for region in merge_regions: |
|
|||
291 | if region[0] != b"conflict": |
|
|||
292 | yield region |
|
|||
293 | continue |
|
|||
294 | # pytype thinks this tuple contains only 3 things, but |
|
|||
295 | # that's clearly not true because this code successfully |
|
|||
296 | # executes. It might be wise to rework merge_regions to be |
|
|||
297 | # some kind of attrs type. |
|
|||
298 | ( |
|
|||
299 | issue, |
|
|||
300 | z1, |
|
|||
301 | z2, |
|
|||
302 | a1, |
|
|||
303 | a2, |
|
|||
304 | b1, |
|
|||
305 | b2, |
|
|||
306 | ) = region # pytype: disable=bad-unpacking |
|
|||
307 | alen = a2 - a1 |
|
|||
308 | blen = b2 - b1 |
|
|||
309 |
|
||||
310 | # find matches at the front |
|
|||
311 | ii = 0 |
|
|||
312 | while ( |
|
|||
313 | ii < alen and ii < blen and self.a[a1 + ii] == self.b[b1 + ii] |
|
|||
314 | ): |
|
|||
315 | ii += 1 |
|
|||
316 | startmatches = ii |
|
|||
317 |
|
||||
318 | # find matches at the end |
|
|||
319 | ii = 0 |
|
|||
320 | while ( |
|
|||
321 | ii < alen |
|
|||
322 | and ii < blen |
|
|||
323 | and self.a[a2 - ii - 1] == self.b[b2 - ii - 1] |
|
|||
324 | ): |
|
|||
325 | ii += 1 |
|
|||
326 | endmatches = ii |
|
|||
327 |
|
||||
328 | if startmatches > 0: |
|
|||
329 | yield b'same', a1, a1 + startmatches |
|
|||
330 |
|
||||
331 | yield ( |
|
|||
332 | b'conflict', |
|
|||
333 | z1, |
|
|||
334 | z2, |
|
|||
335 | a1 + startmatches, |
|
|||
336 | a2 - endmatches, |
|
|||
337 | b1 + startmatches, |
|
|||
338 | b2 - endmatches, |
|
|||
339 | ) |
|
|||
340 |
|
||||
341 | if endmatches > 0: |
|
|||
342 | yield b'same', a2 - endmatches, a2 |
|
|||
343 |
|
||||
344 | def find_sync_regions(self): |
|
213 | def find_sync_regions(self): | |
345 | """Return a list of sync regions, where both descendants match the base. |
|
214 | """Return a list of sync regions, where both descendants match the base. | |
346 |
|
215 | |||
@@ -403,39 +272,136 b' class Merge3Text(object):' | |||||
403 | return sl |
|
272 | return sl | |
404 |
|
273 | |||
405 |
|
274 | |||
406 |
def _verifytext( |
|
275 | def _verifytext(input): | |
407 | """verifies that text is non-binary (unless opts[text] is passed, |
|
276 | """verifies that text is non-binary (unless opts[text] is passed, | |
408 | then we just warn)""" |
|
277 | then we just warn)""" | |
409 | if stringutil.binary(text): |
|
278 | if stringutil.binary(input.text()): | |
410 | msg = _(b"%s looks like a binary file.") % path |
|
279 | msg = _(b"%s looks like a binary file.") % input.fctx.path() | |
411 | if not opts.get('quiet'): |
|
280 | raise error.Abort(msg) | |
412 | ui.warn(_(b'warning: %s\n') % msg) |
|
281 | ||
413 | if not opts.get('text'): |
|
282 | ||
414 | raise error.Abort(msg) |
|
283 | def _format_labels(*inputs): | |
415 | return text |
|
284 | pad = max(len(input.label) if input.label else 0 for input in inputs) | |
|
285 | labels = [] | |||
|
286 | for input in inputs: | |||
|
287 | if input.label: | |||
|
288 | if input.label_detail: | |||
|
289 | label = ( | |||
|
290 | (input.label + b':').ljust(pad + 1) | |||
|
291 | + b' ' | |||
|
292 | + input.label_detail | |||
|
293 | ) | |||
|
294 | else: | |||
|
295 | label = input.label | |||
|
296 | # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ') | |||
|
297 | labels.append(stringutil.ellipsis(label, 80 - 8)) | |||
|
298 | else: | |||
|
299 | labels.append(None) | |||
|
300 | return labels | |||
|
301 | ||||
|
302 | ||||
|
303 | def _detect_newline(m3): | |||
|
304 | if len(m3.a) > 0: | |||
|
305 | if m3.a[0].endswith(b'\r\n'): | |||
|
306 | return b'\r\n' | |||
|
307 | elif m3.a[0].endswith(b'\r'): | |||
|
308 | return b'\r' | |||
|
309 | return b'\n' | |||
416 |
|
310 | |||
417 |
|
311 | |||
418 | def _picklabels(defaults, overrides): |
|
312 | def _minimize(a_lines, b_lines): | |
419 | if len(overrides) > 3: |
|
313 | """Trim conflict regions of lines where A and B sides match. | |
420 | raise error.Abort(_(b"can only specify three labels.")) |
|
314 | ||
421 | result = defaults[:] |
|
315 | Lines where both A and B have made the same changes at the beginning | |
422 | for i, override in enumerate(overrides): |
|
316 | or the end of each merge region are eliminated from the conflict | |
423 | result[i] = override |
|
317 | region and are instead considered the same. | |
424 | return result |
|
318 | """ | |
|
319 | alen = len(a_lines) | |||
|
320 | blen = len(b_lines) | |||
|
321 | ||||
|
322 | # find matches at the front | |||
|
323 | ii = 0 | |||
|
324 | while ii < alen and ii < blen and a_lines[ii] == b_lines[ii]: | |||
|
325 | ii += 1 | |||
|
326 | startmatches = ii | |||
|
327 | ||||
|
328 | # find matches at the end | |||
|
329 | ii = 0 | |||
|
330 | while ii < alen and ii < blen and a_lines[-ii - 1] == b_lines[-ii - 1]: | |||
|
331 | ii += 1 | |||
|
332 | endmatches = ii | |||
|
333 | ||||
|
334 | lines_before = a_lines[:startmatches] | |||
|
335 | new_a_lines = a_lines[startmatches : alen - endmatches] | |||
|
336 | new_b_lines = b_lines[startmatches : blen - endmatches] | |||
|
337 | lines_after = a_lines[alen - endmatches :] | |||
|
338 | return lines_before, new_a_lines, new_b_lines, lines_after | |||
425 |
|
339 | |||
426 |
|
340 | |||
427 | def is_not_null(ctx): |
|
341 | def render_minimized( | |
428 | if not util.safehasattr(ctx, "node"): |
|
342 | m3, | |
429 | return False |
|
343 | name_a=None, | |
430 | return ctx.rev() != nullrev |
|
344 | name_b=None, | |
|
345 | start_marker=b'<<<<<<<', | |||
|
346 | mid_marker=b'=======', | |||
|
347 | end_marker=b'>>>>>>>', | |||
|
348 | ): | |||
|
349 | """Return merge in cvs-like form.""" | |||
|
350 | newline = _detect_newline(m3) | |||
|
351 | conflicts = False | |||
|
352 | if name_a: | |||
|
353 | start_marker = start_marker + b' ' + name_a | |||
|
354 | if name_b: | |||
|
355 | end_marker = end_marker + b' ' + name_b | |||
|
356 | merge_groups = m3.merge_groups() | |||
|
357 | lines = [] | |||
|
358 | for what, group_lines in merge_groups: | |||
|
359 | if what == b'conflict': | |||
|
360 | conflicts = True | |||
|
361 | base_lines, a_lines, b_lines = group_lines | |||
|
362 | minimized = _minimize(a_lines, b_lines) | |||
|
363 | lines_before, a_lines, b_lines, lines_after = minimized | |||
|
364 | lines.extend(lines_before) | |||
|
365 | lines.append(start_marker + newline) | |||
|
366 | lines.extend(a_lines) | |||
|
367 | lines.append(mid_marker + newline) | |||
|
368 | lines.extend(b_lines) | |||
|
369 | lines.append(end_marker + newline) | |||
|
370 | lines.extend(lines_after) | |||
|
371 | else: | |||
|
372 | lines.extend(group_lines) | |||
|
373 | return lines, conflicts | |||
431 |
|
374 | |||
432 |
|
375 | |||
433 | def _mergediff(m3, name_a, name_b, name_base): |
|
376 | def render_merge3(m3, name_a, name_b, name_base): | |
|
377 | """Render conflicts as 3-way conflict markers.""" | |||
|
378 | newline = _detect_newline(m3) | |||
|
379 | conflicts = False | |||
|
380 | lines = [] | |||
|
381 | for what, group_lines in m3.merge_groups(): | |||
|
382 | if what == b'conflict': | |||
|
383 | base_lines, a_lines, b_lines = group_lines | |||
|
384 | conflicts = True | |||
|
385 | lines.append(b'<<<<<<< ' + name_a + newline) | |||
|
386 | lines.extend(a_lines) | |||
|
387 | lines.append(b'||||||| ' + name_base + newline) | |||
|
388 | lines.extend(base_lines) | |||
|
389 | lines.append(b'=======' + newline) | |||
|
390 | lines.extend(b_lines) | |||
|
391 | lines.append(b'>>>>>>> ' + name_b + newline) | |||
|
392 | else: | |||
|
393 | lines.extend(group_lines) | |||
|
394 | return lines, conflicts | |||
|
395 | ||||
|
396 | ||||
|
397 | def render_mergediff(m3, name_a, name_b, name_base): | |||
|
398 | """Render conflicts as conflict markers with one snapshot and one diff.""" | |||
|
399 | newline = _detect_newline(m3) | |||
434 | lines = [] |
|
400 | lines = [] | |
435 | conflicts = False |
|
401 | conflicts = False | |
436 | for group in m3.merge_groups(): |
|
402 | for what, group_lines in m3.merge_groups(): | |
437 | if group[0] == b'conflict': |
|
403 | if what == b'conflict': | |
438 | base_lines, a_lines, b_lines = group[1:] |
|
404 | base_lines, a_lines, b_lines = group_lines | |
439 | base_text = b''.join(base_lines) |
|
405 | base_text = b''.join(base_lines) | |
440 | b_blocks = list( |
|
406 | b_blocks = list( | |
441 | mdiff.allblocks( |
|
407 | mdiff.allblocks( | |
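The new module-level `_minimize` above trims lines that both sides changed identically off the front and back of a conflict region before any markers are emitted. A standalone sketch of the same idea on plain byte strings (not Mercurial's API itself; the explicit guard keeping the prefix and suffix scans from overlapping is an addition here):

    # Sketch of conflict minimization: the shared prefix/suffix of the two
    # conflicting sides is moved out of the conflict region.
    def minimize(a_lines, b_lines):
        alen, blen = len(a_lines), len(b_lines)
        start = 0
        while start < alen and start < blen and a_lines[start] == b_lines[start]:
            start += 1  # A and B changed the same way, at the front
        end = 0
        while (end < alen - start and end < blen - start
               and a_lines[-end - 1] == b_lines[-end - 1]):
            end += 1    # same, at the back
        return (a_lines[:start],              # emitted before the markers
                a_lines[start:alen - end],    # A side of the conflict
                b_lines[start:blen - end],    # B side of the conflict
                a_lines[alen - end:])         # emitted after the markers

    before, a, b, after = minimize([b'x\n', b'a\n', b'z\n'],
                                   [b'x\n', b'b\n', b'z\n'])
    assert (before, a, b, after) == ([b'x\n'], [b'a\n'], [b'b\n'], [b'z\n'])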
@@ -472,95 +438,95 b' def _mergediff(m3, name_a, name_b, name_' | |||||
472 | for line in lines2[block[2] : block[3]]: |
|
438 | for line in lines2[block[2] : block[3]]: | |
473 | yield b'+' + line |
|
439 | yield b'+' + line | |
474 |
|
440 | |||
475 | lines.append(b"<<<<<<<\n") |
|
441 | lines.append(b"<<<<<<<" + newline) | |
476 | if matching_lines(a_blocks) < matching_lines(b_blocks): |
|
442 | if matching_lines(a_blocks) < matching_lines(b_blocks): | |
477 | lines.append(b"======= %s\n" % name_a) |
|
443 | lines.append(b"======= " + name_a + newline) | |
478 | lines.extend(a_lines) |
|
444 | lines.extend(a_lines) | |
479 | lines.append(b"------- %s\n" % name_base) |
|
445 | lines.append(b"------- " + name_base + newline) | |
480 | lines.append(b"+++++++ %s\n" % name_b) |
|
446 | lines.append(b"+++++++ " + name_b + newline) | |
481 | lines.extend(diff_lines(b_blocks, base_lines, b_lines)) |
|
447 | lines.extend(diff_lines(b_blocks, base_lines, b_lines)) | |
482 | else: |
|
448 | else: | |
483 | lines.append(b"------- %s\n" % name_base) |
|
449 | lines.append(b"------- " + name_base + newline) | |
484 | lines.append(b"+++++++ %s\n" % name_a) |
|
450 | lines.append(b"+++++++ " + name_a + newline) | |
485 | lines.extend(diff_lines(a_blocks, base_lines, a_lines)) |
|
451 | lines.extend(diff_lines(a_blocks, base_lines, a_lines)) | |
486 | lines.append(b"======= %s\n" % name_b) |
|
452 | lines.append(b"======= " + name_b + newline) | |
487 | lines.extend(b_lines) |
|
453 | lines.extend(b_lines) | |
488 | lines.append(b">>>>>>>\n") |
|
454 | lines.append(b">>>>>>>" + newline) | |
489 | conflicts = True |
|
455 | conflicts = True | |
490 | else: |
|
456 | else: | |
491 | lines.extend(group[1]) |
|
457 | lines.extend(group_lines) | |
492 | return lines, conflicts |
|
458 | return lines, conflicts | |
493 |
|
459 | |||
494 |
|
460 | |||
495 | def simplemerge(ui, localctx, basectx, otherctx, **opts): |
|
461 | def _resolve(m3, sides): | |
|
462 | lines = [] | |||
|
463 | for what, group_lines in m3.merge_groups(): | |||
|
464 | if what == b'conflict': | |||
|
465 | for side in sides: | |||
|
466 | lines.extend(group_lines[side]) | |||
|
467 | else: | |||
|
468 | lines.extend(group_lines) | |||
|
469 | return lines | |||
|
470 | ||||
|
471 | ||||
|
472 | class MergeInput(object): | |||
|
473 | def __init__(self, fctx, label=None, label_detail=None): | |||
|
474 | self.fctx = fctx | |||
|
475 | self.label = label | |||
|
476 | # If the "detail" part is set, then that is rendered after the label and | |||
|
477 | # separated by a ':'. The label is padded to make the ':' aligned among | |||
|
478 | # all merge inputs. | |||
|
479 | self.label_detail = label_detail | |||
|
480 | self._text = None | |||
|
481 | ||||
|
482 | def text(self): | |||
|
483 | if self._text is None: | |||
|
484 | # Merges were always run in the working copy before, which means | |||
|
485 | # they used decoded data, if the user defined any repository | |||
|
486 | # filters. | |||
|
487 | # | |||
|
488 | # Maintain that behavior today for BC, though perhaps in the future | |||
|
489 | # it'd be worth considering whether merging encoded data (what the | |||
|
490 | # repository usually sees) might be more useful. | |||
|
491 | self._text = self.fctx.decodeddata() | |||
|
492 | return self._text | |||
|
493 | ||||
|
494 | ||||
|
495 | def simplemerge( | |||
|
496 | local, | |||
|
497 | base, | |||
|
498 | other, | |||
|
499 | mode=b'merge', | |||
|
500 | allow_binary=False, | |||
|
501 | ): | |||
496 | """Performs the simplemerge algorithm. |
|
502 | """Performs the simplemerge algorithm. | |
497 |
|
503 | |||
498 | The merged result is written into `localctx`. |
|
504 | The merged result is written into `localctx`. | |
499 | """ |
|
505 | """ | |
500 |
|
506 | |||
501 | def readctx(ctx): |
|
507 | if not allow_binary: | |
502 | # Merges were always run in the working copy before, which means |
|
508 | _verifytext(local) | |
503 | # they used decoded data, if the user defined any repository |
|
509 | _verifytext(base) | |
504 | # filters. |
|
510 | _verifytext(other) | |
505 | # |
|
|||
506 | # Maintain that behavior today for BC, though perhaps in the future |
|
|||
507 | # it'd be worth considering whether merging encoded data (what the |
|
|||
508 | # repository usually sees) might be more useful. |
|
|||
509 | return _verifytext(ctx.decodeddata(), ctx.path(), ui, opts) |
|
|||
510 |
|
||||
511 | mode = opts.get('mode', b'merge') |
|
|||
512 | name_a, name_b, name_base = None, None, None |
|
|||
513 | if mode != b'union': |
|
|||
514 | name_a, name_b, name_base = _picklabels( |
|
|||
515 | [localctx.path(), otherctx.path(), None], opts.get('label', []) |
|
|||
516 | ) |
|
|||
517 |
|
||||
518 | try: |
|
|||
519 | localtext = readctx(localctx) |
|
|||
520 | basetext = readctx(basectx) |
|
|||
521 | othertext = readctx(otherctx) |
|
|||
522 | except error.Abort: |
|
|||
523 | return 1 |
|
|||
524 |
|
511 | |||
525 | m3 = Merge3Text(basetext, localtext, othertext) |
|
512 | m3 = Merge3Text(base.text(), local.text(), other.text()) | |
526 | extrakwargs = { |
|
513 | conflicts = False | |
527 | b"localorother": opts.get("localorother", None), |
|
|||
528 | b'minimize': True, |
|
|||
529 | } |
|
|||
530 | if mode == b'union': |
|
514 | if mode == b'union': | |
531 | extrakwargs[b'start_marker'] = None |
|
515 | lines = _resolve(m3, (1, 2)) | |
532 | extrakwargs[b'mid_marker'] = None |
|
516 | elif mode == b'local': | |
533 | extrakwargs[b'end_marker'] = None |
|
517 | lines = _resolve(m3, (1,)) | |
534 | elif name_base is not None: |
|
518 | elif mode == b'other': | |
535 | extrakwargs[b'base_marker'] = b'|||||||' |
|
519 | lines = _resolve(m3, (2,)) | |
536 | extrakwargs[b'name_base'] = name_base |
|
|||
537 | extrakwargs[b'minimize'] = False |
|
|||
538 |
|
||||
539 | if mode == b'mergediff': |
|
|||
540 | lines, conflicts = _mergediff(m3, name_a, name_b, name_base) |
|
|||
541 | else: |
|
520 | else: | |
542 | lines = list( |
|
521 | if mode == b'mergediff': | |
543 | m3.merge_lines( |
|
522 | labels = _format_labels(local, other, base) | |
544 | name_a=name_a, name_b=name_b, **pycompat.strkwargs(extrakwargs) |
|
523 | lines, conflicts = render_mergediff(m3, *labels) | |
545 | ) |
|
524 | elif mode == b'merge3': | |
546 | ) |
|
525 | labels = _format_labels(local, other, base) | |
547 | conflicts = m3.conflicts |
|
526 | lines, conflicts = render_merge3(m3, *labels) | |
548 |
|
527 | else: | ||
549 | # merge flags if necessary |
|
528 | labels = _format_labels(local, other) | |
550 | flags = localctx.flags() |
|
529 | lines, conflicts = render_minimized(m3, *labels) | |
551 | localflags = set(pycompat.iterbytestr(flags)) |
|
|||
552 | otherflags = set(pycompat.iterbytestr(otherctx.flags())) |
|
|||
553 | if is_not_null(basectx) and localflags != otherflags: |
|
|||
554 | baseflags = set(pycompat.iterbytestr(basectx.flags())) |
|
|||
555 | commonflags = localflags & otherflags |
|
|||
556 | addedflags = (localflags ^ otherflags) - baseflags |
|
|||
557 | flags = b''.join(sorted(commonflags | addedflags)) |
|
|||
558 |
|
530 | |||
559 | mergedtext = b''.join(lines) |
|
531 | mergedtext = b''.join(lines) | |
560 | if opts.get('print'): |
|
532 | return mergedtext, conflicts | |
561 | ui.fout.write(mergedtext) |
|
|||
562 | else: |
|
|||
563 | localctx.write(mergedtext, flags) |
|
|||
564 |
|
||||
565 | if conflicts and not mode == b'union': |
|
|||
566 | return 1 |
|
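`render_merge3` above emits the three-way marker layout with a snapshot of the base between `|||||||` and `=======`. A minimal sketch of that layout on plain bytes, independent of `Merge3Text` and the real render functions:

    # Sketch of the merge3-style conflict layout produced above.
    def render_conflict(name_a, name_base, name_b, a, base, b, nl=b'\n'):
        out = [b'<<<<<<< ' + name_a + nl]
        out += a                                   # local side
        out.append(b'||||||| ' + name_base + nl)
        out += base                                # snapshot of the base
        out.append(b'=======' + nl)
        out += b                                   # other side
        out.append(b'>>>>>>> ' + name_b + nl)
        return b''.join(out)

    text = render_conflict(b'local', b'base', b'other',
                           [b'apple\n'], [b'fruit\n'], [b'banana\n'])
    assert text.startswith(b'<<<<<<< local\n') and b'|||||||' in text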
@@ -38,63 +38,66 b' def parseconfig(ui, raw, action):' | |||||
38 |
|
38 | |||
39 | Returns a tuple of includes, excludes, and profiles. |
|
39 | Returns a tuple of includes, excludes, and profiles. | |
40 | """ |
|
40 | """ | |
41 | includes = set() |
|
41 | with util.timedcm( | |
42 | excludes = set() |
|
42 | 'sparse.parseconfig(ui, %d bytes, action=%s)', len(raw), action | |
43 | profiles = set() |
|
43 | ): | |
44 | current = None |
|
44 | includes = set() | |
45 | havesection = False |
|
45 | excludes = set() | |
|
46 | profiles = set() | |||
|
47 | current = None | |||
|
48 | havesection = False | |||
46 |
|
49 | |||
47 | for line in raw.split(b'\n'): |
|
50 | for line in raw.split(b'\n'): | |
48 | line = line.strip() |
|
51 | line = line.strip() | |
49 | if not line or line.startswith(b'#'): |
|
52 | if not line or line.startswith(b'#'): | |
50 | # empty or comment line, skip |
|
53 | # empty or comment line, skip | |
51 | continue |
|
54 | continue | |
52 | elif line.startswith(b'%include '): |
|
55 | elif line.startswith(b'%include '): | |
53 | line = line[9:].strip() |
|
56 | line = line[9:].strip() | |
54 | if line: |
|
57 | if line: | |
55 | profiles.add(line) |
|
58 | profiles.add(line) | |
56 | elif line == b'[include]': |
|
59 | elif line == b'[include]': | |
57 | if havesection and current != includes: |
|
60 | if havesection and current != includes: | |
58 | # TODO pass filename into this API so we can report it. |
|
61 | # TODO pass filename into this API so we can report it. | |
59 | raise error.Abort( |
|
62 | raise error.Abort( | |
60 | _( |
|
63 | _( | |
61 | b'%(action)s config cannot have includes ' |
|
64 | b'%(action)s config cannot have includes ' | |
62 | b'after excludes' |
|
65 | b'after excludes' | |
|
66 | ) | |||
|
67 | % {b'action': action} | |||
63 | ) |
|
68 | ) | |
64 | % {b'action': action} |
|
69 | havesection = True | |
65 | ) |
|
70 | current = includes | |
66 | havesection = True |
|
71 | continue | |
67 | current = includes |
|
72 | elif line == b'[exclude]': | |
68 | continue |
|
73 | havesection = True | |
69 | elif line == b'[exclude]': |
|
74 | current = excludes | |
70 | havesection = True |
|
75 | elif line: | |
71 | current = excludes |
|
76 | if current is None: | |
72 | elif line: |
|
77 | raise error.Abort( | |
73 | if current is None: |
|
78 | _( | |
74 | raise error.Abort( |
|
79 | b'%(action)s config entry outside of ' | |
75 | _( |
|
80 | b'section: %(line)s' | |
76 | b'%(action)s config entry outside of ' |
|
81 | ) | |
77 | b'section: %(line)s' |
|
82 | % {b'action': action, b'line': line}, | |
|
83 | hint=_( | |||
|
84 | b'add an [include] or [exclude] line ' | |||
|
85 | b'to declare the entry type' | |||
|
86 | ), | |||
78 | ) |
|
87 | ) | |
79 | % {b'action': action, b'line': line}, |
|
|||
80 | hint=_( |
|
|||
81 | b'add an [include] or [exclude] line ' |
|
|||
82 | b'to declare the entry type' |
|
|||
83 | ), |
|
|||
84 | ) |
|
|||
85 |
|
88 | |||
86 | if line.strip().startswith(b'/'): |
|
89 | if line.strip().startswith(b'/'): | |
87 | ui.warn( |
|
90 | ui.warn( | |
88 | _( |
|
91 | _( | |
89 | b'warning: %(action)s profile cannot use' |
|
92 | b'warning: %(action)s profile cannot use' | |
90 | b' paths starting with /, ignoring %(line)s\n' |
|
93 | b' paths starting with /, ignoring %(line)s\n' | |
|
94 | ) | |||
|
95 | % {b'action': action, b'line': line} | |||
91 | ) |
|
96 | ) | |
92 | % {b'action': action, b'line': line} |
|
97 | continue | |
93 | ) |
|
98 | current.add(line) | |
94 | continue |
|
|||
95 | current.add(line) |
|
|||
96 |
|
99 | |||
97 | return includes, excludes, profiles |
|
100 | return includes, excludes, profiles | |
98 |
|
101 | |||
99 |
|
102 | |||
100 | # Exists as separate function to facilitate monkeypatching. |
|
103 | # Exists as separate function to facilitate monkeypatching. | |
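`parseconfig` recognizes `%include` lines plus `[include]` and `[exclude]` sections, and now reports its own runtime through `util.timedcm`. A hypothetical profile and the sets it would produce (all paths invented for illustration):

    # Hypothetical sparse profile; for this input parseconfig() returns:
    #   profiles == {b'base.sparse'}
    #   includes == {b'src/', b'docs/README.txt'}
    #   excludes == {b'src/generated/'}
    raw = (b"%include base.sparse\n"
           b"[include]\n"
           b"src/\n"
           b"docs/README.txt\n"
           b"[exclude]\n"
           b"src/generated/\n")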
@@ -396,7 +399,7 b' def filterupdatesactions(repo, wctx, mct' | |||||
396 | temporaryfiles.append(file) |
|
399 | temporaryfiles.append(file) | |
397 | prunedactions[file] = action |
|
400 | prunedactions[file] = action | |
398 | elif branchmerge: |
|
401 | elif branchmerge: | |
399 | if type not in mergestatemod.NO_OP_ACTIONS: |
|
402 | if not type.no_op: | |
400 | temporaryfiles.append(file) |
|
403 | temporaryfiles.append(file) | |
401 | prunedactions[file] = action |
|
404 | prunedactions[file] = action | |
402 | elif type == mergestatemod.ACTION_FORGET: |
|
405 | elif type == mergestatemod.ACTION_FORGET: | |
@@ -600,38 +603,41 b' def _updateconfigandrefreshwdir(' | |||||
600 | repo, includes, excludes, profiles, force=False, removing=False |
|
603 | repo, includes, excludes, profiles, force=False, removing=False | |
601 | ): |
|
604 | ): | |
602 | """Update the sparse config and working directory state.""" |
|
605 | """Update the sparse config and working directory state.""" | |
603 | raw = repo.vfs.tryread(b'sparse') |
|
606 | with repo.lock(): | |
604 | oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, b'sparse') |
|
607 | raw = repo.vfs.tryread(b'sparse') | |
605 |
|
608 | oldincludes, oldexcludes, oldprofiles = parseconfig( | ||
606 | oldstatus = repo.status() |
|
609 | repo.ui, raw, b'sparse' | |
607 | oldmatch = matcher(repo) |
|
610 | ) | |
608 | oldrequires = set(repo.requirements) |
|
|||
609 |
|
611 | |||
610 | # TODO remove this try..except once the matcher integrates better |
|
612 | oldstatus = repo.status() | |
611 | # with dirstate. We currently have to write the updated config |
|
613 | oldmatch = matcher(repo) | |
612 | # because that will invalidate the matcher cache and force a |
|
614 | oldrequires = set(repo.requirements) | |
613 | # re-read. We ideally want to update the cached matcher on the |
|
615 | ||
614 | # repo instance then flush the new config to disk once wdir is |
|
616 | # TODO remove this try..except once the matcher integrates better | |
615 | # updated. But this requires massive rework to matcher() and its |
|
617 | # with dirstate. We currently have to write the updated config | |
616 | # consumers. |
|
618 | # because that will invalidate the matcher cache and force a | |
|
619 | # re-read. We ideally want to update the cached matcher on the | |||
|
620 | # repo instance then flush the new config to disk once wdir is | |||
|
621 | # updated. But this requires massive rework to matcher() and its | |||
|
622 | # consumers. | |||
617 |
|
623 | |||
618 | if requirements.SPARSE_REQUIREMENT in oldrequires and removing: |
|
624 | if requirements.SPARSE_REQUIREMENT in oldrequires and removing: | |
619 | repo.requirements.discard(requirements.SPARSE_REQUIREMENT) |
|
625 | repo.requirements.discard(requirements.SPARSE_REQUIREMENT) | |
620 | scmutil.writereporequirements(repo) |
|
626 | scmutil.writereporequirements(repo) | |
621 | elif requirements.SPARSE_REQUIREMENT not in oldrequires: |
|
627 | elif requirements.SPARSE_REQUIREMENT not in oldrequires: | |
622 | repo.requirements.add(requirements.SPARSE_REQUIREMENT) |
|
628 | repo.requirements.add(requirements.SPARSE_REQUIREMENT) | |
623 | scmutil.writereporequirements(repo) |
|
629 | scmutil.writereporequirements(repo) | |
624 |
|
630 | |||
625 | try: |
|
631 | try: | |
626 | writeconfig(repo, includes, excludes, profiles) |
|
632 | writeconfig(repo, includes, excludes, profiles) | |
627 | return refreshwdir(repo, oldstatus, oldmatch, force=force) |
|
633 | return refreshwdir(repo, oldstatus, oldmatch, force=force) | |
628 | except Exception: |
|
634 | except Exception: | |
629 | if repo.requirements != oldrequires: |
|
635 | if repo.requirements != oldrequires: | |
630 | repo.requirements.clear() |
|
636 | repo.requirements.clear() | |
631 | repo.requirements |= oldrequires |
|
637 | repo.requirements |= oldrequires | |
632 | scmutil.writereporequirements(repo) |
|
638 | scmutil.writereporequirements(repo) | |
633 | writeconfig(repo, oldincludes, oldexcludes, oldprofiles) |
|
639 | writeconfig(repo, oldincludes, oldexcludes, oldprofiles) | |
634 | raise |
|
640 | raise | |
635 |
|
641 | |||
636 |
|
642 | |||
637 | def clearrules(repo, force=False): |
|
643 | def clearrules(repo, force=False): | |
@@ -701,21 +707,18 b' def importfromfiles(repo, opts, paths, f' | |||||
701 |
|
707 | |||
702 | def updateconfig( |
|
708 | def updateconfig( | |
703 | repo, |
|
709 | repo, | |
704 | pats, |
|
|||
705 | opts, |
|
710 | opts, | |
706 | include=False, |
|
711 | include=(), | |
707 | exclude=False, |
|
712 | exclude=(), | |
708 | reset=False, |
|
713 | reset=False, | |
709 | delete=False, |
|
714 | delete=(), | |
710 | enableprofile=False, |
|
715 | enableprofile=(), | |
711 | disableprofile=False, |
|
716 | disableprofile=(), | |
712 | force=False, |
|
717 | force=False, | |
713 | usereporootpaths=False, |
|
718 | usereporootpaths=False, | |
714 | ): |
|
719 | ): | |
715 | """Perform a sparse config update. |
|
720 | """Perform a sparse config update. | |
716 |
|
721 | |||
717 | Only one of the actions may be performed. |
|
|||
718 |
|
||||
719 | The new config is written out and a working directory refresh is performed. |
|
722 | The new config is written out and a working directory refresh is performed. | |
720 | """ |
|
723 | """ | |
721 | with repo.wlock(), repo.lock(), repo.dirstate.parentchange(): |
|
724 | with repo.wlock(), repo.lock(), repo.dirstate.parentchange(): | |
@@ -733,10 +736,13 b' def updateconfig(' | |||||
733 | newexclude = set(oldexclude) |
|
736 | newexclude = set(oldexclude) | |
734 | newprofiles = set(oldprofiles) |
|
737 | newprofiles = set(oldprofiles) | |
735 |
|
738 | |||
736 | if any(os.path.isabs(pat) for pat in pats): |
|
739 | def normalize_pats(pats): | |
737 | raise error.Abort(_(b'paths cannot be absolute')) |
|
740 | if any(os.path.isabs(pat) for pat in pats): | |
|
741 | raise error.Abort(_(b'paths cannot be absolute')) | |||
738 |
|
742 | |||
739 |
if |
|
743 | if usereporootpaths: | |
|
744 | return pats | |||
|
745 | ||||
740 | # let's treat paths as relative to cwd |
|
746 | # let's treat paths as relative to cwd | |
741 | root, cwd = repo.root, repo.getcwd() |
|
747 | root, cwd = repo.root, repo.getcwd() | |
742 | abspats = [] |
|
748 | abspats = [] | |
@@ -749,19 +755,20 b' def updateconfig(' | |||||
749 | abspats.append(ap) |
|
755 | abspats.append(ap) | |
750 | else: |
|
756 | else: | |
751 | abspats.append(kindpat) |
|
757 | abspats.append(kindpat) | |
752 | pats = abspats |
|
758 | return abspats | |
753 |
|
759 | |||
754 | if include: |
|
760 | include = normalize_pats(include) | |
755 | newinclude.update(pats) |
|
761 | exclude = normalize_pats(exclude) | |
756 | elif exclude: |
|
762 | delete = normalize_pats(delete) | |
757 | newexclude.update(pats) |
|
763 | disableprofile = normalize_pats(disableprofile) | |
758 | elif enableprofile: |
|
764 | enableprofile = normalize_pats(enableprofile) | |
759 | newprofiles.update(pats) |
|
765 | ||
760 | elif disableprofile: |
|
766 | newinclude.difference_update(delete) | |
761 | newprofiles.difference_update(pats) |
|
767 | newexclude.difference_update(delete) | |
762 | elif delete: |
|
768 | newprofiles.difference_update(disableprofile) | |
763 | newinclude.difference_update(pats) |
|
769 | newinclude.update(include) | |
764 | newexclude.difference_update(pats) |
|
770 | newprofiles.update(enableprofile) | |
|
771 | newexclude.update(exclude) | |||
765 |
|
772 | |||
766 | profilecount = len(newprofiles - oldprofiles) - len( |
|
773 | profilecount = len(newprofiles - oldprofiles) - len( | |
767 | oldprofiles - newprofiles |
|
774 | oldprofiles - newprofiles |
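Where the old code applied exactly one action per call through `elif` branches, the rewrite normalizes every argument and applies them all as set arithmetic, removals before additions, so several actions can be combined in one call. A runnable sketch with invented patterns:

    # Sketch of the combined update order used above: deletions and profile
    # disables apply first, then additions.
    newinclude, newexclude = {b'src/', b'old/'}, {b'tmp/'}
    newprofiles = {b'base.sparse'}
    delete, disableprofile = {b'old/'}, set()
    include, exclude, enableprofile = {b'docs/'}, set(), {b'extra.sparse'}

    newinclude.difference_update(delete)
    newexclude.difference_update(delete)
    newprofiles.difference_update(disableprofile)
    newinclude.update(include)
    newprofiles.update(enableprofile)
    newexclude.update(exclude)

    assert newinclude == {b'src/', b'docs/'}
    assert newprofiles == {b'base.sparse', b'extra.sparse'}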
@@ -16,7 +16,6 b' from . import (' | |||||
16 | error, |
|
16 | error, | |
17 | pycompat, |
|
17 | pycompat, | |
18 | util, |
|
18 | util, | |
19 | wireprotoserver, |
|
|||
20 | wireprototypes, |
|
19 | wireprototypes, | |
21 | wireprotov1peer, |
|
20 | wireprotov1peer, | |
22 | wireprotov1server, |
|
21 | wireprotov1server, | |
@@ -288,10 +287,6 b' def _performhandshake(ui, stdin, stdout,' | |||||
288 | # Generate a random token to help identify responses to version 2 |
|
287 | # Generate a random token to help identify responses to version 2 | |
289 | # upgrade request. |
|
288 | # upgrade request. | |
290 | token = pycompat.sysbytes(str(uuid.uuid4())) |
|
289 | token = pycompat.sysbytes(str(uuid.uuid4())) | |
291 | upgradecaps = [ |
|
|||
292 | (b'proto', wireprotoserver.SSHV2), |
|
|||
293 | ] |
|
|||
294 | upgradecaps = util.urlreq.urlencode(upgradecaps) |
|
|||
295 |
|
290 | |||
296 | try: |
|
291 | try: | |
297 | pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40) |
|
292 | pairsarg = b'%s-%s' % (b'0' * 40, b'0' * 40) | |
@@ -302,11 +297,6 b' def _performhandshake(ui, stdin, stdout,' | |||||
302 | pairsarg, |
|
297 | pairsarg, | |
303 | ] |
|
298 | ] | |
304 |
|
299 | |||
305 | # Request upgrade to version 2 if configured. |
|
|||
306 | if ui.configbool(b'experimental', b'sshpeer.advertise-v2'): |
|
|||
307 | ui.debug(b'sending upgrade request: %s %s\n' % (token, upgradecaps)) |
|
|||
308 | handshake.insert(0, b'upgrade %s %s\n' % (token, upgradecaps)) |
|
|||
309 |
|
||||
310 | if requestlog: |
|
300 | if requestlog: | |
311 | ui.debug(b'devel-peer-request: hello+between\n') |
|
301 | ui.debug(b'devel-peer-request: hello+between\n') | |
312 | ui.debug(b'devel-peer-request: pairs: %d bytes\n' % len(pairsarg)) |
|
302 | ui.debug(b'devel-peer-request: pairs: %d bytes\n' % len(pairsarg)) | |
@@ -365,24 +355,6 b' def _performhandshake(ui, stdin, stdout,' | |||||
365 | if l.startswith(b'capabilities:'): |
|
355 | if l.startswith(b'capabilities:'): | |
366 | caps.update(l[:-1].split(b':')[1].split()) |
|
356 | caps.update(l[:-1].split(b':')[1].split()) | |
367 | break |
|
357 | break | |
368 | elif protoname == wireprotoserver.SSHV2: |
|
|||
369 | # We see a line with number of bytes to follow and then a value |
|
|||
370 | # looking like ``capabilities: *``. |
|
|||
371 | line = stdout.readline() |
|
|||
372 | try: |
|
|||
373 | valuelen = int(line) |
|
|||
374 | except ValueError: |
|
|||
375 | badresponse() |
|
|||
376 |
|
||||
377 | capsline = stdout.read(valuelen) |
|
|||
378 | if not capsline.startswith(b'capabilities: '): |
|
|||
379 | badresponse() |
|
|||
380 |
|
||||
381 | ui.debug(b'remote: %s\n' % capsline) |
|
|||
382 |
|
||||
383 | caps.update(capsline.split(b':')[1].split()) |
|
|||
384 | # Trailing newline. |
|
|||
385 | stdout.read(1) |
|
|||
386 |
|
358 | |||
387 | # Error if we couldn't find capabilities, this means: |
|
359 | # Error if we couldn't find capabilities, this means: | |
388 | # |
|
360 | # | |
@@ -601,14 +573,6 b' class sshv1peer(wireprotov1peer.wirepeer' | |||||
601 | self._readerr() |
|
573 | self._readerr() | |
602 |
|
574 | |||
603 |
|
575 | |||
604 | class sshv2peer(sshv1peer): |
|
|||
605 | """A peer that speakers version 2 of the transport protocol.""" |
|
|||
606 |
|
||||
607 | # Currently version 2 is identical to version 1 post handshake. |
|
|||
608 | # And handshake is performed before the peer is instantiated. So |
|
|||
609 | # we need no custom code. |
|
|||
610 |
|
||||
611 |
|
||||
612 | def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True): |
|
576 | def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True): | |
613 | """Make a peer instance from existing pipes. |
|
577 | """Make a peer instance from existing pipes. | |
614 |
|
578 | |||
@@ -640,17 +604,6 b' def makepeer(ui, path, proc, stdin, stdo' | |||||
640 | caps, |
|
604 | caps, | |
641 | autoreadstderr=autoreadstderr, |
|
605 | autoreadstderr=autoreadstderr, | |
642 | ) |
|
606 | ) | |
643 | elif protoname == wireprototypes.SSHV2: |
|
|||
644 | return sshv2peer( |
|
|||
645 | ui, |
|
|||
646 | path, |
|
|||
647 | proc, |
|
|||
648 | stdin, |
|
|||
649 | stdout, |
|
|||
650 | stderr, |
|
|||
651 | caps, |
|
|||
652 | autoreadstderr=autoreadstderr, |
|
|||
653 | ) |
|
|||
654 | else: |
|
607 | else: | |
655 | _cleanuppipes(ui, stdout, stdin, stderr, warn=None) |
|
608 | _cleanuppipes(ui, stdout, stdin, stderr, warn=None) | |
656 | raise error.RepoError( |
|
609 | raise error.RepoError( |
@@ -139,12 +139,18 b' def _hostsettings(ui, hostname):' | |||||
139 |
|
139 | |||
140 | alg, fingerprint = fingerprint.split(b':', 1) |
|
140 | alg, fingerprint = fingerprint.split(b':', 1) | |
141 | fingerprint = fingerprint.replace(b':', b'').lower() |
|
141 | fingerprint = fingerprint.replace(b':', b'').lower() | |
|
142 | # pytype: disable=attribute-error | |||
|
143 | # `s` is heterogeneous, but this entry is always a list of tuples | |||
142 | s[b'certfingerprints'].append((alg, fingerprint)) |
|
144 | s[b'certfingerprints'].append((alg, fingerprint)) | |
|
145 | # pytype: enable=attribute-error | |||
143 |
|
146 | |||
144 | # Fingerprints from [hostfingerprints] are always SHA-1. |
|
147 | # Fingerprints from [hostfingerprints] are always SHA-1. | |
145 | for fingerprint in ui.configlist(b'hostfingerprints', bhostname): |
|
148 | for fingerprint in ui.configlist(b'hostfingerprints', bhostname): | |
146 | fingerprint = fingerprint.replace(b':', b'').lower() |
|
149 | fingerprint = fingerprint.replace(b':', b'').lower() | |
|
150 | # pytype: disable=attribute-error | |||
|
151 | # `s` is heterogeneous, but this entry is always a list of tuples | |||
147 | s[b'certfingerprints'].append((b'sha1', fingerprint)) |
|
152 | s[b'certfingerprints'].append((b'sha1', fingerprint)) | |
|
153 | # pytype: enable=attribute-error | |||
148 | s[b'legacyfingerprint'] = True |
|
154 | s[b'legacyfingerprint'] = True | |
149 |
|
155 | |||
150 | # If a host cert fingerprint is defined, it is the only thing that |
|
156 | # If a host cert fingerprint is defined, it is the only thing that |
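The pytype pragmas are needed because `s` mixes value types; the fingerprint normalization itself only splits off the algorithm and strips colons. A sketch of that normalization (the sample fingerprint is invented):

    # Sketch of the normalization above: 'sha256:AB:CD:...' style config
    # values become (algorithm, lowercase hex) tuples.
    def parse_fingerprint(value):
        alg, fp = value.split(b':', 1)
        return alg, fp.replace(b':', b'').lower()

    assert parse_fingerprint(b'sha256:9A:2B:CD') == (b'sha256', b'9a2bcd')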
@@ -22,6 +22,7 b' from . import (' | |||||
22 | namespaces, |
|
22 | namespaces, | |
23 | pathutil, |
|
23 | pathutil, | |
24 | pycompat, |
|
24 | pycompat, | |
|
25 | requirements as requirementsmod, | |||
25 | url, |
|
26 | url, | |
26 | util, |
|
27 | util, | |
27 | vfs as vfsmod, |
|
28 | vfs as vfsmod, | |
@@ -197,6 +198,9 b' class statichttprepository(' | |||||
197 | # we do not care about empty old-style repositories here |
|
198 | # we do not care about empty old-style repositories here | |
198 | msg = _(b"'%s' does not appear to be an hg repository") % path |
|
199 | msg = _(b"'%s' does not appear to be an hg repository") % path | |
199 | raise error.RepoError(msg) |
|
200 | raise error.RepoError(msg) | |
|
201 | if requirementsmod.SHARESAFE_REQUIREMENT in requirements: | |||
|
202 | storevfs = vfsclass(self.vfs.join(b'store')) | |||
|
203 | requirements |= set(storevfs.read(b'requires').splitlines()) | |||
200 |
|
204 | |||
201 | supportedrequirements = localrepo.gathersupportedrequirements(ui) |
|
205 | supportedrequirements = localrepo.gathersupportedrequirements(ui) | |
202 | localrepo.ensurerequirementsrecognized( |
|
206 | localrepo.ensurerequirementsrecognized( |
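With share-safe, part of the requirement set lives in the store's own `requires` file, so the static-HTTP path now merges both files before validating. A sketch of that merging with the file read faked as a bytes literal (requirement names are stand-ins):

    # Sketch of the requirement merging added above.
    SHARESAFE = b'share-safe'
    requirements = {b'revlogv1', SHARESAFE}           # from .hg/requires
    store_requires = b'generaldelta\nsparserevlog\n'  # .hg/store/requires

    if SHARESAFE in requirements:
        requirements |= set(store_requires.splitlines())
    assert b'sparserevlog' in requirements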
@@ -494,9 +494,9 b' def display(fp=None, format=3, data=None' | |||||
494 | data = state |
|
494 | data = state | |
495 |
|
495 | |||
496 | if fp is None: |
|
496 | if fp is None: | |
497 | import sys |
|
497 | from .utils import procutil | |
498 |
|
498 | |||
499 | fp = sys.stdout |
|
499 | fp = procutil.stdout | |
500 | if len(data.samples) == 0: |
|
500 | if len(data.samples) == 0: | |
501 | fp.write(b'No samples recorded.\n') |
|
501 | fp.write(b'No samples recorded.\n') | |
502 | return |
|
502 | return | |
@@ -516,7 +516,7 b' def display(fp=None, format=3, data=None' | |||||
516 | elif format == DisplayFormats.Chrome: |
|
516 | elif format == DisplayFormats.Chrome: | |
517 | write_to_chrome(data, fp, **kwargs) |
|
517 | write_to_chrome(data, fp, **kwargs) | |
518 | else: |
|
518 | else: | |
519 | raise Exception(b"Invalid display format") |
|
519 | raise Exception("Invalid display format") | |
520 |
|
520 | |||
521 | if format not in (DisplayFormats.Json, DisplayFormats.Chrome): |
|
521 | if format not in (DisplayFormats.Json, DisplayFormats.Chrome): | |
522 | fp.write(b'---\n') |
|
522 | fp.write(b'---\n') | |
@@ -625,7 +625,7 b' def display_by_method(data, fp):' | |||||
625 |
|
625 | |||
626 | def display_about_method(data, fp, function=None, **kwargs): |
|
626 | def display_about_method(data, fp, function=None, **kwargs): | |
627 | if function is None: |
|
627 | if function is None: | |
628 | raise Exception(b"Invalid function") |
|
628 | raise Exception("Invalid function") | |
629 |
|
629 | |||
630 | filename = None |
|
630 | filename = None | |
631 | if b':' in function: |
|
631 | if b':' in function: | |
@@ -1080,7 +1080,7 b' def main(argv=None):' | |||||
1080 | printusage() |
|
1080 | printusage() | |
1081 | return 0 |
|
1081 | return 0 | |
1082 | else: |
|
1082 | else: | |
1083 | assert False, b"unhandled option %s" % o |
|
1083 | assert False, "unhandled option %s" % o | |
1084 |
|
1084 | |||
1085 | if not path: |
|
1085 | if not path: | |
1086 | print('must specify --file to load') |
|
1086 | print('must specify --file to load') |
@@ -27,11 +27,38 b' from . import (' | |||||
27 | store, |
|
27 | store, | |
28 | util, |
|
28 | util, | |
29 | ) |
|
29 | ) | |
|
30 | from .revlogutils import ( | |||
|
31 | nodemap, | |||
|
32 | ) | |||
30 | from .utils import ( |
|
33 | from .utils import ( | |
31 | stringutil, |
|
34 | stringutil, | |
32 | ) |
|
35 | ) | |
33 |
|
36 | |||
34 |
|
37 | |||
|
38 | def new_stream_clone_requirements(default_requirements, streamed_requirements): | |||
|
39 | """determine the final set of requirement for a new stream clone | |||
|
40 | ||||
|
41 | this method combine the "default" requirements that a new repository would | |||
|
42 | use with the constaint we get from the stream clone content. We keep local | |||
|
43 | configuration choice when possible. | |||
|
44 | """ | |||
|
45 | requirements = set(default_requirements) | |||
|
46 | requirements -= requirementsmod.STREAM_FIXED_REQUIREMENTS | |||
|
47 | requirements.update(streamed_requirements) | |||
|
48 | return requirements | |||
|
49 | ||||
|
50 | ||||
|
51 | def streamed_requirements(repo): | |||
|
52 | """the set of requirement the new clone will have to support | |||
|
53 | ||||
|
54 | This is used for advertising the stream options and to generate the actual | |||
|
55 | stream content.""" | |||
|
56 | requiredformats = ( | |||
|
57 | repo.requirements & requirementsmod.STREAM_FIXED_REQUIREMENTS | |||
|
58 | ) | |||
|
59 | return requiredformats | |||
|
60 | ||||
|
61 | ||||
35 | def canperformstreamclone(pullop, bundle2=False): |
|
62 | def canperformstreamclone(pullop, bundle2=False): | |
36 | """Whether it is possible to perform a streaming clone as part of pull. |
|
63 | """Whether it is possible to perform a streaming clone as part of pull. | |
37 |
|
64 | |||
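`new_stream_clone_requirements` keeps whatever local, config-driven requirements a fresh repository would have had, but replaces everything pinned by the stream format with what the sender actually streamed. A sketch with stand-in constants (the fixed set stands in for `requirementsmod.STREAM_FIXED_REQUIREMENTS`):

    # Sketch of new_stream_clone_requirements() above.
    STREAM_FIXED = {b'generaldelta', b'sparserevlog', b'revlogv1'}

    def new_stream_clone_requirements(defaults, streamed):
        reqs = set(defaults)
        reqs -= STREAM_FIXED      # drop the local format choices...
        reqs |= set(streamed)     # ...and adopt what the stream contains
        return reqs

    out = new_stream_clone_requirements(
        {b'dotencode', b'fncache', b'sparserevlog'}, {b'revlogv1'})
    assert out == {b'dotencode', b'fncache', b'revlogv1'}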
@@ -184,17 +211,15 b' def maybeperformlegacystreamclone(pullop' | |||||
184 |
|
211 | |||
185 | with repo.lock(): |
|
212 | with repo.lock(): | |
186 | consumev1(repo, fp, filecount, bytecount) |
|
213 | consumev1(repo, fp, filecount, bytecount) | |
187 |
|
214 | repo.requirements = new_stream_clone_requirements( | ||
188 | # new requirements = old non-format requirements + |
|
215 | repo.requirements, | |
189 | # new format-related remote requirements |
|
216 | requirements, | |
190 | # requirements from the streamed-in repository |
|
|||
191 | repo.requirements = requirements | ( |
|
|||
192 | repo.requirements - repo.supportedformats |
|
|||
193 | ) |
|
217 | ) | |
194 | repo.svfs.options = localrepo.resolvestorevfsoptions( |
|
218 | repo.svfs.options = localrepo.resolvestorevfsoptions( | |
195 | repo.ui, repo.requirements, repo.features |
|
219 | repo.ui, repo.requirements, repo.features | |
196 | ) |
|
220 | ) | |
197 | scmutil.writereporequirements(repo) |
|
221 | scmutil.writereporequirements(repo) | |
|
222 | nodemap.post_stream_cleanup(repo) | |||
198 |
|
223 | |||
199 | if rbranchmap: |
|
224 | if rbranchmap: | |
200 | repo._branchcaches.replace(repo, rbranchmap) |
|
225 | repo._branchcaches.replace(repo, rbranchmap) | |
@@ -333,7 +358,7 b' def generatebundlev1(repo, compression=b' | |||||
333 | if compression != b'UN': |
|
358 | if compression != b'UN': | |
334 | raise ValueError(b'we do not support the compression argument yet') |
|
359 | raise ValueError(b'we do not support the compression argument yet') | |
335 |
|
360 | |||
336 | requirements = repo.requirements & repo.supportedformats |
|
361 | requirements = streamed_requirements(repo) | |
337 | requires = b','.join(sorted(requirements)) |
|
362 | requires = b','.join(sorted(requirements)) | |
338 |
|
363 | |||
339 | def gen(): |
|
364 | def gen(): | |
@@ -489,6 +514,7 b' def applybundlev1(repo, fp):' | |||||
489 | ) |
|
514 | ) | |
490 |
|
515 | |||
491 | consumev1(repo, fp, filecount, bytecount) |
|
516 | consumev1(repo, fp, filecount, bytecount) | |
|
517 | nodemap.post_stream_cleanup(repo) | |||
492 |
|
518 | |||
493 |
|
519 | |||
494 | class streamcloneapplier(object): |
|
520 | class streamcloneapplier(object): | |
@@ -797,16 +823,15 b' def applybundlev2(repo, fp, filecount, f' | |||||
797 |
|
823 | |||
798 | consumev2(repo, fp, filecount, filesize) |
|
824 | consumev2(repo, fp, filecount, filesize) | |
799 |
|
825 | |||
800 | # new requirements = old non-format requirements + |
|
826 | repo.requirements = new_stream_clone_requirements( | |
801 | # new format-related remote requirements |
|
827 | repo.requirements, | |
802 | # requirements from the streamed-in repository |
|
828 | requirements, | |
803 | repo.requirements = set(requirements) | ( |
|
|||
804 | repo.requirements - repo.supportedformats |
|
|||
805 | ) |
|
829 | ) | |
806 | repo.svfs.options = localrepo.resolvestorevfsoptions( |
|
830 | repo.svfs.options = localrepo.resolvestorevfsoptions( | |
807 | repo.ui, repo.requirements, repo.features |
|
831 | repo.ui, repo.requirements, repo.features | |
808 | ) |
|
832 | ) | |
809 | scmutil.writereporequirements(repo) |
|
833 | scmutil.writereporequirements(repo) | |
|
834 | nodemap.post_stream_cleanup(repo) | |||
810 |
|
835 | |||
811 |
|
836 | |||
812 | def _copy_files(src_vfs_map, dst_vfs_map, entries, progress): |
|
837 | def _copy_files(src_vfs_map, dst_vfs_map, entries, progress): |
@@ -304,6 +304,21 b' def showextras(context, mapping):' | |||||
304 | ) |
|
304 | ) | |
305 |
|
305 | |||
306 |
|
306 | |||
|
307 | @templatekeyword(b'_fast_rank', requires={b'ctx'}) | |||
|
308 | def fast_rank(context, mapping): | |||
|
309 | """the rank of a changeset if cached | |||
|
310 | ||||
|
311 | The rank of a revision is the size of the sub-graph it defines as a head. | |||
|
312 | Equivalently, the rank of a revision `r` is the size of the set | |||
|
313 | `ancestors(r)`, `r` included. | |||
|
314 | """ | |||
|
315 | ctx = context.resource(mapping, b'ctx') | |||
|
316 | rank = ctx.fast_rank() | |||
|
317 | if rank is None: | |||
|
318 | return None | |||
|
319 | return b"%d" % rank | |||
|
320 | ||||
|
321 | ||||
307 | def _getfilestatus(context, mapping, listall=False): |
|
322 | def _getfilestatus(context, mapping, listall=False): | |
308 | ctx = context.resource(mapping, b'ctx') |
|
323 | ctx = context.resource(mapping, b'ctx') | |
309 | revcache = context.resource(mapping, b'revcache') |
|
324 | revcache = context.resource(mapping, b'revcache') |
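The docstring's definition of rank (the size of `ancestors(r)` with `r` included) can be checked naively on a toy DAG; the keyword itself only reads a cached value and yields nothing when the cache is cold:

    # Naive check of the rank definition on a toy DAG (rev -> parents);
    # the template keyword above returns a cached value instead.
    parents = {0: [], 1: [0], 2: [0], 3: [1, 2]}

    def rank(rev):
        seen, stack = set(), [rev]
        while stack:
            r = stack.pop()
            if r not in seen:
                seen.add(r)
                stack.extend(parents[r])
        return len(seen)

    assert rank(3) == 4   # rev 3, its parents 1 and 2, and the root 0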
@@ -25,11 +25,6 b' from .utils import stringutil' | |||||
25 |
|
25 | |||
26 | version = 2 |
|
26 | version = 2 | |
27 |
|
27 | |||
28 | # These are the file generators that should only be executed after the |
|
|||
29 | # finalizers are done, since they rely on the output of the finalizers (like |
|
|||
30 | # the changelog having been written). |
|
|||
31 | postfinalizegenerators = {b'bookmarks', b'dirstate'} |
|
|||
32 |
|
||||
33 | GEN_GROUP_ALL = b'all' |
|
28 | GEN_GROUP_ALL = b'all' | |
34 | GEN_GROUP_PRE_FINALIZE = b'prefinalize' |
|
29 | GEN_GROUP_PRE_FINALIZE = b'prefinalize' | |
35 | GEN_GROUP_POST_FINALIZE = b'postfinalize' |
|
30 | GEN_GROUP_POST_FINALIZE = b'postfinalize' | |
@@ -334,7 +329,13 b' class transaction(util.transactional):' | |||||
334 |
|
329 | |||
335 | @active |
|
330 | @active | |
336 | def addfilegenerator( |
|
331 | def addfilegenerator( | |
337 | self, genid, filenames, genfunc, order=0, location=b'' |
|
332 | self, | |
|
333 | genid, | |||
|
334 | filenames, | |||
|
335 | genfunc, | |||
|
336 | order=0, | |||
|
337 | location=b'', | |||
|
338 | post_finalize=False, | |||
338 | ): |
|
339 | ): | |
339 | """add a function to generates some files at transaction commit |
|
340 | """add a function to generates some files at transaction commit | |
340 |
|
341 | |||
@@ -357,10 +358,14 b' class transaction(util.transactional):' | |||||
357 | The `location` argument may be used to indicate the files are located |
|
358 | The `location` argument may be used to indicate the files are located | |
358 | outside of the standard directory for the transaction. It should match |
|
359 | outside of the standard directory for the transaction. It should match | |
359 | one of the keys of the `transaction.vfsmap` dictionary. |
|
360 | one of the keys of the `transaction.vfsmap` dictionary. | |
|
361 | ||||
|
362 | The `post_finalize` argument can be set to `True` for file generation | |||
|
363 | that must be run after the transaction has been finalized. | |||
360 | """ |
|
364 | """ | |
361 | # For now, we are unable to do proper backup and restore of custom vfs |
|
365 | # For now, we are unable to do proper backup and restore of custom vfs | |
362 | # but for bookmarks that are handled outside this mechanism. |
|
366 | # but for bookmarks that are handled outside this mechanism. | |
363 | self._filegenerators[genid] = (order, filenames, genfunc, location) |
|
367 | entry = (order, filenames, genfunc, location, post_finalize) | |
|
368 | self._filegenerators[genid] = entry | |||
364 |
|
369 | |||
365 | @active |
|
370 | @active | |
366 | def removefilegenerator(self, genid): |
|
371 | def removefilegenerator(self, genid): | |
@@ -380,13 +385,12 b' class transaction(util.transactional):' | |||||
380 |
|
385 | |||
381 | for id, entry in sorted(pycompat.iteritems(self._filegenerators)): |
|
386 | for id, entry in sorted(pycompat.iteritems(self._filegenerators)): | |
382 | any = True |
|
387 | any = True | |
383 | order, filenames, genfunc, location = entry |
|
388 | order, filenames, genfunc, location, post_finalize = entry | |
384 |
|
389 | |||
385 | # for generation at closing, check if it's before or after finalize |
|
390 | # for generation at closing, check if it's before or after finalize | |
386 | is_post = id in postfinalizegenerators |
|
391 | if skip_post and post_finalize: | |
387 | if skip_post and is_post: |
|
|||
388 | continue |
|
392 | continue | |
389 | elif skip_pre and not is_post: |
|
393 | elif skip_pre and not post_finalize: | |
390 | continue |
|
394 | continue | |
391 |
|
395 | |||
392 | vfs = self._vfsmap[location] |
|
396 | vfs = self._vfsmap[location] |
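The `post_finalize` flag replaces the module-level `postfinalizegenerators` set, so the scheduling information now travels with each registered entry. A sketch of the new entry layout and the close-time filtering (all names local to the sketch):

    # Sketch: each generator carries its own post_finalize flag, and the
    # pre-finalize pass simply skips flagged entries.
    filegenerators = {}

    def addfilegenerator(genid, filenames, genfunc, order=0,
                         location=b'', post_finalize=False):
        filegenerators[genid] = (order, filenames, genfunc,
                                 location, post_finalize)

    addfilegenerator(b'bookmarks', (b'bookmarks',), lambda vfs: None,
                     post_finalize=True)

    ran = [gid for gid, entry in sorted(filegenerators.items())
           if not entry[4]]          # pre-finalize pass
    assert ran == []                 # bookmarks waits for finalization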
@@ -71,6 +71,7 b' class unionrevlog(revlog.revlog):' | |||||
71 | _sds, |
|
71 | _sds, | |
72 | _dcm, |
|
72 | _dcm, | |
73 | _sdcm, |
|
73 | _sdcm, | |
|
74 | rank, | |||
74 | ) = rev |
|
75 | ) = rev | |
75 | flags = _start & 0xFFFF |
|
76 | flags = _start & 0xFFFF | |
76 |
|
77 | |||
@@ -107,6 +108,7 b' class unionrevlog(revlog.revlog):' | |||||
107 | 0, # sidedata size |
|
108 | 0, # sidedata size | |
108 | revlog_constants.COMP_MODE_INLINE, |
|
109 | revlog_constants.COMP_MODE_INLINE, | |
109 | revlog_constants.COMP_MODE_INLINE, |
|
110 | revlog_constants.COMP_MODE_INLINE, | |
|
111 | rank, | |||
110 | ) |
|
112 | ) | |
111 | self.index.append(e) |
|
113 | self.index.append(e) | |
112 | self.bundlerevs.add(n) |
|
114 | self.bundlerevs.add(n) |
@@ -42,27 +42,16 b' def upgraderepo(' | |||||
42 | ): |
|
42 | ): | |
43 | """Upgrade a repository in place.""" |
|
43 | """Upgrade a repository in place.""" | |
44 | if optimize is None: |
|
44 | if optimize is None: | |
45 | optimize = {} |
|
45 | optimize = set() | |
46 | repo = repo.unfiltered() |
|
46 | repo = repo.unfiltered() | |
47 |
|
47 | |||
48 | revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS) |
|
48 | specified_revlogs = {} | |
49 | specentries = ( |
|
49 | if changelog is not None: | |
50 | (upgrade_engine.UPGRADE_CHANGELOG, changelog), |
|
50 | specified_revlogs[upgrade_engine.UPGRADE_CHANGELOG] = changelog | |
51 | (upgrade_engine.UPGRADE_MANIFEST, manifest), |
|
51 | if manifest is not None: | |
52 | (upgrade_engine.UPGRADE_FILELOGS, filelogs), |
|
52 | specified_revlogs[upgrade_engine.UPGRADE_MANIFEST] = manifest | |
53 | ) |
|
53 | if filelogs is not None: | |
54 | specified = [(y, x) for (y, x) in specentries if x is not None] |
|
54 | specified_revlogs[upgrade_engine.UPGRADE_FILELOGS] = filelogs | |
55 | if specified: |
|
|||
56 | # we have some limitation on revlogs to be recloned |
|
|||
57 | if any(x for y, x in specified): |
|
|||
58 | revlogs = set() |
|
|||
59 | for upgrade, enabled in specified: |
|
|||
60 | if enabled: |
|
|||
61 | revlogs.add(upgrade) |
|
|||
62 | else: |
|
|||
63 | # none are enabled |
|
|||
64 | for upgrade, __ in specified: |
|
|||
65 | revlogs.discard(upgrade) |
|
|||
66 |
|
55 | |||
67 | # Ensure the repository can be upgraded. |
|
56 | # Ensure the repository can be upgraded. | |
68 | upgrade_actions.check_source_requirements(repo) |
|
57 | upgrade_actions.check_source_requirements(repo) | |
@@ -96,20 +85,92 b' def upgraderepo(' | |||||
96 | ) |
|
85 | ) | |
97 | removed_actions = upgrade_actions.find_format_downgrades(repo) |
|
86 | removed_actions = upgrade_actions.find_format_downgrades(repo) | |
98 |
|
87 | |||
99 | removedreqs = repo.requirements - newreqs |
|
88 | # check if we need to touch revlog and if so, which ones | |
100 | addedreqs = newreqs - repo.requirements |
|
89 | ||
|
90 | touched_revlogs = set() | |||
|
91 | overwrite_msg = _(b'warning: ignoring %14s, as upgrade is changing: %s\n') | |||
|
92 | select_msg = _(b'note: selecting %s for processing to change: %s\n') | |||
|
93 | msg_issued = 0 | |||
|
94 | ||||
|
95 | FL = upgrade_engine.UPGRADE_FILELOGS | |||
|
96 | MN = upgrade_engine.UPGRADE_MANIFEST | |||
|
97 | CL = upgrade_engine.UPGRADE_CHANGELOG | |||
|
98 | ||||
|
99 | if optimizations: | |||
|
100 | if any(specified_revlogs.values()): | |||
|
101 | # we have some limitation on revlogs to be recloned | |||
|
102 | for rl, enabled in specified_revlogs.items(): | |||
|
103 | if enabled: | |||
|
104 | touched_revlogs.add(rl) | |||
|
105 | else: | |||
|
106 | touched_revlogs = set(upgrade_engine.UPGRADE_ALL_REVLOGS) | |||
|
107 | for rl, enabled in specified_revlogs.items(): | |||
|
108 | if not enabled: | |||
|
109 | touched_revlogs.discard(rl) | |||
|
110 | ||||
|
111 | if repo.shared(): | |||
|
112 | unsafe_actions = set() | |||
|
113 | unsafe_actions.update(up_actions) | |||
|
114 | unsafe_actions.update(removed_actions) | |||
|
115 | unsafe_actions.update(optimizations) | |||
|
116 | unsafe_actions = [ | |||
|
117 | a for a in unsafe_actions if not a.compatible_with_share | |||
|
118 | ] | |||
|
119 | unsafe_actions.sort(key=lambda a: a.name) | |||
|
120 | if unsafe_actions: | |||
|
121 | m = _(b'cannot use these actions on a share repository: %s') | |||
|
122 | h = _(b'upgrade the main repository directly') | |||
|
123 | actions = b', '.join(a.name for a in unsafe_actions) | |||
|
124 | m %= actions | |||
|
125 | raise error.Abort(m, hint=h) | |||
101 |
|
126 | |||
102 | if revlogs != upgrade_engine.UPGRADE_ALL_REVLOGS: |
|
127 | for action in sorted(up_actions + removed_actions, key=lambda a: a.name): | |
103 | incompatible = upgrade_actions.RECLONES_REQUIREMENTS & ( |
|
128 | # optimisations do not "require" anything, they just need it. | |
104 | removedreqs | addedreqs |
|
129 | if action.type != upgrade_actions.FORMAT_VARIANT: | |
105 | ) |
|
130 | continue | |
106 | if incompatible: |
|
131 | ||
107 | msg = _( |
|
132 | if action.touches_filelogs and FL not in touched_revlogs: | |
108 | b'ignoring revlogs selection flags, format requirements ' |
|
133 | if FL in specified_revlogs: | |
109 | b'change: %s\n' |
|
134 | if not specified_revlogs[FL]: | |
110 | ) |
|
135 | msg = overwrite_msg % (b'--no-filelogs', action.name) | |
111 | ui.warn(msg % b', '.join(sorted(incompatible))) |
|
136 | ui.warn(msg) | |
112 | revlogs = upgrade_engine.UPGRADE_ALL_REVLOGS |
|
137 | msg_issued = 2 | |
|
138 | else: | |||
|
139 | msg = select_msg % (b'all-filelogs', action.name) | |||
|
140 | ui.status(msg) | |||
|
141 | if not ui.quiet: | |||
|
142 | msg_issued = 1 | |||
|
143 | touched_revlogs.add(FL) | |||
|
144 | ||||
|
145 | if action.touches_manifests and MN not in touched_revlogs: | |||
|
146 | if MN in specified_revlogs: | |||
|
147 | if not specified_revlogs[MN]: | |||
|
148 | msg = overwrite_msg % (b'--no-manifest', action.name) | |||
|
149 | ui.warn(msg) | |||
|
150 | msg_issued = 2 | |||
|
151 | else: | |||
|
152 | msg = select_msg % (b'all-manifestlogs', action.name) | |||
|
153 | ui.status(msg) | |||
|
154 | if not ui.quiet: | |||
|
155 | msg_issued = 1 | |||
|
156 | touched_revlogs.add(MN) | |||
|
157 | ||||
|
158 | if action.touches_changelog and CL not in touched_revlogs: | |||
|
159 | if CL in specified_revlogs: | |||
|
160 | if not specified_revlogs[CL]: | |||
|
161 | msg = overwrite_msg % (b'--no-changelog', action.name) | |||
|
162 | ui.warn(msg) | |||
|
163 | msg_issued = True | |||
|
164 | else: | |||
|
165 | msg = select_msg % (b'changelog', action.name) | |||
|
166 | ui.status(msg) | |||
|
167 | if not ui.quiet: | |||
|
168 | msg_issued = 1 | |||
|
169 | touched_revlogs.add(CL) | |||
|
170 | if msg_issued >= 2: | |||
|
171 | ui.warn((b"\n")) | |||
|
172 | elif msg_issued >= 1: | |||
|
173 | ui.status((b"\n")) | |||
113 |
|
174 | |||
114 | upgrade_op = upgrade_actions.UpgradeOperation( |
|
175 | upgrade_op = upgrade_actions.UpgradeOperation( | |
115 | ui, |
|
176 | ui, | |
@@ -117,7 +178,7 b' def upgraderepo(' | |||||
117 | repo.requirements, |
|
178 | repo.requirements, | |
118 | up_actions, |
|
179 | up_actions, | |
119 | removed_actions, |
|
180 | removed_actions, | |
120 | revlogs, |
|
181 | touched_revlogs, | |
121 | backup, |
|
182 | backup, | |
122 | ) |
|
183 | ) | |
123 |
|
184 |
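With optimizations present, explicitly enabled revlog flags select exactly those revlogs, while explicit `--no-*` flags subtract from the full set. A sketch of that resolution (the byte constants stand in for the `upgrade_engine` values):

    # Sketch of the optimization-time revlog selection above.
    CL, MN, FL = b'changelog', b'manifest', b'filelogs'
    ALL = {CL, MN, FL}

    def touched(specified):
        if any(specified.values()):       # something enabled: exact set
            return {rl for rl, on in specified.items() if on}
        out = set(ALL)                    # only --no-* flags (or none)
        for rl, on in specified.items():
            if not on:
                out.discard(rl)
        return out

    assert touched({FL: True}) == {FL}
    assert touched({CL: False}) == {MN, FL}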
@@ -36,7 +36,10 b' RECLONES_REQUIREMENTS = {' | |||||
36 |
|
36 | |||
37 |
|
37 | |||
38 | def preservedrequirements(repo): |
|
38 | def preservedrequirements(repo): | |
39 | return set() |
|
39 | preserved = { | |
|
40 | requirements.SHARED_REQUIREMENT, | |||
|
41 | } | |||
|
42 | return preserved & repo.requirements | |||
40 |
|
43 | |||
41 |
|
44 | |||
42 | FORMAT_VARIANT = b'deficiency' |
|
45 | FORMAT_VARIANT = b'deficiency' | |
@@ -97,6 +100,9 b' class improvement(object):' | |||||
97 | # Whether this improvement touches the dirstate |
|
100 | # Whether this improvement touches the dirstate | |
98 | touches_dirstate = False |
|
101 | touches_dirstate = False | |
99 |
|
102 | |||
|
103 | # Can this action be run on a share instead of its mains repository | |||
|
104 | compatible_with_share = False | |||
|
105 | ||||
100 |
|
106 | |||
101 | allformatvariant = [] # type: List[Type['formatvariant']] |
|
107 | allformatvariant = [] # type: List[Type['formatvariant']] | |
102 |
|
108 | |||
@@ -190,6 +196,30 b' class dirstatev2(requirementformatvarian' | |||||
190 | touches_changelog = False |
|
196 | touches_changelog = False | |
191 | touches_requirements = True |
|
197 | touches_requirements = True | |
192 | touches_dirstate = True |
|
198 | touches_dirstate = True | |
|
199 | compatible_with_share = True | |||
|
200 | ||||
|
201 | ||||
|
202 | @registerformatvariant | |||
|
203 | class dirstatetrackedkey(requirementformatvariant): | |||
|
204 | name = b'tracked-hint' | |||
|
205 | _requirement = requirements.DIRSTATE_TRACKED_HINT_V1 | |||
|
206 | ||||
|
207 | default = False | |||
|
208 | ||||
|
209 | description = _( | |||
|
210 | b'Add a small file to help external tooling that watches the tracked set' | |
|
211 | ) | |||
|
212 | ||||
|
213 | upgrademessage = _( | |||
|
214 | b'external tools will be informed of potential changes in the tracked set' | |
|
215 | ) | |||
|
216 | ||||
|
217 | touches_filelogs = False | |||
|
218 | touches_manifests = False | |||
|
219 | touches_changelog = False | |||
|
220 | touches_requirements = True | |||
|
221 | touches_dirstate = True | |||
|
222 | compatible_with_share = True | |||
193 |
|
223 | |||
194 |
|
224 | |||
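The tracked-hint file gives external watchers a single small file whose content changes whenever the tracked set does, instead of having them parse the whole dirstate. A sketch of that contract (the fresh-uuid-per-write scheme is an assumption here, not necessarily what hg writes):

    # Sketch of the hint-file contract: every change to the tracked set
    # rewrites the file with fresh content, so watchers stat one file.
    import uuid

    hint_store = {}

    def on_tracked_set_changed():
        hint_store[b'dirstate-tracked-hint'] = uuid.uuid4().bytes

    on_tracked_set_changed()
    first = hint_store[b'dirstate-tracked-hint']
    on_tracked_set_changed()
    assert hint_store[b'dirstate-tracked-hint'] != first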
195 | @registerformatvariant |
|
225 | @registerformatvariant | |
@@ -243,7 +273,7 b' class sharesafe(requirementformatvariant' | |||||
243 | name = b'share-safe' |
|
273 | name = b'share-safe' | |
244 | _requirement = requirements.SHARESAFE_REQUIREMENT |
|
274 | _requirement = requirements.SHARESAFE_REQUIREMENT | |
245 |
|
275 | |||
246 | default = False |
|
276 | default = True | |
247 |
|
277 | |||
248 | description = _( |
|
278 | description = _( | |
249 | b'old shared repositories do not share source repository ' |
|
279 | b'old shared repositories do not share source repository ' | |
@@ -899,8 +929,6 b' def blocksourcerequirements(repo):' | |||||
899 | # This was a precursor to generaldelta and was never enabled by default. |
|
929 | # This was a precursor to generaldelta and was never enabled by default. | |
900 | # It should (hopefully) not exist in the wild. |
|
930 | # It should (hopefully) not exist in the wild. | |
901 | b'parentdelta', |
|
931 | b'parentdelta', | |
902 | # Upgrade should operate on the actual store, not the shared link. |
|
|||
903 | requirements.SHARED_REQUIREMENT, |
|
|||
904 | } |
|
932 | } | |
905 |
|
933 | |||
906 |
|
934 | |||
@@ -932,6 +960,16 b' def check_source_requirements(repo):' | |||||
932 | m = _(b'cannot upgrade repository; unsupported source requirement: %s') |
|
960 | m = _(b'cannot upgrade repository; unsupported source requirement: %s') | |
933 | blockingreqs = b', '.join(sorted(blockingreqs)) |
|
961 | blockingreqs = b', '.join(sorted(blockingreqs)) | |
934 | raise error.Abort(m % blockingreqs) |
|
962 | raise error.Abort(m % blockingreqs) | |
|
963 | # Upgrade should operate on the actual store, not the shared link. | |||
|
964 | ||||
|
965 | bad_share = ( | |||
|
966 | requirements.SHARED_REQUIREMENT in repo.requirements | |||
|
967 | and requirements.SHARESAFE_REQUIREMENT not in repo.requirements | |||
|
968 | ) | |||
|
969 | if bad_share: | |||
|
970 | m = _(b'cannot upgrade repository; share repository without share-safe') | |||
|
971 | h = _(b'check :hg:`help config.format.use-share-safe`') | |||
|
972 | raise error.Abort(m, hint=h) | |||
935 |
|
973 | |||
936 |
|
974 | |||
937 | ### Verify the validity of the planned requirement changes #################### |
|
975 | ### Verify the validity of the planned requirement changes #################### | |
@@ -952,6 +990,7 b' def supportremovedrequirements(repo):' | |||||
952 | requirements.REVLOGV2_REQUIREMENT, |
|
990 | requirements.REVLOGV2_REQUIREMENT, | |
953 | requirements.CHANGELOGV2_REQUIREMENT, |
|
991 | requirements.CHANGELOGV2_REQUIREMENT, | |
954 | requirements.REVLOGV1_REQUIREMENT, |
|
992 | requirements.REVLOGV1_REQUIREMENT, | |
|
993 | requirements.DIRSTATE_TRACKED_HINT_V1, | |||
955 | requirements.DIRSTATE_V2_REQUIREMENT, |
|
994 | requirements.DIRSTATE_V2_REQUIREMENT, | |
956 | } |
|
995 | } | |
957 | for name in compression.compengines: |
|
996 | for name in compression.compengines: | |
@@ -972,18 +1011,20 b' def supporteddestrequirements(repo):' | |||||
972 | Extensions should monkeypatch this to add their custom requirements. |
|
1011 | Extensions should monkeypatch this to add their custom requirements. | |
973 | """ |
|
1012 | """ | |
974 | supported = { |
|
1013 | supported = { | |
|
1014 | requirements.CHANGELOGV2_REQUIREMENT, | |||
|
1015 | requirements.COPIESSDC_REQUIREMENT, | |||
|
1016 | requirements.DIRSTATE_TRACKED_HINT_V1, | |||
|
1017 | requirements.DIRSTATE_V2_REQUIREMENT, | |||
975 | requirements.DOTENCODE_REQUIREMENT, |
|
1018 | requirements.DOTENCODE_REQUIREMENT, | |
976 | requirements.FNCACHE_REQUIREMENT, |
|
1019 | requirements.FNCACHE_REQUIREMENT, | |
977 | requirements.GENERALDELTA_REQUIREMENT, |
|
1020 | requirements.GENERALDELTA_REQUIREMENT, | |
|
1021 | requirements.NODEMAP_REQUIREMENT, | |||
978 | requirements.REVLOGV1_REQUIREMENT, # allowed in case of downgrade |
|
1022 | requirements.REVLOGV1_REQUIREMENT, # allowed in case of downgrade | |
979 | requirements.STORE_REQUIREMENT, |
|
1023 | requirements.REVLOGV2_REQUIREMENT, | |
|
1024 | requirements.SHARED_REQUIREMENT, | |||
|
1025 | requirements.SHARESAFE_REQUIREMENT, | |||
980 | requirements.SPARSEREVLOG_REQUIREMENT, |
|
1026 | requirements.SPARSEREVLOG_REQUIREMENT, | |
981 | requirements.STORE_REQUIREMENT, |
|
1027 | requirements.STORE_REQUIREMENT, | |
982 | requirements.NODEMAP_REQUIREMENT, |
|
|||
983 | requirements.SHARESAFE_REQUIREMENT, |
|
|||
984 | requirements.REVLOGV2_REQUIREMENT, |
|
|||
985 | requirements.CHANGELOGV2_REQUIREMENT, |
|
|||
986 | requirements.DIRSTATE_V2_REQUIREMENT, |
|
|||
987 | } |
|
1028 | } | |
988 | for name in compression.compengines: |
|
1029 | for name in compression.compengines: | |
989 | engine = compression.compengines[name] |
|
1030 | engine = compression.compengines[name] | |
@@ -1015,6 +1056,7 b' def allowednewrequirements(repo):' | |||||
1015 | requirements.REVLOGV1_REQUIREMENT, |
|
1056 | requirements.REVLOGV1_REQUIREMENT, | |
1016 | requirements.REVLOGV2_REQUIREMENT, |
|
1057 | requirements.REVLOGV2_REQUIREMENT, | |
1017 | requirements.CHANGELOGV2_REQUIREMENT, |
|
1058 | requirements.CHANGELOGV2_REQUIREMENT, | |
|
1059 | requirements.DIRSTATE_TRACKED_HINT_V1, | |||
1018 | requirements.DIRSTATE_V2_REQUIREMENT, |
|
1060 | requirements.DIRSTATE_V2_REQUIREMENT, | |
1019 | } |
|
1061 | } | |
1020 | for name in compression.compengines: |
|
1062 | for name in compression.compengines: |
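Note: taken together, `supportremovedrequirements`, `supporteddestrequirements`, and `allowednewrequirements` gate an upgrade with plain set arithmetic: anything added must be in the allowed set, anything dropped must be in the removable set, and the destination may only contain supported entries. A rough sketch of that validation, with illustrative requirement names (the real sets are built by the functions patched above, which extensions may monkeypatch):

```python
# Illustrative version of the requirement-change validation.
ALLOWED_NEW = {"dirstate-v2", "dirstate-tracked-key-v1", "sparserevlog"}
REMOVABLE = {"dirstate-v2", "dirstate-tracked-key-v1", "revlogv2"}
SUPPORTED_DEST = {"revlogv1", "store", "fncache", "dotencode",
                  "generaldelta", "sparserevlog", "dirstate-v2",
                  "dirstate-tracked-key-v1"}

def validate_requirement_change(old, new):
    added, removed = new - old, old - new
    if added - ALLOWED_NEW:
        raise ValueError("cannot add: %s" % sorted(added - ALLOWED_NEW))
    if removed - REMOVABLE:
        raise ValueError("cannot drop: %s" % sorted(removed - REMOVABLE))
    if new - SUPPORTED_DEST:
        raise ValueError("unsupported: %s" % sorted(new - SUPPORTED_DEST))

validate_requirement_change(
    {"revlogv1", "store", "fncache"},
    {"revlogv1", "store", "fncache", "dirstate-tracked-key-v1"},
)
```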
@@ -486,6 +486,15 b' def upgrade(ui, srcrepo, dstrepo, upgrad' | |||||
486 | upgrade_dirstate(ui, srcrepo, upgrade_op, b'v2', b'v1') |
|
486 | upgrade_dirstate(ui, srcrepo, upgrade_op, b'v2', b'v1') | |
487 | upgrade_op.removed_actions.remove(upgrade_actions.dirstatev2) |
|
487 | upgrade_op.removed_actions.remove(upgrade_actions.dirstatev2) | |
488 |
|
488 | |||
|
489 | if upgrade_actions.dirstatetrackedkey in upgrade_op.upgrade_actions: | |||
|
490 | ui.status(_(b'create dirstate-tracked-hint file\n')) | |||
|
491 | upgrade_tracked_hint(ui, srcrepo, upgrade_op, add=True) | |||
|
492 | upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatetrackedkey) | |||
|
493 | elif upgrade_actions.dirstatetrackedkey in upgrade_op.removed_actions: | |||
|
494 | ui.status(_(b'remove dirstate-tracked-hint file\n')) | |||
|
495 | upgrade_tracked_hint(ui, srcrepo, upgrade_op, add=False) | |||
|
496 | upgrade_op.removed_actions.remove(upgrade_actions.dirstatetrackedkey) | |||
|
497 | ||||
489 | if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions): |
|
498 | if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions): | |
490 | return |
|
499 | return | |
491 |
|
500 | |||
@@ -660,3 +669,15 b' def upgrade_dirstate(ui, srcrepo, upgrad' | |||||
660 | srcrepo.dirstate.write(None) |
|
669 | srcrepo.dirstate.write(None) | |
661 |
|
670 | |||
662 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
|
671 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) | |
|
672 | ||||
|
673 | ||||
|
674 | def upgrade_tracked_hint(ui, srcrepo, upgrade_op, add): | |||
|
675 | if add: | |||
|
676 | srcrepo.dirstate._use_tracked_hint = True | |||
|
677 | srcrepo.dirstate._dirty = True | |||
|
678 | srcrepo.dirstate._dirty_tracked_set = True | |||
|
679 | srcrepo.dirstate.write(None) | |||
|
680 | if not add: | |||
|
681 | srcrepo.dirstate.delete_tracked_hint() | |||
|
682 | ||||
|
683 | scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements) |
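Note: in `upgrade_tracked_hint` above, adding the hint creates the file as a side effect of a forced dirstate write, while removing it deletes the file directly; either way the new requirements are persisted afterwards. A simplified stand-alone model of that toggle, with a stub `Dirstate` and an illustrative requirement string in place of Mercurial's:

```python
# Simplified model of upgrade_tracked_hint(); Dirstate is a stub, not
# Mercurial's class, and the requirement string is illustrative.
TRACKED_HINT = "dirstate-tracked-key-v1"

class Dirstate:
    def __init__(self):
        self.use_tracked_hint = False

    def write(self):
        # Stand-in for flushing .hg/dirstate; with the flag set, the real
        # implementation also (re)writes the tracked-hint file.
        pass

    def delete_tracked_hint(self):
        self.use_tracked_hint = False  # stand-in for unlinking the hint file

def toggle_tracked_hint(dirstate, requirements, add):
    if add:
        # Force a dirstate write so the hint file exists on disk before
        # the requirement is advertised.
        dirstate.use_tracked_hint = True
        dirstate.write()
        requirements.add(TRACKED_HINT)
    else:
        dirstate.delete_tracked_hint()
        requirements.discard(TRACKED_HINT)
    return requirements  # the real code then persists these via scmutil
```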
@@ -57,7 +57,6 b' from .utils import (' | |||||
57 | hashutil, |
|
57 | hashutil, | |
58 | procutil, |
|
58 | procutil, | |
59 | stringutil, |
|
59 | stringutil, | |
60 | urlutil, |
|
|||
61 | ) |
|
60 | ) | |
62 |
|
61 | |||
63 | if pycompat.TYPE_CHECKING: |
|
62 | if pycompat.TYPE_CHECKING: | |
@@ -2991,54 +2990,6 b' def interpolate(prefix, mapping, s, fn=N' | |||||
2991 | return r.sub(lambda x: fn(mapping[x.group()[1:]]), s) |
|
2990 | return r.sub(lambda x: fn(mapping[x.group()[1:]]), s) | |
2992 |
|
2991 | |||
2993 |
|
2992 | |||
2994 | def getport(*args, **kwargs): |
|
|||
2995 | msg = b'getport(...) moved to mercurial.utils.urlutil' |
|
|||
2996 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
|||
2997 | return urlutil.getport(*args, **kwargs) |
|
|||
2998 |
|
||||
2999 |
|
||||
3000 | def url(*args, **kwargs): |
|
|||
3001 | msg = b'url(...) moved to mercurial.utils.urlutil' |
|
|||
3002 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
|||
3003 | return urlutil.url(*args, **kwargs) |
|
|||
3004 |
|
||||
3005 |
|
||||
3006 | def hasscheme(*args, **kwargs): |
|
|||
3007 | msg = b'hasscheme(...) moved to mercurial.utils.urlutil' |
|
|||
3008 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
|||
3009 | return urlutil.hasscheme(*args, **kwargs) |
|
|||
3010 |
|
||||
3011 |
|
||||
3012 | def hasdriveletter(*args, **kwargs): |
|
|||
3013 | msg = b'hasdriveletter(...) moved to mercurial.utils.urlutil' |
|
|||
3014 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
|||
3015 | return urlutil.hasdriveletter(*args, **kwargs) |
|
|||
3016 |
|
||||
3017 |
|
||||
3018 | def urllocalpath(*args, **kwargs): |
|
|||
3019 | msg = b'urllocalpath(...) moved to mercurial.utils.urlutil' |
|
|||
3020 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
|||
3021 | return urlutil.urllocalpath(*args, **kwargs) |
|
|||
3022 |
|
||||
3023 |
|
||||
3024 | def checksafessh(*args, **kwargs): |
|
|||
3025 | msg = b'checksafessh(...) moved to mercurial.utils.urlutil' |
|
|||
3026 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
|||
3027 | return urlutil.checksafessh(*args, **kwargs) |
|
|||
3028 |
|
||||
3029 |
|
||||
3030 | def hidepassword(*args, **kwargs): |
|
|||
3031 | msg = b'hidepassword(...) moved to mercurial.utils.urlutil' |
|
|||
3032 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
|||
3033 | return urlutil.hidepassword(*args, **kwargs) |
|
|||
3034 |
|
||||
3035 |
|
||||
3036 | def removeauth(*args, **kwargs): |
|
|||
3037 | msg = b'removeauth(...) moved to mercurial.utils.urlutil' |
|
|||
3038 | nouideprecwarn(msg, b'6.0', stacklevel=2) |
|
|||
3039 | return urlutil.removeauth(*args, **kwargs) |
|
|||
3040 |
|
||||
3041 |
|
||||
3042 | timecount = unitcountfn( |
|
2993 | timecount = unitcountfn( | |
3043 | (1, 1e3, _(b'%.0f s')), |
|
2994 | (1, 1e3, _(b'%.0f s')), | |
3044 | (100, 1, _(b'%.1f s')), |
|
2995 | (100, 1, _(b'%.1f s')), |
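Note: the block removed above is Mercurial's standard deprecation pattern: each function moved to `mercurial.utils.urlutil` left behind a shim that warned and delegated, and those shims are now dropped. A generic version of that pattern using only the standard library (all names here are illustrative, not Mercurial APIs):

```python
import functools
import warnings

def moved_to(new_func, new_location, since):
    """Build a shim that warns and delegates, mirroring the
    nouideprecwarn()-style wrappers deleted above."""
    @functools.wraps(new_func)
    def shim(*args, **kwargs):
        warnings.warn(
            "%s(...) moved to %s (since %s)"
            % (new_func.__name__, new_location, since),
            DeprecationWarning,
            stacklevel=2,  # point the warning at the caller, not the shim
        )
        return new_func(*args, **kwargs)
    return shim

# Hypothetical usage: keep the old name importable for one release cycle.
def getport(value):
    return int(value)

old_getport = moved_to(getport, "mercurial.utils.urlutil", "6.0")
assert old_getport("8080") == 8080
```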
@@ -75,7 +75,9 b' class LineBufferedWrapper(object):' | |||||
75 | return res |
|
75 | return res | |
76 |
|
76 | |||
77 |
|
77 | |||
|
78 | # pytype: disable=attribute-error | |||
78 | io.BufferedIOBase.register(LineBufferedWrapper) |
|
79 | io.BufferedIOBase.register(LineBufferedWrapper) | |
|
80 | # pytype: enable=attribute-error | |||
79 |
|
81 | |||
80 |
|
82 | |||
81 | def make_line_buffered(stream): |
|
83 | def make_line_buffered(stream): | |
@@ -114,7 +116,9 b' class WriteAllWrapper(object):' | |||||
114 | return total_written |
|
116 | return total_written | |
115 |
|
117 | |||
116 |
|
118 | |||
|
119 | # pytype: disable=attribute-error | |||
117 | io.IOBase.register(WriteAllWrapper) |
|
120 | io.IOBase.register(WriteAllWrapper) | |
|
121 | # pytype: enable=attribute-error | |||
118 |
|
122 | |||
119 |
|
123 | |||
120 | def _make_write_all(stream): |
|
124 | def _make_write_all(stream): | |
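Note: `io.BufferedIOBase.register` and `io.IOBase.register` are ordinary `abc.ABCMeta.register` calls: they declare the wrappers as virtual subclasses so `isinstance` checks treat them as file objects without inheriting any implementation. The pragmas added above exist because pytype evidently does not model `register` on the `io` ABCs. A minimal stand-alone illustration:

```python
import io
import sys

class Wrapper:
    """Duck-typed stream wrapper; inherits from no io base class."""
    def __init__(self, orig):
        self.orig = orig

    def write(self, data):
        return self.orig.write(data)

# Virtual subclass registration: isinstance() now succeeds even though
# Wrapper inherits nothing from io.IOBase.
io.IOBase.register(Wrapper)  # pytype: disable=attribute-error

assert isinstance(Wrapper(sys.stdout), io.IOBase)
```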
@@ -738,6 +742,8 b' else:' | |||||
738 | start_new_session = False |
|
742 | start_new_session = False | |
739 | ensurestart = True |
|
743 | ensurestart = True | |
740 |
|
744 | |||
|
745 | stdin = None | |||
|
746 | ||||
741 | try: |
|
747 | try: | |
742 | if stdin_bytes is None: |
|
748 | if stdin_bytes is None: | |
743 | stdin = subprocess.DEVNULL |
|
749 | stdin = subprocess.DEVNULL | |
@@ -766,7 +772,8 b' else:' | |||||
766 | record_wait(255) |
|
772 | record_wait(255) | |
767 | raise |
|
773 | raise | |
768 | finally: |
|
774 | finally: | |
769 | if stdin_bytes is not None: |
|
775 | if stdin_bytes is not None and stdin is not None: | |
|
776 | assert not isinstance(stdin, int) | |||
770 | stdin.close() |
|
777 | stdin.close() | |
771 | if not ensurestart: |
|
778 | if not ensurestart: | |
772 | # Even though we're not waiting on the child process, |
|
779 | # Even though we're not waiting on the child process, | |
@@ -847,6 +854,8 b' else:' | |||||
847 | return |
|
854 | return | |
848 |
|
855 | |||
849 | returncode = 255 |
|
856 | returncode = 255 | |
|
857 | stdin = None | |||
|
858 | ||||
850 | try: |
|
859 | try: | |
851 | if record_wait is None: |
|
860 | if record_wait is None: | |
852 | # Start a new session |
|
861 | # Start a new session | |
@@ -889,7 +898,8 b' else:' | |||||
889 | finally: |
|
898 | finally: | |
890 | # mission accomplished, this child needs to exit and not |
|
899 | # mission accomplished, this child needs to exit and not | |
891 | # continue the hg process here. |
|
900 | # continue the hg process here. | |
892 | stdin.close() |
|
901 | if stdin is not None: | |
|
902 | stdin.close() | |||
893 | if record_wait is None: |
|
903 | if record_wait is None: | |
894 | os._exit(returncode) |
|
904 | os._exit(returncode) | |
895 |
|
905 |
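Note: both hunks apply the same fix: `stdin = None` is bound before the `try` so the cleanup code can safely close it even when an exception fires before `stdin` is assigned (previously that could raise `UnboundLocalError` and mask the real error). The added `assert not isinstance(stdin, int)` also tells the type checker the value is a file object rather than the `subprocess.DEVNULL` sentinel, which is an int. A condensed sketch of the pattern (a hedged stand-in, not the procutil code):

```python
import subprocess
import tempfile

def spawn(cmd, stdin_bytes=None):
    # Pre-bind so the finally block never sees an unbound name, even if
    # tempfile creation or Popen raises first.
    stdin = None
    try:
        if stdin_bytes is None:
            stdin = subprocess.DEVNULL  # int sentinel, must not be close()d
        else:
            stdin = tempfile.TemporaryFile()
            stdin.write(stdin_bytes)
            stdin.seek(0)
        return subprocess.Popen(cmd, stdin=stdin)
    finally:
        if stdin_bytes is not None and stdin is not None:
            assert not isinstance(stdin, int)  # DEVNULL never reaches here
            stdin.close()
```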
@@ -112,6 +112,13 b' def filerevisioncopied(store, node):' | |||||
112 | 2-tuple of the source filename and node. |
|
112 | 2-tuple of the source filename and node. | |
113 | """ |
|
113 | """ | |
114 | if store.parents(node)[0] != sha1nodeconstants.nullid: |
|
114 | if store.parents(node)[0] != sha1nodeconstants.nullid: | |
|
115 | # When creating a copy or move we set filelog parents to null, | |||
|
116 | # because contents are probably unrelated and making a delta | |||
|
117 | # would not be useful. | |||
|
118 | # Conversely, if filelog p1 is non-null we know | |||
|
119 | # there is no copy metadata. | |||
|
120 | # In the presence of merges, this reasoning becomes invalid | |||
|
121 | # if we reorder parents. See tests/test-issue6528.t. | |||
115 | return False |
|
122 | return False | |
116 |
|
123 | |||
117 | meta = parsemeta(store.revision(node))[0] |
|
124 | meta = parsemeta(store.revision(node))[0] |
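Note: the comment added here documents a long-standing filelog invariant: copy and rename revisions are stored with a null p1 (the new content is treated as unrelated to the copy source), so a non-null p1 is a cheap proof that no copy metadata needs parsing. A small sketch of that short-circuit, with a stand-in `NULLID` and store interface rather than Mercurial's actual constants and classes:

```python
# Sketch of the fast path in filerevisioncopied().
NULLID = b"\0" * 20  # illustrative stand-in for sha1nodeconstants.nullid

def maybe_copied(store, node):
    """Return False quickly when filelog p1 is non-null: copy/rename
    revisions are written with a null p1, so a real p1 rules out copy
    metadata without decoding the revision text. (As the new comment
    notes, reordered parents on merges can defeat this reasoning; see
    tests/test-issue6528.t.)"""
    p1, _p2 = store.parents(node)
    if p1 != NULLID:
        return False
    return True  # the real code parses the revision metadata at this point
```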
@@ -264,7 +264,11 b' def prettyrepr(o):' | |||||
264 | q1 = rs.find(b'<', p1 + 1) |
|
264 | q1 = rs.find(b'<', p1 + 1) | |
265 | if q1 < 0: |
|
265 | if q1 < 0: | |
266 | q1 = len(rs) |
|
266 | q1 = len(rs) | |
|
267 | # pytype: disable=wrong-arg-count | |||
|
268 | # TODO: figure out why pytype doesn't recognize the optional start | |||
|
269 | # arg | |||
267 | elif q1 > p1 + 1 and rs.startswith(b'=', q1 - 1): |
|
270 | elif q1 > p1 + 1 and rs.startswith(b'=', q1 - 1): | |
|
271 | # pytype: enable=wrong-arg-count | |||
268 | # backtrack for ' field=<' |
|
272 | # backtrack for ' field=<' | |
269 | q0 = rs.rfind(b' ', p1 + 1, q1 - 1) |
|
273 | q0 = rs.rfind(b' ', p1 + 1, q1 - 1) | |
270 | if q0 < 0: |
|
274 | if q0 < 0: | |
@@ -692,11 +696,11 b' def escapestr(s):' | |||||
692 | s = bytes(s) |
|
696 | s = bytes(s) | |
693 | # call underlying function of s.encode('string_escape') directly for |
|
697 | # call underlying function of s.encode('string_escape') directly for | |
694 | # Python 3 compatibility |
|
698 | # Python 3 compatibility | |
695 | return codecs.escape_encode(s)[0] |
|
699 | return codecs.escape_encode(s)[0] # pytype: disable=module-attr | |
696 |
|
700 | |||
697 |
|
701 | |||
698 | def unescapestr(s): |
|
702 | def unescapestr(s): | |
699 | return codecs.escape_decode(s)[0] |
|
703 | return codecs.escape_decode(s)[0] # pytype: disable=module-attr | |
700 |
|
704 | |||
701 |
|
705 | |||
702 | def forcebytestr(obj): |
|
706 | def forcebytestr(obj): |
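Note: `codecs.escape_encode` and `codecs.escape_decode` are real CPython functions, but they are undocumented and absent from the type stubs, which is presumably why pytype flags them as `module-attr` errors; the pragmas above silence exactly that. A quick round-trip showing what the pair does:

```python
import codecs

raw = b"tab\there\x00"
# escape_encode returns a (bytes, length-consumed) tuple, hence the [0].
escaped = codecs.escape_encode(raw)[0]    # pytype: disable=module-attr
assert escaped == b"tab\\there\\x00"
back = codecs.escape_decode(escaped)[0]   # pytype: disable=module-attr
assert back == raw
```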
@@ -18,11 +18,9 b' from . import (' | |||||
18 | util, |
|
18 | util, | |
19 | wireprototypes, |
|
19 | wireprototypes, | |
20 | wireprotov1server, |
|
20 | wireprotov1server, | |
21 | wireprotov2server, |
|
|||
22 | ) |
|
21 | ) | |
23 | from .interfaces import util as interfaceutil |
|
22 | from .interfaces import util as interfaceutil | |
24 | from .utils import ( |
|
23 | from .utils import ( | |
25 | cborutil, |
|
|||
26 | compression, |
|
24 | compression, | |
27 | stringutil, |
|
25 | stringutil, | |
28 | ) |
|
26 | ) | |
@@ -39,7 +37,6 b" HGTYPE2 = b'application/mercurial-0.2'" | |||||
39 | HGERRTYPE = b'application/hg-error' |
|
37 | HGERRTYPE = b'application/hg-error' | |
40 |
|
38 | |||
41 | SSHV1 = wireprototypes.SSHV1 |
|
39 | SSHV1 = wireprototypes.SSHV1 | |
42 | SSHV2 = wireprototypes.SSHV2 |
|
|||
43 |
|
40 | |||
44 |
|
41 | |||
45 | def decodevaluefromheaders(req, headerprefix): |
|
42 | def decodevaluefromheaders(req, headerprefix): | |
@@ -244,97 +241,6 b' def handlewsgirequest(rctx, req, res, ch' | |||||
244 | return True |
|
241 | return True | |
245 |
|
242 | |||
246 |
|
243 | |||
247 | def _availableapis(repo): |
|
|||
248 | apis = set() |
|
|||
249 |
|
||||
250 | # Registered APIs are made available via config options of the name of |
|
|||
251 | # the protocol. |
|
|||
252 | for k, v in API_HANDLERS.items(): |
|
|||
253 | section, option = v[b'config'] |
|
|||
254 | if repo.ui.configbool(section, option): |
|
|||
255 | apis.add(k) |
|
|||
256 |
|
||||
257 | return apis |
|
|||
258 |
|
||||
259 |
|
||||
260 | def handlewsgiapirequest(rctx, req, res, checkperm): |
|
|||
261 | """Handle requests to /api/*.""" |
|
|||
262 | assert req.dispatchparts[0] == b'api' |
|
|||
263 |
|
||||
264 | repo = rctx.repo |
|
|||
265 |
|
||||
266 | # This whole URL space is experimental for now. But we want to |
|
|||
267 | # reserve the URL space. So, 404 all URLs if the feature isn't enabled. |
|
|||
268 | if not repo.ui.configbool(b'experimental', b'web.apiserver'): |
|
|||
269 | res.status = b'404 Not Found' |
|
|||
270 | res.headers[b'Content-Type'] = b'text/plain' |
|
|||
271 | res.setbodybytes(_(b'Experimental API server endpoint not enabled')) |
|
|||
272 | return |
|
|||
273 |
|
||||
274 | # The URL space is /api/<protocol>/*. The structure of URLs under varies |
|
|||
275 | # by <protocol>. |
|
|||
276 |
|
||||
277 | availableapis = _availableapis(repo) |
|
|||
278 |
|
||||
279 | # Requests to /api/ list available APIs. |
|
|||
280 | if req.dispatchparts == [b'api']: |
|
|||
281 | res.status = b'200 OK' |
|
|||
282 | res.headers[b'Content-Type'] = b'text/plain' |
|
|||
283 | lines = [ |
|
|||
284 | _( |
|
|||
285 | b'APIs can be accessed at /api/<name>, where <name> can be ' |
|
|||
286 | b'one of the following:\n' |
|
|||
287 | ) |
|
|||
288 | ] |
|
|||
289 | if availableapis: |
|
|||
290 | lines.extend(sorted(availableapis)) |
|
|||
291 | else: |
|
|||
292 | lines.append(_(b'(no available APIs)\n')) |
|
|||
293 | res.setbodybytes(b'\n'.join(lines)) |
|
|||
294 | return |
|
|||
295 |
|
||||
296 | proto = req.dispatchparts[1] |
|
|||
297 |
|
||||
298 | if proto not in API_HANDLERS: |
|
|||
299 | res.status = b'404 Not Found' |
|
|||
300 | res.headers[b'Content-Type'] = b'text/plain' |
|
|||
301 | res.setbodybytes( |
|
|||
302 | _(b'Unknown API: %s\nKnown APIs: %s') |
|
|||
303 | % (proto, b', '.join(sorted(availableapis))) |
|
|||
304 | ) |
|
|||
305 | return |
|
|||
306 |
|
||||
307 | if proto not in availableapis: |
|
|||
308 | res.status = b'404 Not Found' |
|
|||
309 | res.headers[b'Content-Type'] = b'text/plain' |
|
|||
310 | res.setbodybytes(_(b'API %s not enabled\n') % proto) |
|
|||
311 | return |
|
|||
312 |
|
||||
313 | API_HANDLERS[proto][b'handler']( |
|
|||
314 | rctx, req, res, checkperm, req.dispatchparts[2:] |
|
|||
315 | ) |
|
|||
316 |
|
||||
317 |
|
||||
318 | # Maps API name to metadata so custom API can be registered. |
|
|||
319 | # Keys are: |
|
|||
320 | # |
|
|||
321 | # config |
|
|||
322 | # Config option that controls whether service is enabled. |
|
|||
323 | # handler |
|
|||
324 | # Callable receiving (rctx, req, res, checkperm, urlparts) that is called |
|
|||
325 | # when a request to this API is received. |
|
|||
326 | # apidescriptor |
|
|||
327 | # Callable receiving (req, repo) that is called to obtain an API |
|
|||
328 | # descriptor for this service. The response must be serializable to CBOR. |
|
|||
329 | API_HANDLERS = { |
|
|||
330 | wireprotov2server.HTTP_WIREPROTO_V2: { |
|
|||
331 | b'config': (b'experimental', b'web.api.http-v2'), |
|
|||
332 | b'handler': wireprotov2server.handlehttpv2request, |
|
|||
333 | b'apidescriptor': wireprotov2server.httpv2apidescriptor, |
|
|||
334 | }, |
|
|||
335 | } |
|
|||
336 |
|
||||
337 |
|
||||
338 | def _httpresponsetype(ui, proto, prefer_uncompressed): |
|
244 | def _httpresponsetype(ui, proto, prefer_uncompressed): | |
339 | """Determine the appropriate response type and compression settings. |
|
245 | """Determine the appropriate response type and compression settings. | |
340 |
|
246 | |||
@@ -371,55 +277,6 b' def _httpresponsetype(ui, proto, prefer_' | |||||
371 | return HGTYPE, util.compengines[b'zlib'], opts |
|
277 | return HGTYPE, util.compengines[b'zlib'], opts | |
372 |
|
278 | |||
373 |
|
279 | |||
374 | def processcapabilitieshandshake(repo, req, res, proto): |
|
|||
375 | """Called during a ?cmd=capabilities request. |
|
|||
376 |
|
||||
377 | If the client is advertising support for a newer protocol, we send |
|
|||
378 | a CBOR response with information about available services. If no |
|
|||
379 | advertised services are available, we don't handle the request. |
|
|||
380 | """ |
|
|||
381 | # Fall back to old behavior unless the API server is enabled. |
|
|||
382 | if not repo.ui.configbool(b'experimental', b'web.apiserver'): |
|
|||
383 | return False |
|
|||
384 |
|
||||
385 | clientapis = decodevaluefromheaders(req, b'X-HgUpgrade') |
|
|||
386 | protocaps = decodevaluefromheaders(req, b'X-HgProto') |
|
|||
387 | if not clientapis or not protocaps: |
|
|||
388 | return False |
|
|||
389 |
|
||||
390 | # We currently only support CBOR responses. |
|
|||
391 | protocaps = set(protocaps.split(b' ')) |
|
|||
392 | if b'cbor' not in protocaps: |
|
|||
393 | return False |
|
|||
394 |
|
||||
395 | descriptors = {} |
|
|||
396 |
|
||||
397 | for api in sorted(set(clientapis.split()) & _availableapis(repo)): |
|
|||
398 | handler = API_HANDLERS[api] |
|
|||
399 |
|
||||
400 | descriptorfn = handler.get(b'apidescriptor') |
|
|||
401 | if not descriptorfn: |
|
|||
402 | continue |
|
|||
403 |
|
||||
404 | descriptors[api] = descriptorfn(req, repo) |
|
|||
405 |
|
||||
406 | v1caps = wireprotov1server.dispatch(repo, proto, b'capabilities') |
|
|||
407 | assert isinstance(v1caps, wireprototypes.bytesresponse) |
|
|||
408 |
|
||||
409 | m = { |
|
|||
410 | # TODO allow this to be configurable. |
|
|||
411 | b'apibase': b'api/', |
|
|||
412 | b'apis': descriptors, |
|
|||
413 | b'v1capabilities': v1caps.data, |
|
|||
414 | } |
|
|||
415 |
|
||||
416 | res.status = b'200 OK' |
|
|||
417 | res.headers[b'Content-Type'] = b'application/mercurial-cbor' |
|
|||
418 | res.setbodybytes(b''.join(cborutil.streamencode(m))) |
|
|||
419 |
|
||||
420 | return True |
|
|||
421 |
|
||||
422 |
|
||||
423 | def _callhttp(repo, req, res, proto, cmd): |
|
280 | def _callhttp(repo, req, res, proto, cmd): | |
424 | # Avoid cycle involving hg module. |
|
281 | # Avoid cycle involving hg module. | |
425 | from .hgweb import common as hgwebcommon |
|
282 | from .hgweb import common as hgwebcommon | |
@@ -461,13 +318,6 b' def _callhttp(repo, req, res, proto, cmd' | |||||
461 |
|
318 | |||
462 | proto.checkperm(wireprotov1server.commands[cmd].permission) |
|
319 | proto.checkperm(wireprotov1server.commands[cmd].permission) | |
463 |
|
320 | |||
464 | # Possibly handle a modern client wanting to switch protocols. |
|
|||
465 | if cmd == b'capabilities' and processcapabilitieshandshake( |
|
|||
466 | repo, req, res, proto |
|
|||
467 | ): |
|
|||
468 |
|
||||
469 | return |
|
|||
470 |
|
||||
471 | rsp = wireprotov1server.dispatch(repo, proto, cmd) |
|
321 | rsp = wireprotov1server.dispatch(repo, proto, cmd) | |
472 |
|
322 | |||
473 | if isinstance(rsp, bytes): |
|
323 | if isinstance(rsp, bytes): | |
@@ -596,17 +446,6 b' class sshv1protocolhandler(object):' | |||||
596 | pass |
|
446 | pass | |
597 |
|
447 | |||
598 |
|
448 | |||
599 | class sshv2protocolhandler(sshv1protocolhandler): |
|
|||
600 | """Protocol handler for version 2 of the SSH protocol.""" |
|
|||
601 |
|
||||
602 | @property |
|
|||
603 | def name(self): |
|
|||
604 | return wireprototypes.SSHV2 |
|
|||
605 |
|
||||
606 | def addcapabilities(self, repo, caps): |
|
|||
607 | return caps |
|
|||
608 |
|
||||
609 |
|
||||
610 | def _runsshserver(ui, repo, fin, fout, ev): |
|
449 | def _runsshserver(ui, repo, fin, fout, ev): | |
611 | # This function operates like a state machine of sorts. The following |
|
450 | # This function operates like a state machine of sorts. The following | |
612 | # states are defined: |
|
451 | # states are defined: | |
@@ -616,19 +455,6 b' def _runsshserver(ui, repo, fin, fout, e' | |||||
616 | # new lines. These commands are processed in this state, one command |
|
455 | # new lines. These commands are processed in this state, one command | |
617 | # after the other. |
|
456 | # after the other. | |
618 | # |
|
457 | # | |
619 | # protov2-serving |
|
|||
620 | # Server is in protocol version 2 serving mode. |
|
|||
621 | # |
|
|||
622 | # upgrade-initial |
|
|||
623 | # The server is going to process an upgrade request. |
|
|||
624 | # |
|
|||
625 | # upgrade-v2-filter-legacy-handshake |
|
|||
626 | # The protocol is being upgraded to version 2. The server is expecting |
|
|||
627 | # the legacy handshake from version 1. |
|
|||
628 | # |
|
|||
629 | # upgrade-v2-finish |
|
|||
630 | # The upgrade to version 2 of the protocol is imminent. |
|
|||
631 | # |
|
|||
632 | # shutdown |
|
458 | # shutdown | |
633 | # The server is shutting down, possibly in reaction to a client event. |
|
459 | # The server is shutting down, possibly in reaction to a client event. | |
634 | # |
|
460 | # | |
@@ -637,32 +463,9 b' def _runsshserver(ui, repo, fin, fout, e' | |||||
637 | # protov1-serving -> shutdown |
|
463 | # protov1-serving -> shutdown | |
638 | # When server receives an empty request or encounters another |
|
464 | # When server receives an empty request or encounters another | |
639 | # error. |
|
465 | # error. | |
640 | # |
|
|||
641 | # protov1-serving -> upgrade-initial |
|
|||
642 | # An upgrade request line was seen. |
|
|||
643 | # |
|
|||
644 | # upgrade-initial -> upgrade-v2-filter-legacy-handshake |
|
|||
645 | # Upgrade to version 2 in progress. Server is expecting to |
|
|||
646 | # process a legacy handshake. |
|
|||
647 | # |
|
|||
648 | # upgrade-v2-filter-legacy-handshake -> shutdown |
|
|||
649 | # Client did not fulfill upgrade handshake requirements. |
|
|||
650 | # |
|
|||
651 | # upgrade-v2-filter-legacy-handshake -> upgrade-v2-finish |
|
|||
652 | # Client fulfilled version 2 upgrade requirements. Finishing that |
|
|||
653 | # upgrade. |
|
|||
654 | # |
|
|||
655 | # upgrade-v2-finish -> protov2-serving |
|
|||
656 | # Protocol upgrade to version 2 complete. Server can now speak protocol |
|
|||
657 | # version 2. |
|
|||
658 | # |
|
|||
659 | # protov2-serving -> protov1-serving |
|
|||
660 | # This happens by default since protocol version 2 is the same as |
|
|||
661 | # version 1 except for the handshake. |
|
|||
662 |
|
466 | |||
663 | state = b'protov1-serving' |
|
467 | state = b'protov1-serving' | |
664 | proto = sshv1protocolhandler(ui, fin, fout) |
|
468 | proto = sshv1protocolhandler(ui, fin, fout) | |
665 | protoswitched = False |
|
|||
666 |
|
469 | |||
667 | while not ev.is_set(): |
|
470 | while not ev.is_set(): | |
668 | if state == b'protov1-serving': |
|
471 | if state == b'protov1-serving': | |
@@ -674,21 +477,6 b' def _runsshserver(ui, repo, fin, fout, e' | |||||
674 | state = b'shutdown' |
|
477 | state = b'shutdown' | |
675 | continue |
|
478 | continue | |
676 |
|
479 | |||
677 | # It looks like a protocol upgrade request. Transition state to |
|
|||
678 | # handle it. |
|
|||
679 | if request.startswith(b'upgrade '): |
|
|||
680 | if protoswitched: |
|
|||
681 | _sshv1respondooberror( |
|
|||
682 | fout, |
|
|||
683 | ui.ferr, |
|
|||
684 | b'cannot upgrade protocols multiple times', |
|
|||
685 | ) |
|
|||
686 | state = b'shutdown' |
|
|||
687 | continue |
|
|||
688 |
|
||||
689 | state = b'upgrade-initial' |
|
|||
690 | continue |
|
|||
691 |
|
||||
692 | available = wireprotov1server.commands.commandavailable( |
|
480 | available = wireprotov1server.commands.commandavailable( | |
693 | request, proto |
|
481 | request, proto | |
694 | ) |
|
482 | ) | |
@@ -724,108 +512,6 b' def _runsshserver(ui, repo, fin, fout, e' | |||||
724 | b'wire protocol command: %s' % rsp |
|
512 | b'wire protocol command: %s' % rsp | |
725 | ) |
|
513 | ) | |
726 |
|
514 | |||
727 | # For now, protocol version 2 serving just goes back to version 1. |
|
|||
728 | elif state == b'protov2-serving': |
|
|||
729 | state = b'protov1-serving' |
|
|||
730 | continue |
|
|||
731 |
|
||||
732 | elif state == b'upgrade-initial': |
|
|||
733 | # We should never transition into this state if we've switched |
|
|||
734 | # protocols. |
|
|||
735 | assert not protoswitched |
|
|||
736 | assert proto.name == wireprototypes.SSHV1 |
|
|||
737 |
|
||||
738 | # Expected: upgrade <token> <capabilities> |
|
|||
739 | # If we get something else, the request is malformed. It could be |
|
|||
740 | # from a future client that has altered the upgrade line content. |
|
|||
741 | # We treat this as an unknown command. |
|
|||
742 | try: |
|
|||
743 | token, caps = request.split(b' ')[1:] |
|
|||
744 | except ValueError: |
|
|||
745 | _sshv1respondbytes(fout, b'') |
|
|||
746 | state = b'protov1-serving' |
|
|||
747 | continue |
|
|||
748 |
|
||||
749 | # Send empty response if we don't support upgrading protocols. |
|
|||
750 | if not ui.configbool(b'experimental', b'sshserver.support-v2'): |
|
|||
751 | _sshv1respondbytes(fout, b'') |
|
|||
752 | state = b'protov1-serving' |
|
|||
753 | continue |
|
|||
754 |
|
||||
755 | try: |
|
|||
756 | caps = urlreq.parseqs(caps) |
|
|||
757 | except ValueError: |
|
|||
758 | _sshv1respondbytes(fout, b'') |
|
|||
759 | state = b'protov1-serving' |
|
|||
760 | continue |
|
|||
761 |
|
||||
762 | # We don't see an upgrade request to protocol version 2. Ignore |
|
|||
763 | # the upgrade request. |
|
|||
764 | wantedprotos = caps.get(b'proto', [b''])[0] |
|
|||
765 | if SSHV2 not in wantedprotos: |
|
|||
766 | _sshv1respondbytes(fout, b'') |
|
|||
767 | state = b'protov1-serving' |
|
|||
768 | continue |
|
|||
769 |
|
||||
770 | # It looks like we can honor this upgrade request to protocol 2. |
|
|||
771 | # Filter the rest of the handshake protocol request lines. |
|
|||
772 | state = b'upgrade-v2-filter-legacy-handshake' |
|
|||
773 | continue |
|
|||
774 |
|
||||
775 | elif state == b'upgrade-v2-filter-legacy-handshake': |
|
|||
776 | # Client should have sent legacy handshake after an ``upgrade`` |
|
|||
777 | # request. Expected lines: |
|
|||
778 | # |
|
|||
779 | # hello |
|
|||
780 | # between |
|
|||
781 | # pairs 81 |
|
|||
782 | # 0000...-0000... |
|
|||
783 |
|
||||
784 | ok = True |
|
|||
785 | for line in (b'hello', b'between', b'pairs 81'): |
|
|||
786 | request = fin.readline()[:-1] |
|
|||
787 |
|
||||
788 | if request != line: |
|
|||
789 | _sshv1respondooberror( |
|
|||
790 | fout, |
|
|||
791 | ui.ferr, |
|
|||
792 | b'malformed handshake protocol: missing %s' % line, |
|
|||
793 | ) |
|
|||
794 | ok = False |
|
|||
795 | state = b'shutdown' |
|
|||
796 | break |
|
|||
797 |
|
||||
798 | if not ok: |
|
|||
799 | continue |
|
|||
800 |
|
||||
801 | request = fin.read(81) |
|
|||
802 | if request != b'%s-%s' % (b'0' * 40, b'0' * 40): |
|
|||
803 | _sshv1respondooberror( |
|
|||
804 | fout, |
|
|||
805 | ui.ferr, |
|
|||
806 | b'malformed handshake protocol: ' |
|
|||
807 | b'missing between argument value', |
|
|||
808 | ) |
|
|||
809 | state = b'shutdown' |
|
|||
810 | continue |
|
|||
811 |
|
||||
812 | state = b'upgrade-v2-finish' |
|
|||
813 | continue |
|
|||
814 |
|
||||
815 | elif state == b'upgrade-v2-finish': |
|
|||
816 | # Send the upgrade response. |
|
|||
817 | fout.write(b'upgraded %s %s\n' % (token, SSHV2)) |
|
|||
818 | servercaps = wireprotov1server.capabilities(repo, proto) |
|
|||
819 | rsp = b'capabilities: %s' % servercaps.data |
|
|||
820 | fout.write(b'%d\n%s\n' % (len(rsp), rsp)) |
|
|||
821 | fout.flush() |
|
|||
822 |
|
||||
823 | proto = sshv2protocolhandler(ui, fin, fout) |
|
|||
824 | protoswitched = True |
|
|||
825 |
|
||||
826 | state = b'protov2-serving' |
|
|||
827 | continue |
|
|||
828 |
|
||||
829 | elif state == b'shutdown': |
|
515 | elif state == b'shutdown': | |
830 | break |
|
516 | break | |
831 |
|
517 |
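Note: with the experimental v2 upgrade dance removed, the SSH server's state machine collapses to the two states left in the comment above, `protov1-serving` and `shutdown`. A skeletal sketch of the surviving loop; `readline`, `dispatch`, and `respond` are hypothetical stand-ins for the sshv1 handler plumbing:

```python
# Skeleton of the post-removal server loop (hypothetical helper names).
def run_ssh_server(readline, dispatch, respond, stop_event):
    state = "protov1-serving"
    while not stop_event.is_set():
        if state == "protov1-serving":
            request = readline()[:-1]
            if not request:  # empty request: the client went away
                state = "shutdown"
                continue
            respond(dispatch(request))
        elif state == "shutdown":
            break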
@@ -21,10 +21,6 b' from .utils import compression' | |||||
21 |
|
21 | |||
22 | # Names of the SSH protocol implementations. |
|
22 | # Names of the SSH protocol implementations. | |
23 | SSHV1 = b'ssh-v1' |
|
23 | SSHV1 = b'ssh-v1' | |
24 | # These are advertised over the wire. Increment the counters at the end |
|
|||
25 | # to reflect BC breakages. |
|
|||
26 | SSHV2 = b'exp-ssh-v2-0003' |
|
|||
27 | HTTP_WIREPROTO_V2 = b'exp-http-v2-0003' |
|
|||
28 |
|
24 | |||
29 | NARROWCAP = b'exp-narrow-1' |
|
25 | NARROWCAP = b'exp-narrow-1' | |
30 | ELLIPSESCAP1 = b'exp-ellipses-1' |
|
26 | ELLIPSESCAP1 = b'exp-ellipses-1' | |
@@ -37,19 +33,10 b' TRANSPORTS = {' | |||||
37 | b'transport': b'ssh', |
|
33 | b'transport': b'ssh', | |
38 | b'version': 1, |
|
34 | b'version': 1, | |
39 | }, |
|
35 | }, | |
40 | SSHV2: { |
|
|||
41 | b'transport': b'ssh', |
|
|||
42 | # TODO mark as version 2 once all commands are implemented. |
|
|||
43 | b'version': 1, |
|
|||
44 | }, |
|
|||
45 | b'http-v1': { |
|
36 | b'http-v1': { | |
46 | b'transport': b'http', |
|
37 | b'transport': b'http', | |
47 | b'version': 1, |
|
38 | b'version': 1, | |
48 | }, |
|
39 | }, | |
49 | HTTP_WIREPROTO_V2: { |
|
|||
50 | b'transport': b'http', |
|
|||
51 | b'version': 2, |
|
|||
52 | }, |
|
|||
53 | } |
|
40 | } | |
54 |
|
41 | |||
55 |
|
42 |
@@ -147,12 +147,6 b' def wireprotocommand(name, args=None, pe' | |||||
147 | k for k, v in wireprototypes.TRANSPORTS.items() if v[b'version'] == 1 |
|
147 | k for k, v in wireprototypes.TRANSPORTS.items() if v[b'version'] == 1 | |
148 | } |
|
148 | } | |
149 |
|
149 | |||
150 | # Because SSHv2 is a mirror of SSHv1, we allow "batch" commands through to |
|
|||
151 | # SSHv2. |
|
|||
152 | # TODO undo this hack when SSH is using the unified frame protocol. |
|
|||
153 | if name == b'batch': |
|
|||
154 | transports.add(wireprototypes.SSHV2) |
|
|||
155 |
|
||||
156 | if permission not in (b'push', b'pull'): |
|
150 | if permission not in (b'push', b'pull'): | |
157 | raise error.ProgrammingError( |
|
151 | raise error.ProgrammingError( | |
158 | b'invalid wire protocol permission; ' |
|
152 | b'invalid wire protocol permission; ' | |
@@ -306,7 +300,7 b' def _capabilities(repo, proto):' | |||||
306 | if streamclone.allowservergeneration(repo): |
|
300 | if streamclone.allowservergeneration(repo): | |
307 | if repo.ui.configbool(b'server', b'preferuncompressed'): |
|
301 | if repo.ui.configbool(b'server', b'preferuncompressed'): | |
308 | caps.append(b'stream-preferred') |
|
302 | caps.append(b'stream-preferred') | |
309 | requiredformats = repo |
|
303 | requiredformats = streamclone.streamed_requirements(repo) | |
310 | # if our local revlogs are just revlogv1, add 'stream' cap |
|
304 | # if our local revlogs are just revlogv1, add 'stream' cap | |
311 | if not requiredformats - {requirementsmod.REVLOGV1_REQUIREMENT}: |
|
305 | if not requiredformats - {requirementsmod.REVLOGV1_REQUIREMENT}: | |
312 | caps.append(b'stream') |
|
306 | caps.append(b'stream') |
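Note: the capability logic is unchanged, only the source of the requirement set moves to `streamclone.streamed_requirements(repo)`: if everything a stream clone would send is plain revlogv1, the bare `stream` capability is advertised; otherwise `streamreqs=` spells out the formats so clients can check support. A hedged sketch of that decision (requirement strings illustrative):

```python
# Sketch of the stream-capability decision in _capabilities().
REVLOGV1 = b"revlogv1"

def stream_caps(streamed_requirements):
    if not streamed_requirements - {REVLOGV1}:
        return [b"stream"]  # plain cap: any stream-capable client is fine
    # otherwise list the formats so old clients can bail out
    return [b"streamreqs=%s" % b",".join(sorted(streamed_requirements))]

assert stream_caps({REVLOGV1}) == [b"stream"]
assert stream_caps({REVLOGV1, b"generaldelta"}) == [
    b"streamreqs=generaldelta,revlogv1"
]
```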
@@ -4,16 +4,42 b'' | |||||
4 | == Default Format Change == |
|
4 | == Default Format Change == | |
5 |
|
5 | |||
6 | These changes affect newly created repositories (or new clones) done with |
|
6 | These changes affect newly created repositories (or new clones) done with | |
7 | Mercurial |
|
7 | Mercurial 6.1. | |
|
8 | ||||
|
9 | The `share-safe` format variant is now enabled by default. It makes | |||
|
10 | configuration and requirements more consistent across repositories and their | |
|
11 | shares. This introduces a behavior change as shares from a repository using the | |||
|
12 | new format will also use their main repository's configuration. | |||
|
13 | ||||
|
14 | See `hg help config.format.use-share-safe` for details about the feature and | |||
|
15 | the available options for auto-upgrading existing shares. | |||
8 |
|
16 | |||
9 |
|
17 | |||
10 | == New Experimental Features == |
|
18 | == New Experimental Features == | |
11 |
|
19 | |||
12 | == Bug Fixes == |
|
20 | == Bug Fixes == | |
13 |
|
21 | |||
|
22 | The `--no-check` and `--no-merge` flags now properly override the behavior from `commands.update.check`. | |
14 |
|
23 | |||
15 | == Backwards Compatibility Changes == |
|
24 | == Backwards Compatibility Changes == | |
16 |
|
25 | |||
|
26 | The remotefilelog extension now requires an appropriate excludepattern | |
|
27 | for subrepositories. | |||
|
28 | ||||
|
29 | The labels passed to merge tools have changed slightly. Merge tools can get | |||
|
30 | labels passed to them if you include `$labellocal`, `$labelbase`, and/or | |||
|
31 | `$labelother` in the `merge-tool.<tool name>.args` configuration. These labels | |||
|
32 | used to have some space-padding and truncation to fit within 72 columns. Both | |
|
33 | the padding and the truncation have been removed. | |
|
34 | ||||
|
35 | Some of the text in labels passed to merge tools has changed. For example, | |||
|
36 | in conflicts while running `hg histedit`, the labels used to be "local", | |||
|
37 | "base", and "histedit". They are now "already edited", | |||
|
38 | "parent of current change", and "current change", respectively. | |||
|
39 | ||||
|
40 | The use of `share-safe` means shares (of new repositories) will also use their | |
|
41 | main repository's configuration; see the `Default Format Change` section | |
|
42 | for details. | |
17 |
|
43 | |||
18 | == Internal API Changes == |
|
44 | == Internal API Changes == | |
19 |
|
45 |
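Note: the new default only applies to repositories created or cloned with 6.1; existing repositories keep their format until explicitly upgraded, and `format.use-share-safe` together with the options documented in `hg help config.format.use-share-safe` controls how mismatched shares are handled. As a small illustration, a repository's share-safe status can be read straight from its requirements file; this is a hedged sketch, not a Mercurial API:

```python
import os

def is_share_safe(repo_root):
    """Report whether a repository already carries the `share-safe`
    requirement. `.hg/requires` lists one requirement per line; for
    share-safe repositories the store-level requirements live in
    `.hg/store/requires` instead."""
    path = os.path.join(repo_root, ".hg", "requires")
    with open(path, "rb") as f:
        return b"share-safe" in set(f.read().splitlines())
```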
@@ -1,5 +1,7 b'' | |||||
1 | # This file is automatically @generated by Cargo. |
|
1 | # This file is automatically @generated by Cargo. | |
2 | # It is not intended for manual editing. |
|
2 | # It is not intended for manual editing. | |
|
3 | version = 3 | |||
|
4 | ||||
3 | [[package]] |
|
5 | [[package]] | |
4 | name = "adler" |
|
6 | name = "adler" | |
5 | version = "0.2.3" |
|
7 | version = "0.2.3" | |
@@ -314,21 +316,19 b' dependencies = [' | |||||
314 |
|
316 | |||
315 | [[package]] |
|
317 | [[package]] | |
316 | name = "format-bytes" |
|
318 | name = "format-bytes" | |
317 | version = "0. |
|
319 | version = "0.3.0" | |
318 | source = "registry+https://github.com/rust-lang/crates.io-index" |
|
320 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
319 | checksum = "1c4e89040c7fd7b4e6ba2820ac705a45def8a0c098ec78d170ae88f1ef1d5762" |
|
321 | checksum = "48942366ef93975da38e175ac9e10068c6fc08ca9e85930d4f098f4d5b14c2fd" | |
320 | dependencies = [ |
|
322 | dependencies = [ | |
321 | "format-bytes-macros", |
|
323 | "format-bytes-macros", | |
322 | "proc-macro-hack", |
|
|||
323 | ] |
|
324 | ] | |
324 |
|
325 | |||
325 | [[package]] |
|
326 | [[package]] | |
326 | name = "format-bytes-macros" |
|
327 | name = "format-bytes-macros" | |
327 | version = "0. |
|
328 | version = "0.4.0" | |
328 | source = "registry+https://github.com/rust-lang/crates.io-index" |
|
329 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
329 | checksum = "b05089e341a0460449e2210c3bf7b61597860b07f0deae58da38dbed0a4c6b6d" |
|
330 | checksum = "203aadebefcc73d12038296c228eabf830f99cba991b0032adf20e9fa6ce7e4f" | |
330 | dependencies = [ |
|
331 | dependencies = [ | |
331 | "proc-macro-hack", |
|
|||
332 | "proc-macro2", |
|
332 | "proc-macro2", | |
333 | "quote", |
|
333 | "quote", | |
334 | "syn", |
|
334 | "syn", | |
@@ -356,6 +356,17 b' dependencies = [' | |||||
356 | ] |
|
356 | ] | |
357 |
|
357 | |||
358 | [[package]] |
|
358 | [[package]] | |
|
359 | name = "getrandom" | |||
|
360 | version = "0.2.4" | |||
|
361 | source = "registry+https://github.com/rust-lang/crates.io-index" | |||
|
362 | checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" | |||
|
363 | dependencies = [ | |||
|
364 | "cfg-if 1.0.0", | |||
|
365 | "libc", | |||
|
366 | "wasi 0.10.0+wasi-snapshot-preview1", | |||
|
367 | ] | |||
|
368 | ||||
|
369 | [[package]] | |||
359 | name = "glob" |
|
370 | name = "glob" | |
360 | version = "0.3.0" |
|
371 | version = "0.3.0" | |
361 | source = "registry+https://github.com/rust-lang/crates.io-index" |
|
372 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
@@ -371,6 +382,12 b' dependencies = [' | |||||
371 | ] |
|
382 | ] | |
372 |
|
383 | |||
373 | [[package]] |
|
384 | [[package]] | |
|
385 | name = "hex" | |||
|
386 | version = "0.4.3" | |||
|
387 | source = "registry+https://github.com/rust-lang/crates.io-index" | |||
|
388 | checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" | |||
|
389 | ||||
|
390 | [[package]] | |||
374 | name = "hg-core" |
|
391 | name = "hg-core" | |
375 | version = "0.1.0" |
|
392 | version = "0.1.0" | |
376 | dependencies = [ |
|
393 | dependencies = [ | |
@@ -391,7 +408,7 b' dependencies = [' | |||||
391 | "memmap2", |
|
408 | "memmap2", | |
392 | "micro-timer", |
|
409 | "micro-timer", | |
393 | "pretty_assertions", |
|
410 | "pretty_assertions", | |
394 | "rand", |
|
411 | "rand 0.8.4", | |
395 | "rand_distr", |
|
412 | "rand_distr", | |
396 | "rand_pcg", |
|
413 | "rand_pcg", | |
397 | "rayon", |
|
414 | "rayon", | |
@@ -415,6 +432,7 b' dependencies = [' | |||||
415 | "libc", |
|
432 | "libc", | |
416 | "log", |
|
433 | "log", | |
417 | "stable_deref_trait", |
|
434 | "stable_deref_trait", | |
|
435 | "vcsgraph", | |||
418 | ] |
|
436 | ] | |
419 |
|
437 | |||
420 | [[package]] |
|
438 | [[package]] | |
@@ -442,7 +460,7 b' source = "registry+https://github.com/ru' | |||||
442 | checksum = "3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f" |
|
460 | checksum = "3ca8957e71f04a205cb162508f9326aea04676c8dfd0711220190d6b83664f3f" | |
443 | dependencies = [ |
|
461 | dependencies = [ | |
444 | "bitmaps", |
|
462 | "bitmaps", | |
445 | "rand_core", |
|
463 | "rand_core 0.5.1", | |
446 | "rand_xoshiro", |
|
464 | "rand_xoshiro", | |
447 | "sized-chunks", |
|
465 | "sized-chunks", | |
448 | "typenum", |
|
466 | "typenum", | |
@@ -480,6 +498,12 b' source = "registry+https://github.com/ru' | |||||
480 | checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" |
|
498 | checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" | |
481 |
|
499 | |||
482 | [[package]] |
|
500 | [[package]] | |
|
501 | name = "libm" | |||
|
502 | version = "0.2.1" | |||
|
503 | source = "registry+https://github.com/rust-lang/crates.io-index" | |||
|
504 | checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" | |||
|
505 | ||||
|
506 | [[package]] | |||
483 | name = "libz-sys" |
|
507 | name = "libz-sys" | |
484 | version = "1.1.2" |
|
508 | version = "1.1.2" | |
485 | source = "registry+https://github.com/rust-lang/crates.io-index" |
|
509 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
@@ -579,6 +603,7 b' source = "registry+https://github.com/ru' | |||||
579 | checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" |
|
603 | checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" | |
580 | dependencies = [ |
|
604 | dependencies = [ | |
581 | "autocfg", |
|
605 | "autocfg", | |
|
606 | "libm", | |||
582 | ] |
|
607 | ] | |
583 |
|
608 | |||
584 | [[package]] |
|
609 | [[package]] | |
@@ -637,12 +662,6 b' dependencies = [' | |||||
637 | ] |
|
662 | ] | |
638 |
|
663 | |||
639 | [[package]] |
|
664 | [[package]] | |
640 | name = "proc-macro-hack" |
|
|||
641 | version = "0.5.19" |
|
|||
642 | source = "registry+https://github.com/rust-lang/crates.io-index" |
|
|||
643 | checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" |
|
|||
644 |
|
||||
645 | [[package]] |
|
|||
646 | name = "proc-macro2" |
|
665 | name = "proc-macro2" | |
647 | version = "1.0.24" |
|
666 | version = "1.0.24" | |
648 | source = "registry+https://github.com/rust-lang/crates.io-index" |
|
667 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
@@ -692,11 +711,23 b' version = "0.7.3"' | |||||
692 | source = "registry+https://github.com/rust-lang/crates.io-index" |
|
711 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
693 | checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" |
|
712 | checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" | |
694 | dependencies = [ |
|
713 | dependencies = [ | |
695 | "getrandom", |
|
714 | "getrandom 0.1.15", | |
696 | "libc", |
|
715 | "libc", | |
697 | "rand_chacha", |
|
716 | "rand_chacha 0.2.2", | |
698 | "rand_core", |
|
717 | "rand_core 0.5.1", | |
699 | "rand_hc", |
|
718 | "rand_hc 0.2.0", | |
|
719 | ] | |||
|
720 | ||||
|
721 | [[package]] | |||
|
722 | name = "rand" | |||
|
723 | version = "0.8.4" | |||
|
724 | source = "registry+https://github.com/rust-lang/crates.io-index" | |||
|
725 | checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" | |||
|
726 | dependencies = [ | |||
|
727 | "libc", | |||
|
728 | "rand_chacha 0.3.1", | |||
|
729 | "rand_core 0.6.3", | |||
|
730 | "rand_hc 0.3.1", | |||
700 | ] |
|
731 | ] | |
701 |
|
732 | |||
702 | [[package]] |
|
733 | [[package]] | |
@@ -706,7 +737,17 b' source = "registry+https://github.com/ru' | |||||
706 | checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" |
|
737 | checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" | |
707 | dependencies = [ |
|
738 | dependencies = [ | |
708 | "ppv-lite86", |
|
739 | "ppv-lite86", | |
709 | "rand_core", |
|
740 | "rand_core 0.5.1", | |
|
741 | ] | |||
|
742 | ||||
|
743 | [[package]] | |||
|
744 | name = "rand_chacha" | |||
|
745 | version = "0.3.1" | |||
|
746 | source = "registry+https://github.com/rust-lang/crates.io-index" | |||
|
747 | checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" | |||
|
748 | dependencies = [ | |||
|
749 | "ppv-lite86", | |||
|
750 | "rand_core 0.6.3", | |||
710 | ] |
|
751 | ] | |
711 |
|
752 | |||
712 | [[package]] |
|
753 | [[package]] | |
@@ -715,16 +756,26 b' version = "0.5.1"' | |||||
715 | source = "registry+https://github.com/rust-lang/crates.io-index" |
|
756 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
716 | checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" |
|
757 | checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" | |
717 | dependencies = [ |
|
758 | dependencies = [ | |
718 | "getrandom", |
|
759 | "getrandom 0.1.15", | |
|
760 | ] | |||
|
761 | ||||
|
762 | [[package]] | |||
|
763 | name = "rand_core" | |||
|
764 | version = "0.6.3" | |||
|
765 | source = "registry+https://github.com/rust-lang/crates.io-index" | |||
|
766 | checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" | |||
|
767 | dependencies = [ | |||
|
768 | "getrandom 0.2.4", | |||
719 | ] |
|
769 | ] | |
720 |
|
770 | |||
721 | [[package]] |
|
771 | [[package]] | |
722 | name = "rand_distr" |
|
772 | name = "rand_distr" | |
723 | version = "0. |
|
773 | version = "0.4.2" | |
724 | source = "registry+https://github.com/rust-lang/crates.io-index" |
|
774 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
725 | checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2" |
|
775 | checksum = "964d548f8e7d12e102ef183a0de7e98180c9f8729f555897a857b96e48122d2f" | |
726 | dependencies = [ |
|
776 | dependencies = [ | |
727 | "rand", |
|
777 | "num-traits", | |
|
778 | "rand 0.8.4", | |||
728 | ] |
|
779 | ] | |
729 |
|
780 | |||
730 | [[package]] |
|
781 | [[package]] | |
@@ -733,16 +784,25 b' version = "0.2.0"' | |||||
733 | source = "registry+https://github.com/rust-lang/crates.io-index" |
|
784 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
734 | checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" |
|
785 | checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" | |
735 | dependencies = [ |
|
786 | dependencies = [ | |
736 | "rand_core", |
|
787 | "rand_core 0.5.1", | |
|
788 | ] | |||
|
789 | ||||
|
790 | [[package]] | |||
|
791 | name = "rand_hc" | |||
|
792 | version = "0.3.1" | |||
|
793 | source = "registry+https://github.com/rust-lang/crates.io-index" | |||
|
794 | checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" | |||
|
795 | dependencies = [ | |||
|
796 | "rand_core 0.6.3", | |||
737 | ] |
|
797 | ] | |
738 |
|
798 | |||
739 | [[package]] |
|
799 | [[package]] | |
740 | name = "rand_pcg" |
|
800 | name = "rand_pcg" | |
741 | version = "0. |
|
801 | version = "0.3.1" | |
742 | source = "registry+https://github.com/rust-lang/crates.io-index" |
|
802 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
743 | checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" |
|
803 | checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e" | |
744 | dependencies = [ |
|
804 | dependencies = [ | |
745 | "rand_core", |
|
805 | "rand_core 0.6.3", | |
746 | ] |
|
806 | ] | |
747 |
|
807 | |||
748 | [[package]] |
|
808 | [[package]] | |
@@ -751,7 +811,7 b' version = "0.4.0"' | |||||
751 | source = "registry+https://github.com/rust-lang/crates.io-index" |
|
811 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
752 | checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004" |
|
812 | checksum = "a9fcdd2e881d02f1d9390ae47ad8e5696a9e4be7b547a1da2afbc61973217004" | |
753 | dependencies = [ |
|
813 | dependencies = [ | |
754 | "rand_core", |
|
814 | "rand_core 0.5.1", | |
755 | ] |
|
815 | ] | |
756 |
|
816 | |||
757 | [[package]] |
|
817 | [[package]] | |
@@ -816,6 +876,7 b' dependencies = [' | |||||
816 | name = "rhg" |
|
876 | name = "rhg" | |
817 | version = "0.1.0" |
|
877 | version = "0.1.0" | |
818 | dependencies = [ |
|
878 | dependencies = [ | |
|
879 | "atty", | |||
819 | "chrono", |
|
880 | "chrono", | |
820 | "clap", |
|
881 | "clap", | |
821 | "derive_more", |
|
882 | "derive_more", | |
@@ -905,7 +966,7 b' checksum = "7a6e24d9338a0a5be79593e2fa15' | |||||
905 | dependencies = [ |
|
966 | dependencies = [ | |
906 | "cfg-if 0.1.10", |
|
967 | "cfg-if 0.1.10", | |
907 | "libc", |
|
968 | "libc", | |
908 | "rand", |
|
969 | "rand 0.7.3", | |
909 | "redox_syscall", |
|
970 | "redox_syscall", | |
910 | "remove_dir_all", |
|
971 | "remove_dir_all", | |
911 | "winapi", |
|
972 | "winapi", | |
@@ -956,7 +1017,7 b' source = "registry+https://github.com/ru' | |||||
956 | checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" |
|
1017 | checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" | |
957 | dependencies = [ |
|
1018 | dependencies = [ | |
958 | "cfg-if 0.1.10", |
|
1019 | "cfg-if 0.1.10", | |
959 | "rand", |
|
1020 | "rand 0.7.3", | |
960 | "static_assertions", |
|
1021 | "static_assertions", | |
961 | ] |
|
1022 | ] | |
962 |
|
1023 | |||
@@ -995,6 +1056,17 b' source = "registry+https://github.com/ru' | |||||
995 | checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" |
|
1056 | checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" | |
996 |
|
1057 | |||
997 | [[package]] |
|
1058 | [[package]] | |
|
1059 | name = "vcsgraph" | |||
|
1060 | version = "0.2.0" | |||
|
1061 | source = "registry+https://github.com/rust-lang/crates.io-index" | |||
|
1062 | checksum = "4cb68c231e2575f7503a7c19213875f9d4ec2e84e963a56ce3de4b6bee351ef7" | |||
|
1063 | dependencies = [ | |||
|
1064 | "hex", | |||
|
1065 | "rand 0.7.3", | |||
|
1066 | "sha-1", | |||
|
1067 | ] | |||
|
1068 | ||||
|
1069 | [[package]] | |||
998 | name = "vec_map" |
|
1070 | name = "vec_map" | |
999 | version = "0.8.2" |
|
1071 | version = "0.8.2" | |
1000 | source = "registry+https://github.com/rust-lang/crates.io-index" |
|
1072 | source = "registry+https://github.com/rust-lang/crates.io-index" |
@@ -18,9 +18,9 b' im-rc = "15.0.*"' | |||||
18 | itertools = "0.9" |
|
18 | itertools = "0.9" | |
19 | lazy_static = "1.4.0" |
|
19 | lazy_static = "1.4.0" | |
20 | libc = "0.2" |
|
20 | libc = "0.2" | |
21 | rand = "0. |
|
21 | rand = "0.8.4" | |
22 | rand_pcg = "0. |
|
22 | rand_pcg = "0.3.1" | |
23 | rand_distr = "0. |
|
23 | rand_distr = "0.4.2" | |
24 | rayon = "1.3.0" |
|
24 | rayon = "1.3.0" | |
25 | regex = "1.3.9" |
|
25 | regex = "1.3.9" | |
26 | sha-1 = "0.9.6" |
|
26 | sha-1 = "0.9.6" | |
@@ -33,7 +33,7 b' micro-timer = "0.3.0"' | |||||
33 | log = "0.4.8" |
|
33 | log = "0.4.8" | |
34 | memmap2 = {version = "0.4", features = ["stable_deref_trait"]} |
|
34 | memmap2 = {version = "0.4", features = ["stable_deref_trait"]} | |
35 | zstd = "0.5.3" |
|
35 | zstd = "0.5.3" | |
36 | format-bytes = "0. |
|
36 | format-bytes = "0.3.0" | |
37 |
|
37 | |||
38 | # We don't use the `miniz-oxide` backend to not change rhg benchmarks and until |
|
38 | # We don't use the `miniz-oxide` backend to not change rhg benchmarks and until | |
39 | # we have a clearer view of which backend is the fastest. |
|
39 | # we have a clearer view of which backend is the fastest. |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file | ||
The requested commit or file is too big and content was truncated. Show full diff |
[File renamed: tests/badserverext.py → tests/testlib/badserverext.py (diff truncated; no content changes shown).]
[Diffs truncated: three more modified files are not shown.]
[File removed: diff collapsed (804 lines changed).]
[File removed: diff collapsed (576 lines changed).]
[File removed: diff collapsed (1613 lines changed).]
[Diffs truncated: 19 additional removed files are not shown.]