merge with default
Pulkit Goyal
r49521:a44bb185 (merge, 6.0rc0, stable)

Note: the changeset is too large for this view; some file contents below were truncated.

13 new files added (mode 100644) with no content shown; their diffs were omitted from this view.
--- a/contrib/automation/requirements.txt
+++ b/contrib/automation/requirements.txt
@@ -1,193 +1,193 @@
 #
 # This file is autogenerated by pip-compile
 # To update, run:
 #
 #    pip-compile --generate-hashes --output-file=contrib/automation/requirements.txt contrib/automation/requirements.txt.in
 #
 asn1crypto==1.0.1 \
     --hash=sha256:0b199f211ae690df3db4fd6c1c4ff976497fb1da689193e368eedbadc53d9292 \
     --hash=sha256:bca90060bd995c3f62c4433168eab407e44bdbdb567b3f3a396a676c1a4c4a3f \
     # via cryptography
 bcrypt==3.1.7 \
     --hash=sha256:0258f143f3de96b7c14f762c770f5fc56ccd72f8a1857a451c1cd9a655d9ac89 \
     --hash=sha256:0b0069c752ec14172c5f78208f1863d7ad6755a6fae6fe76ec2c80d13be41e42 \
     --hash=sha256:19a4b72a6ae5bb467fea018b825f0a7d917789bcfe893e53f15c92805d187294 \
     --hash=sha256:5432dd7b34107ae8ed6c10a71b4397f1c853bd39a4d6ffa7e35f40584cffd161 \
     --hash=sha256:69361315039878c0680be456640f8705d76cb4a3a3fe1e057e0f261b74be4b31 \
     --hash=sha256:6fe49a60b25b584e2f4ef175b29d3a83ba63b3a4df1b4c0605b826668d1b6be5 \
     --hash=sha256:74a015102e877d0ccd02cdeaa18b32aa7273746914a6c5d0456dd442cb65b99c \
     --hash=sha256:763669a367869786bb4c8fcf731f4175775a5b43f070f50f46f0b59da45375d0 \
     --hash=sha256:8b10acde4e1919d6015e1df86d4c217d3b5b01bb7744c36113ea43d529e1c3de \
     --hash=sha256:9fe92406c857409b70a38729dbdf6578caf9228de0aef5bc44f859ffe971a39e \
     --hash=sha256:a190f2a5dbbdbff4b74e3103cef44344bc30e61255beb27310e2aec407766052 \
     --hash=sha256:a595c12c618119255c90deb4b046e1ca3bcfad64667c43d1166f2b04bc72db09 \
     --hash=sha256:c9457fa5c121e94a58d6505cadca8bed1c64444b83b3204928a866ca2e599105 \
     --hash=sha256:cb93f6b2ab0f6853550b74e051d297c27a638719753eb9ff66d1e4072be67133 \
     --hash=sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7 \
     --hash=sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc \
     # via paramiko
 bleach==3.1.0 \
     --hash=sha256:213336e49e102af26d9cde77dd2d0397afabc5a6bf2fed985dc35b5d1e285a16 \
     --hash=sha256:3fdf7f77adcf649c9911387df51254b813185e32b2c6619f690b593a617e19fa \
     # via readme-renderer
 boto3==1.9.243 \
     --hash=sha256:404acbecef8f4912f18312fcfaffe7eba7f10b3b7adf7853bdba59cdf2275ebb \
     --hash=sha256:c6e5a7e4548ce7586c354ff633f2a66ba3c471d15a8ae6a30f873122ab04e1cf
 botocore==1.12.243 \
     --hash=sha256:397585a7881230274afb8d1877ef69a661b0a311745cd324f14a052fb2a2863a \
     --hash=sha256:4496f8da89cb496462a831897ad248e13e431d9fa7e41e06d426fd6658ab6e59 \
     # via boto3, s3transfer
-certifi==2019.9.11 \
-    --hash=sha256:e4f3620cfea4f83eedc95b24abd9cd56f3c4b146dd0177e83a21b4eb49e21e50 \
-    --hash=sha256:fd7c7c74727ddcf00e9acd26bba8da604ffec95bf1c2144e67aff7a8b50e6cef \
+certifi==2021.5.30 \
+    --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
+    --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
     # via requests
 cffi==1.12.3 \
     --hash=sha256:041c81822e9f84b1d9c401182e174996f0bae9991f33725d059b771744290774 \
     --hash=sha256:046ef9a22f5d3eed06334d01b1e836977eeef500d9b78e9ef693f9380ad0b83d \
     --hash=sha256:066bc4c7895c91812eff46f4b1c285220947d4aa46fa0a2651ff85f2afae9c90 \
     --hash=sha256:066c7ff148ae33040c01058662d6752fd73fbc8e64787229ea8498c7d7f4041b \
     --hash=sha256:2444d0c61f03dcd26dbf7600cf64354376ee579acad77aef459e34efcb438c63 \
     --hash=sha256:300832850b8f7967e278870c5d51e3819b9aad8f0a2c8dbe39ab11f119237f45 \
     --hash=sha256:34c77afe85b6b9e967bd8154e3855e847b70ca42043db6ad17f26899a3df1b25 \
     --hash=sha256:46de5fa00f7ac09f020729148ff632819649b3e05a007d286242c4882f7b1dc3 \
     --hash=sha256:4aa8ee7ba27c472d429b980c51e714a24f47ca296d53f4d7868075b175866f4b \
     --hash=sha256:4d0004eb4351e35ed950c14c11e734182591465a33e960a4ab5e8d4f04d72647 \
     --hash=sha256:4e3d3f31a1e202b0f5a35ba3bc4eb41e2fc2b11c1eff38b362de710bcffb5016 \
     --hash=sha256:50bec6d35e6b1aaeb17f7c4e2b9374ebf95a8975d57863546fa83e8d31bdb8c4 \
     --hash=sha256:55cad9a6df1e2a1d62063f79d0881a414a906a6962bc160ac968cc03ed3efcfb \
     --hash=sha256:5662ad4e4e84f1eaa8efce5da695c5d2e229c563f9d5ce5b0113f71321bcf753 \
     --hash=sha256:59b4dc008f98fc6ee2bb4fd7fc786a8d70000d058c2bbe2698275bc53a8d3fa7 \
     --hash=sha256:73e1ffefe05e4ccd7bcea61af76f36077b914f92b76f95ccf00b0c1b9186f3f9 \
     --hash=sha256:a1f0fd46eba2d71ce1589f7e50a9e2ffaeb739fb2c11e8192aa2b45d5f6cc41f \
     --hash=sha256:a2e85dc204556657661051ff4bab75a84e968669765c8a2cd425918699c3d0e8 \
     --hash=sha256:a5457d47dfff24882a21492e5815f891c0ca35fefae8aa742c6c263dac16ef1f \
     --hash=sha256:a8dccd61d52a8dae4a825cdbb7735da530179fea472903eb871a5513b5abbfdc \
     --hash=sha256:ae61af521ed676cf16ae94f30fe202781a38d7178b6b4ab622e4eec8cefaff42 \
     --hash=sha256:b012a5edb48288f77a63dba0840c92d0504aa215612da4541b7b42d849bc83a3 \
     --hash=sha256:d2c5cfa536227f57f97c92ac30c8109688ace8fa4ac086d19d0af47d134e2909 \
     --hash=sha256:d42b5796e20aacc9d15e66befb7a345454eef794fdb0737d1af593447c6c8f45 \
     --hash=sha256:dee54f5d30d775f525894d67b1495625dd9322945e7fee00731952e0368ff42d \
     --hash=sha256:e070535507bd6aa07124258171be2ee8dfc19119c28ca94c9dfb7efd23564512 \
     --hash=sha256:e1ff2748c84d97b065cc95429814cdba39bcbd77c9c85c89344b317dc0d9cbff \
     --hash=sha256:ed851c75d1e0e043cbf5ca9a8e1b13c4c90f3fbd863dacb01c0808e2b5204201 \
     # via bcrypt, cryptography, pynacl
 chardet==3.0.4 \
     --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
     --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \
     # via requests
 cryptography==2.7 \
     --hash=sha256:24b61e5fcb506424d3ec4e18bca995833839bf13c59fc43e530e488f28d46b8c \
     --hash=sha256:25dd1581a183e9e7a806fe0543f485103232f940fcfc301db65e630512cce643 \
     --hash=sha256:3452bba7c21c69f2df772762be0066c7ed5dc65df494a1d53a58b683a83e1216 \
     --hash=sha256:41a0be220dd1ed9e998f5891948306eb8c812b512dc398e5a01846d855050799 \
     --hash=sha256:5751d8a11b956fbfa314f6553d186b94aa70fdb03d8a4d4f1c82dcacf0cbe28a \
     --hash=sha256:5f61c7d749048fa6e3322258b4263463bfccefecb0dd731b6561cb617a1d9bb9 \
     --hash=sha256:72e24c521fa2106f19623a3851e9f89ddfdeb9ac63871c7643790f872a305dfc \
     --hash=sha256:7b97ae6ef5cba2e3bb14256625423413d5ce8d1abb91d4f29b6d1a081da765f8 \
     --hash=sha256:961e886d8a3590fd2c723cf07be14e2a91cf53c25f02435c04d39e90780e3b53 \
     --hash=sha256:96d8473848e984184b6728e2c9d391482008646276c3ff084a1bd89e15ff53a1 \
     --hash=sha256:ae536da50c7ad1e002c3eee101871d93abdc90d9c5f651818450a0d3af718609 \
     --hash=sha256:b0db0cecf396033abb4a93c95d1602f268b3a68bb0a9cc06a7cff587bb9a7292 \
     --hash=sha256:cfee9164954c186b191b91d4193989ca994703b2fff406f71cf454a2d3c7327e \
     --hash=sha256:e6347742ac8f35ded4a46ff835c60e68c22a536a8ae5c4422966d06946b6d4c6 \
     --hash=sha256:f27d93f0139a3c056172ebb5d4f9056e770fdf0206c2f422ff2ebbad142e09ed \
     --hash=sha256:f57b76e46a58b63d1c6375017f4564a28f19a5ca912691fd2e4261b3414b618d \
     # via paramiko, pypsrp
 docutils==0.15.2 \
     --hash=sha256:6c4f696463b79f1fb8ba0c594b63840ebd41f059e92b31957c46b74a4599b6d0 \
     --hash=sha256:9e4d7ecfc600058e07ba661411a2b7de2fd0fafa17d1a7f7361cd47b1175c827 \
     --hash=sha256:a2aeea129088da402665e92e0b25b04b073c04b2dce4ab65caaa38b7ce2e1a99 \
     # via botocore, readme-renderer
 idna==2.8 \
     --hash=sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407 \
     --hash=sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c \
     # via requests
 jmespath==0.9.4 \
     --hash=sha256:3720a4b1bd659dd2eecad0666459b9788813e032b83e7ba58578e48254e0a0e6 \
     --hash=sha256:bde2aef6f44302dfb30320115b17d030798de8c4110e28d5cf6cf91a7a31074c \
     # via boto3, botocore
 ntlm-auth==1.4.0 \
     --hash=sha256:11f7a3cec38155b7cecdd9bbc8c37cd738d8012f0523b3f98d8caefe394feb97 \
     --hash=sha256:350f2389c8ee5517f47db55a36ac2f8efc9742a60a678d6e2caa92385bdcaa9a \
     # via pypsrp
 paramiko==2.6.0 \
     --hash=sha256:99f0179bdc176281d21961a003ffdb2ec369daac1a1007241f53374e376576cf \
     --hash=sha256:f4b2edfa0d226b70bd4ca31ea7e389325990283da23465d572ed1f70a7583041
 pkginfo==1.5.0.1 \
     --hash=sha256:7424f2c8511c186cd5424bbf31045b77435b37a8d604990b79d4e70d741148bb \
     --hash=sha256:a6d9e40ca61ad3ebd0b72fbadd4fba16e4c0e4df0428c041e01e06eb6ee71f32 \
     # via twine
 pycparser==2.19 \
     --hash=sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3 \
     # via cffi
 pygments==2.4.2 \
     --hash=sha256:71e430bc85c88a430f000ac1d9b331d2407f681d6f6aec95e8bcfbc3df5b0127 \
     --hash=sha256:881c4c157e45f30af185c1ffe8d549d48ac9127433f2c380c24b84572ad66297 \
     # via readme-renderer
 pynacl==1.3.0 \
     --hash=sha256:05c26f93964373fc0abe332676cb6735f0ecad27711035b9472751faa8521255 \
     --hash=sha256:0c6100edd16fefd1557da078c7a31e7b7d7a52ce39fdca2bec29d4f7b6e7600c \
     --hash=sha256:0d0a8171a68edf51add1e73d2159c4bc19fc0718e79dec51166e940856c2f28e \
     --hash=sha256:1c780712b206317a746ace34c209b8c29dbfd841dfbc02aa27f2084dd3db77ae \
     --hash=sha256:2424c8b9f41aa65bbdbd7a64e73a7450ebb4aa9ddedc6a081e7afcc4c97f7621 \
     --hash=sha256:2d23c04e8d709444220557ae48ed01f3f1086439f12dbf11976e849a4926db56 \
     --hash=sha256:30f36a9c70450c7878053fa1344aca0145fd47d845270b43a7ee9192a051bf39 \
     --hash=sha256:37aa336a317209f1bb099ad177fef0da45be36a2aa664507c5d72015f956c310 \
     --hash=sha256:4943decfc5b905748f0756fdd99d4f9498d7064815c4cf3643820c9028b711d1 \
     --hash=sha256:57ef38a65056e7800859e5ba9e6091053cd06e1038983016effaffe0efcd594a \
     --hash=sha256:5bd61e9b44c543016ce1f6aef48606280e45f892a928ca7068fba30021e9b786 \
     --hash=sha256:6482d3017a0c0327a49dddc8bd1074cc730d45db2ccb09c3bac1f8f32d1eb61b \
     --hash=sha256:7d3ce02c0784b7cbcc771a2da6ea51f87e8716004512493a2b69016326301c3b \
     --hash=sha256:a14e499c0f5955dcc3991f785f3f8e2130ed504fa3a7f44009ff458ad6bdd17f \
     --hash=sha256:a39f54ccbcd2757d1d63b0ec00a00980c0b382c62865b61a505163943624ab20 \
     --hash=sha256:aabb0c5232910a20eec8563503c153a8e78bbf5459490c49ab31f6adf3f3a415 \
     --hash=sha256:bd4ecb473a96ad0f90c20acba4f0bf0df91a4e03a1f4dd6a4bdc9ca75aa3a715 \
     --hash=sha256:e2da3c13307eac601f3de04887624939aca8ee3c9488a0bb0eca4fb9401fc6b1 \
     --hash=sha256:f67814c38162f4deb31f68d590771a29d5ae3b1bd64b75cf232308e5c74777e0 \
     # via paramiko
 pypsrp==0.4.0 \
     --hash=sha256:64b5bdd725a9744c821483b05ecd266f6417f4c6e90ee961a08838480f7d025e \
     --hash=sha256:f42919247fb80f7dc24c552560d7c24e754d15326030c9e3b7b94f51cfa4dc69
 python-dateutil==2.8.0 \
     --hash=sha256:7e6584c74aeed623791615e26efd690f29817a27c73085b78e4bad02493df2fb \
     --hash=sha256:c89805f6f4d64db21ed966fda138f8a5ed7a4fdbc1a8ee329ce1b74e3c74da9e \
     # via botocore
 readme-renderer==24.0 \
     --hash=sha256:bb16f55b259f27f75f640acf5e00cf897845a8b3e4731b5c1a436e4b8529202f \
     --hash=sha256:c8532b79afc0375a85f10433eca157d6b50f7d6990f337fa498c96cd4bfc203d \
     # via twine
 requests-toolbelt==0.9.1 \
     --hash=sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f \
     --hash=sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0 \
     # via twine
 requests==2.22.0 \
     --hash=sha256:11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4 \
     --hash=sha256:9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31 \
     # via pypsrp, requests-toolbelt, twine
 s3transfer==0.2.1 \
     --hash=sha256:6efc926738a3cd576c2a79725fed9afde92378aa5c6a957e3af010cb019fac9d \
     --hash=sha256:b780f2411b824cb541dbcd2c713d0cb61c7d1bcadae204cdddda2b35cef493ba \
     # via boto3
 six==1.12.0 \
     --hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
     --hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
     # via bcrypt, bleach, cryptography, pynacl, pypsrp, python-dateutil, readme-renderer
 tqdm==4.36.1 \
     --hash=sha256:abc25d0ce2397d070ef07d8c7e706aede7920da163c64997585d42d3537ece3d \
     --hash=sha256:dd3fcca8488bb1d416aa7469d2f277902f26260c45aa86b667b074cd44b3b115 \
     # via twine
 twine==2.0.0 \
     --hash=sha256:5319dd3e02ac73fcddcd94f035b9631589ab5d23e1f4699d57365199d85261e1 \
     --hash=sha256:9fe7091715c7576df166df8ef6654e61bada39571783f2fd415bdcba867c6993
 urllib3==1.25.6 \
     --hash=sha256:3de946ffbed6e6746608990594d08faac602528ac7015ac28d33cee6a45b7398 \
     --hash=sha256:9a107b99a5393caf59c7aa3c1249c16e6879447533d0887f4336dde834c7be86 \
     # via botocore, requests
 webencodings==0.5.1 \
     --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \
     --hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923 \
     # via bleach
 
 # WARNING: The following packages were not pinned, but pip requires them to be
 # pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
 # setuptools==41.2.0 # via twine
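For reference, the header comment in each of these files records how they are regenerated. A minimal sketch of the update-and-verify workflow (assuming a repository checkout with pip-tools installed; only the pip-compile invocation is taken from the file itself, the rest is illustrative):

    $ pip install pip-tools
    $ pip-compile --generate-hashes --output-file=contrib/automation/requirements.txt contrib/automation/requirements.txt.in
    $ pip install --require-hashes -r contrib/automation/requirements.txt

The trailing WARNING is emitted by pip-compile itself: because the file carries --hash pins, pip's hash-checking mode refuses to install any dependency that is left unpinned (here setuptools), and regenerating with the --allow-unsafe flag would pin it as well.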
--- a/contrib/packaging/requirements-windows-py2.txt
+++ b/contrib/packaging/requirements-windows-py2.txt
@@ -1,59 +1,59 @@
 #
 # This file is autogenerated by pip-compile
 # To update, run:
 #
 #    pip-compile --generate-hashes --output-file=contrib/packaging/requirements-windows-py2.txt contrib/packaging/requirements-windows.txt.in
 #
-certifi==2020.6.20 \
-    --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \
-    --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \
+certifi==2021.5.30 \
+    --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
+    --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
     # via dulwich
 configparser==4.0.2 \
     --hash=sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c \
     --hash=sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df \
     # via entrypoints
 docutils==0.16 \
     --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
     --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \
     # via -r contrib/packaging/requirements-windows.txt.in
 dulwich==0.19.16 ; python_version <= "2.7" \
     --hash=sha256:10699277c6268d0c16febe141a5b1c1a6e9744f3144c2d2de1706f4b1adafe63 \
     --hash=sha256:267160904e9a1cb6c248c5efc53597a35d038ecc6f60bdc4546b3053bed11982 \
     --hash=sha256:4e3aba5e4844e7c700721c1fc696987ea820ee3528a03604dc4e74eff4196826 \
     --hash=sha256:60bb2c2c92f5025c1b53a556304008f0f624c98ae36f22d870e056b2d4236c11 \
     --hash=sha256:dddae02d372fc3b5cfb0046d0f62246ef281fa0c088df7601ab5916607add94b \
     --hash=sha256:f00d132082b8fcc2eb0d722abc773d4aeb5558c1475d7edd1f0f571146c29db9 \
     --hash=sha256:f74561c448bfb6f04c07de731c1181ae4280017f759b0bb04fa5770aa84ca850 \
     # via -r contrib/packaging/requirements-windows.txt.in
 entrypoints==0.3 \
     --hash=sha256:589f874b313739ad35be6e0cd7efde2a4e9b6fea91edcc34e58ecbb8dbe56d19 \
     --hash=sha256:c70dd71abe5a8c85e55e12c19bd91ccfeec11a6e99044204511f9ed547d48451 \
     # via keyring
 keyring==18.0.1 \
     --hash=sha256:67d6cc0132bd77922725fae9f18366bb314fd8f95ff4d323a4df41890a96a838 \
     --hash=sha256:7b29ebfcf8678c4da531b2478a912eea01e80007e5ddca9ee0c7038cb3489ec6 \
     # via -r contrib/packaging/requirements-windows.txt.in
 pygments==2.5.2 \
     --hash=sha256:2a3fe295e54a20164a9df49c75fa58526d3be48e14aceba6d6b1e8ac0bfd6f1b \
     --hash=sha256:98c8aa5a9f778fcd1026a17361ddaf7330d1b7c62ae97c3bb0ae73e0b9b6b0fe \
     # via -r contrib/packaging/requirements-windows.txt.in
 pywin32-ctypes==0.2.0 \
     --hash=sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942 \
     --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98 \
     # via -r contrib/packaging/requirements-windows.txt.in, keyring
 urllib3==1.25.11 \
     --hash=sha256:8d7eaa5a82a1cac232164990f04874c594c9453ec55eef02eab885aa02fc17a2 \
     --hash=sha256:f5321fbe4bf3fefa0efd0bfe7fb14e90909eb62a48ccda331726b4319897dd5e \
     # via dulwich
 windows-curses==2.1.0 \
     --hash=sha256:261fde5680d1ce4ce116908996b9a3cfb0ffb03ea68d42240f62b56a9fa6af2c \
     --hash=sha256:66034dc9a705d87308cc9ea90836f4ee60008a1d5e2c1d34ace627f60268158b \
     --hash=sha256:669caad3ae16faf2d201d7ab3b8af418a2fd074d8a39d60ca26f3acb34b6afe5 \
     --hash=sha256:73bd3eebccfda55330783f165151de115bfa238d1332f0b2e224b550d6187840 \
     --hash=sha256:89a6d973f88cfe49b41ea80164dcbec209d296e0cec34a02002578b0bf464a64 \
     --hash=sha256:8ba7c000d7ffa5452bbd0966b96e69261e4f117ebe510aeb8771a9650197b7f0 \
     --hash=sha256:97084c6b37b1534f6a28a514d521dfae402f77dcbad42b14ee32e8d5bdc13648 \
     --hash=sha256:9e474a181f96d60429a4766145628264e60b72e7715876f9135aeb2e842f9433 \
     --hash=sha256:cfe64c30807c146ef8d094412f90f2a2c81ad6aefff3ebfe8e37aabe2f801303 \
     --hash=sha256:ff8c67f74b88944d99fa9d22971c05c335bc74f149120f0a69340c2c3a595497 \
     # via -r contrib/packaging/requirements-windows.txt.in
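A note on the two Windows files: the '; python_version <= "2.7"' suffix on dulwich here (and the '; python_version >= "3"' suffixes in the py3 file below) are PEP 508 environment markers, which pip evaluates against the running interpreter at install time. That is how a single requirements-windows.txt.in input yields two compiled outputs with different pins. As a sketch of how each file would presumably be consumed (the commands are illustrative, not taken from the repository):

    $ python2 -m pip install --require-hashes -r contrib/packaging/requirements-windows-py2.txt
    $ python3 -m pip install --require-hashes -r contrib/packaging/requirements-windows-py3.txt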
--- a/contrib/packaging/requirements-windows-py3.txt
+++ b/contrib/packaging/requirements-windows-py3.txt
@@ -1,301 +1,301 @@
 #
 # This file is autogenerated by pip-compile
 # To update, run:
 #
 #    pip-compile --generate-hashes --output-file=contrib/packaging/requirements-windows-py3.txt contrib/packaging/requirements-windows.txt.in
 #
 atomicwrites==1.4.0 \
     --hash=sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197 \
     --hash=sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a \
     # via pytest
 attrs==21.2.0 \
     --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \
     --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb \
     # via pytest
 cached-property==1.5.2 \
     --hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \
     --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0 \
     # via pygit2
-certifi==2020.6.20 \
-    --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \
-    --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \
+certifi==2021.5.30 \
+    --hash=sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee \
+    --hash=sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8 \
     # via dulwich
 cffi==1.14.4 \
     --hash=sha256:00a1ba5e2e95684448de9b89888ccd02c98d512064b4cb987d48f4b40aa0421e \
     --hash=sha256:00e28066507bfc3fe865a31f325c8391a1ac2916219340f87dfad602c3e48e5d \
     --hash=sha256:045d792900a75e8b1e1b0ab6787dd733a8190ffcf80e8c8ceb2fb10a29ff238a \
     --hash=sha256:0638c3ae1a0edfb77c6765d487fee624d2b1ee1bdfeffc1f0b58c64d149e7eec \
     --hash=sha256:105abaf8a6075dc96c1fe5ae7aae073f4696f2905fde6aeada4c9d2926752362 \
     --hash=sha256:155136b51fd733fa94e1c2ea5211dcd4c8879869008fc811648f16541bf99668 \
     --hash=sha256:1a465cbe98a7fd391d47dce4b8f7e5b921e6cd805ef421d04f5f66ba8f06086c \
     --hash=sha256:1d2c4994f515e5b485fd6d3a73d05526aa0fcf248eb135996b088d25dfa1865b \
     --hash=sha256:2c24d61263f511551f740d1a065eb0212db1dbbbbd241db758f5244281590c06 \
     --hash=sha256:51a8b381b16ddd370178a65360ebe15fbc1c71cf6f584613a7ea08bfad946698 \
     --hash=sha256:594234691ac0e9b770aee9fcdb8fa02c22e43e5c619456efd0d6c2bf276f3eb2 \
     --hash=sha256:5cf4be6c304ad0b6602f5c4e90e2f59b47653ac1ed9c662ed379fe48a8f26b0c \
     --hash=sha256:64081b3f8f6f3c3de6191ec89d7dc6c86a8a43911f7ecb422c60e90c70be41c7 \
     --hash=sha256:6bc25fc545a6b3d57b5f8618e59fc13d3a3a68431e8ca5fd4c13241cd70d0009 \
     --hash=sha256:798caa2a2384b1cbe8a2a139d80734c9db54f9cc155c99d7cc92441a23871c03 \
     --hash=sha256:7c6b1dece89874d9541fc974917b631406233ea0440d0bdfbb8e03bf39a49b3b \
     --hash=sha256:840793c68105fe031f34d6a086eaea153a0cd5c491cde82a74b420edd0a2b909 \
     --hash=sha256:8d6603078baf4e11edc4168a514c5ce5b3ba6e3e9c374298cb88437957960a53 \
     --hash=sha256:9cc46bc107224ff5b6d04369e7c595acb700c3613ad7bcf2e2012f62ece80c35 \
     --hash=sha256:9f7a31251289b2ab6d4012f6e83e58bc3b96bd151f5b5262467f4bb6b34a7c26 \
     --hash=sha256:9ffb888f19d54a4d4dfd4b3f29bc2c16aa4972f1c2ab9c4ab09b8ab8685b9c2b \
     --hash=sha256:a7711edca4dcef1a75257b50a2fbfe92a65187c47dab5a0f1b9b332c5919a3fb \
     --hash=sha256:af5c59122a011049aad5dd87424b8e65a80e4a6477419c0c1015f73fb5ea0293 \
     --hash=sha256:b18e0a9ef57d2b41f5c68beefa32317d286c3d6ac0484efd10d6e07491bb95dd \
     --hash=sha256:b4e248d1087abf9f4c10f3c398896c87ce82a9856494a7155823eb45a892395d \
     --hash=sha256:ba4e9e0ae13fc41c6b23299545e5ef73055213e466bd107953e4a013a5ddd7e3 \
     --hash=sha256:c6332685306b6417a91b1ff9fae889b3ba65c2292d64bd9245c093b1b284809d \
     --hash=sha256:d9efd8b7a3ef378dd61a1e77367f1924375befc2eba06168b6ebfa903a5e59ca \
     --hash=sha256:df5169c4396adc04f9b0a05f13c074df878b6052430e03f50e68adf3a57aa28d \
     --hash=sha256:ebb253464a5d0482b191274f1c8bf00e33f7e0b9c66405fbffc61ed2c839c775 \
     --hash=sha256:ec80dc47f54e6e9a78181ce05feb71a0353854cc26999db963695f950b5fb375 \
     --hash=sha256:f032b34669220030f905152045dfa27741ce1a6db3324a5bc0b96b6c7420c87b \
     --hash=sha256:f60567825f791c6f8a592f3c6e3bd93dd2934e3f9dac189308426bd76b00ef3b \
     --hash=sha256:f803eaa94c2fcda012c047e62bc7a51b0bdabda1cad7a92a522694ea2d76e49f \
     # via pygit2
 colorama==0.4.4 \
     --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \
     --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 \
     # via pytest
 docutils==0.16 \
     --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \
     --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc \
     # via -r contrib/packaging/requirements-windows.txt.in
 dulwich==0.20.6 ; python_version >= "3" \
     --hash=sha256:1ccd55e38fa9f169290f93e027ab4508202f5bdd6ef534facac4edd3f6903f0d \
     --hash=sha256:2452a0379cc7bbbd7ab893ec104d18039f1ea98b0d6be6bca5646e5cf29e0ae9 \
     --hash=sha256:2f4aebc54ed2d37dcee737024421452375570a422eb682232e676aa7ebc9cb4b \
     --hash=sha256:304f52b10c49c3a6ddfbd73e2e93d8e979350225cfba9688e51110e74fa2f718 \
     --hash=sha256:49e747c72d9099e873bf6196260346d5996c3f28af788294d47a8accdc524de7 \
     --hash=sha256:4fee359928c59b53af153a582a7ed7595259a5a825df400301a29e17fd78dfd3 \
     --hash=sha256:50ef300a9fa4efd9f85009c2bd8b515266ec1529400f8834f85c04fa9f09b2c0 \
     --hash=sha256:5348310f21b2a23847342ce464461499b6652483fa42de03714d0f6421a99698 \
     --hash=sha256:7e7b5dea5178b6493fdb83adccbe81de9ddff55f79880185ed594c0e3a97209b \
     --hash=sha256:8f7a7f973be2beedfb10dd8d3eb6bdf9ec466c72ad555704897cbd6357fe5021 \
     --hash=sha256:bea6e6caffc6c73bfd1647714c5715ab96ac49deb8beb8b67511529afa25685a \
     --hash=sha256:e5871b86a079e9e290f52ab14559cea1b694a0b8ed2b9ebb898f6ced7f14a406 \
     --hash=sha256:e593f514b8ac740b4ceeb047745b4719bfc9f334904245c6edcb3a9d002f577b \
     # via -r contrib/packaging/requirements-windows.txt.in
 fuzzywuzzy==0.18.0 \
     --hash=sha256:45016e92264780e58972dca1b3d939ac864b78437422beecebb3095f8efd00e8 \
     # via -r contrib/packaging/requirements-windows.txt.in
 idna==3.2 \
     --hash=sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a \
     --hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 \
     # via yarl
 importlib-metadata==3.1.0 \
     --hash=sha256:590690d61efdd716ff82c39ca9a9d4209252adfe288a4b5721181050acbd4175 \
     --hash=sha256:d9b8a46a0885337627a6430db287176970fff18ad421becec1d64cfc763c2099 \
     # via keyring, pluggy, pytest
 iniconfig==1.1.1 \
     --hash=sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3 \
     --hash=sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32 \
     # via pytest
 keyring==21.4.0 \
     --hash=sha256:4e34ea2fdec90c1c43d6610b5a5fafa1b9097db1802948e90caf5763974b8f8d \
     --hash=sha256:9aeadd006a852b78f4b4ef7c7556c2774d2432bbef8ee538a3e9089ac8b11466 \
     # via -r contrib/packaging/requirements-windows.txt.in
 multidict==5.1.0 \
     --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \
     --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \
     --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \
     --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \
     --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \
     --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \
     --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \
     --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 \
     --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \
     --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \
     --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \
     --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \
     --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \
     --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \
     --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \
     --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \
     --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \
     --hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \
     --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \
     --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \
     --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \
     --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \
     --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \
     --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \
     --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \
     --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \
     --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \
     --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \
     --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \
     --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \
     --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \
     --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \
     --hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \
     --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \
     --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \
     --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \
     --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80 \
     # via yarl
 packaging==21.0 \
     --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 \
     --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 \
     # via pytest
 pluggy==0.13.1 \
     --hash=sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0 \
     --hash=sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d \
     # via pytest
 py==1.10.0 \
     --hash=sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3 \
     --hash=sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a \
     # via pytest
 pycparser==2.20 \
     --hash=sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0 \
     --hash=sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705 \
     # via cffi
 pygit2==1.4.0 ; python_version >= "3" \
     --hash=sha256:0d298098e286eeda000e49ca7e1b41f87300e10dd8b9d06b32b008bd61f50b83 \
     --hash=sha256:0ee135eb2cd8b07ce1374f3596cc5c3213472d6389bad6a4c5d87d8e267e93e9 \
     --hash=sha256:32eb863d6651d4890ced318505ea8dc229bd9637deaf29c898de1ab574d727a0 \
     --hash=sha256:37d6d7d6d7804c42a0fe23425c72e38093488525092fc5e51a05684e63503ce7 \
     --hash=sha256:41204b6f3406d9f53147710f3cc485d77181ba67f57c34d36b7c86de1c14a18c \
     --hash=sha256:818c91b582109d90580c5da74af783738838353f15eb12eeb734d80a974b05a3 \
     --hash=sha256:8306a302487dac67df7af6a064bb37e8a8eb4138958f9560ff49ff162e185dab \
     --hash=sha256:9c2f2d9ef59513007b66f6534b000792b614de3faf60313a0a68f6b8571aea85 \
     --hash=sha256:9c8d5881eb709e2e2e13000b507a131bd5fb91a879581030088d0ddffbcd19af \
     --hash=sha256:b422e417739def0a136a6355723dfe8a5ffc83db5098076f28a14f1d139779c1 \
     --hash=sha256:cbeb38ab1df9b5d8896548a11e63aae8a064763ab5f1eabe4475e6b8a78ee1c8 \
     --hash=sha256:cf00481ddf053e549a6edd0216bdc267b292d261eae02a67bb3737de920cbf88 \
     --hash=sha256:d0d889144e9487d926fecea947c3f39ce5f477e521d7d467d2e66907e4cd657d \
     --hash=sha256:ddb7a1f6d38063e8724abfa1cfdfb0f9b25014b8bca0546274b7a84b873a3888 \
     --hash=sha256:e9037a7d810750fe23c9f5641ef14a0af2525ff03e14752cd4f73e1870ecfcb0 \
     --hash=sha256:ec5c0365a9bdfcac1609d20868507b28685ec5ea7cc3a2c903c9b62ef2e0bbc0 \
     --hash=sha256:fdd8ba30cda277290e000322f505132f590cf89bd7d31829b45a3cb57447ec32 \
     # via -r contrib/packaging/requirements-windows.txt.in
 pygments==2.7.1 \
     --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \
     --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7 \
     # via -r contrib/packaging/requirements-windows.txt.in
 pyparsing==2.4.7 \
     --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \
     --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b \
     # via packaging
 pytest-vcr==1.0.2 \
     --hash=sha256:23ee51b75abbcc43d926272773aae4f39f93aceb75ed56852d0bf618f92e1896 \
     # via -r contrib/packaging/requirements-windows.txt.in
 pytest==6.2.4 \
     --hash=sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b \
     --hash=sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890
[remainder of this diff was truncated in the original view]
188 --hash=sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890 \
189 # via pytest-vcr
189 # via pytest-vcr
190 pywin32-ctypes==0.2.0 \
190 pywin32-ctypes==0.2.0 \
191 --hash=sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942 \
191 --hash=sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942 \
192 --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98 \
192 --hash=sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98 \
193 # via -r contrib/packaging/requirements-windows.txt.in, keyring
193 # via -r contrib/packaging/requirements-windows.txt.in, keyring
194 pyyaml==5.4.1 \
194 pyyaml==5.4.1 \
195 --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \
195 --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \
196 --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \
196 --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \
197 --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \
197 --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \
198 --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \
198 --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \
199 --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \
199 --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \
200 --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \
200 --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \
201 --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \
201 --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \
202 --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \
202 --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \
203 --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \
203 --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \
204 --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \
204 --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \
205 --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \
205 --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \
206 --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \
206 --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \
207 --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \
207 --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \
208 --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \
208 --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \
209 --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \
209 --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \
210 --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \
210 --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \
211 --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \
211 --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \
212 --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \
212 --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \
213 --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \
213 --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \
214 --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \
214 --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \
215 --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \
215 --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \
216 --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \
216 --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \
217 --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \
217 --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \
218 --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \
218 --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \
219 --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \
219 --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \
220 --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \
220 --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \
221 --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \
221 --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \
222 --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \
222 --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \
223 --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 \
223 --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 \
224 # via vcrpy
224 # via vcrpy
225 six==1.16.0 \
225 six==1.16.0 \
226 --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
226 --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
227 --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \
227 --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \
228 # via vcrpy
228 # via vcrpy
229 toml==0.10.2 \
229 toml==0.10.2 \
230 --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
230 --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
231 --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f \
231 --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f \
232 # via pytest
232 # via pytest
233 typing-extensions==3.10.0.0 \
233 typing-extensions==3.10.0.0 \
234 --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \
234 --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \
235 --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \
235 --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \
236 --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84 \
236 --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84 \
237 # via yarl
237 # via yarl
238 urllib3==1.25.11 \
238 urllib3==1.25.11 \
239 --hash=sha256:8d7eaa5a82a1cac232164990f04874c594c9453ec55eef02eab885aa02fc17a2 \
239 --hash=sha256:8d7eaa5a82a1cac232164990f04874c594c9453ec55eef02eab885aa02fc17a2 \
240 --hash=sha256:f5321fbe4bf3fefa0efd0bfe7fb14e90909eb62a48ccda331726b4319897dd5e \
240 --hash=sha256:f5321fbe4bf3fefa0efd0bfe7fb14e90909eb62a48ccda331726b4319897dd5e \
241 # via dulwich
241 # via dulwich
242 vcrpy==4.1.1 \
242 vcrpy==4.1.1 \
243 --hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \
243 --hash=sha256:12c3fcdae7b88ecf11fc0d3e6d77586549d4575a2ceee18e82eee75c1f626162 \
244 --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599 \
244 --hash=sha256:57095bf22fc0a2d99ee9674cdafebed0f3ba763018582450706f7d3a74fff599 \
245 # via pytest-vcr
245 # via pytest-vcr
246 windows-curses==2.2.0 \
246 windows-curses==2.2.0 \
247 --hash=sha256:1452d771ec6f9b3fef037da2b169196a9a12be4e86a6c27dd579adac70c42028 \
247 --hash=sha256:1452d771ec6f9b3fef037da2b169196a9a12be4e86a6c27dd579adac70c42028 \
248 --hash=sha256:267544e4f60c09af6505e50a69d7f01d7f8a281cf4bd4fc7efc3b32b9a4ef64e \
248 --hash=sha256:267544e4f60c09af6505e50a69d7f01d7f8a281cf4bd4fc7efc3b32b9a4ef64e \
249 --hash=sha256:389228a3df556102e72450f599283094168aa82eee189f501ad9f131a0fc92e1 \
249 --hash=sha256:389228a3df556102e72450f599283094168aa82eee189f501ad9f131a0fc92e1 \
250 --hash=sha256:84336fe470fa07288daec5c684dec74c0766fec6b3511ccedb4c494804acfbb7 \
250 --hash=sha256:84336fe470fa07288daec5c684dec74c0766fec6b3511ccedb4c494804acfbb7 \
251 --hash=sha256:9aa6ff60be76f5de696dc6dbf7897e3b1e6abcf4c0f741e9a0ee22cd6ef382f8 \
251 --hash=sha256:9aa6ff60be76f5de696dc6dbf7897e3b1e6abcf4c0f741e9a0ee22cd6ef382f8 \
252 --hash=sha256:c4a8ce00e82635f06648cc40d99f470be4e3ffeb84f9f7ae9d6a4f68ec6361e7 \
252 --hash=sha256:c4a8ce00e82635f06648cc40d99f470be4e3ffeb84f9f7ae9d6a4f68ec6361e7 \
253 --hash=sha256:c5cd032bc7d0f03224ab55c925059d98e81795098d59bbd10f7d05c7ea9677ce \
253 --hash=sha256:c5cd032bc7d0f03224ab55c925059d98e81795098d59bbd10f7d05c7ea9677ce \
254 --hash=sha256:fc0be372fe6da3c39d7093154ce029115a927bf287f34b4c615e2b3f8c23dfaa \
254 --hash=sha256:fc0be372fe6da3c39d7093154ce029115a927bf287f34b4c615e2b3f8c23dfaa \
255 # via -r contrib/packaging/requirements-windows.txt.in
255 # via -r contrib/packaging/requirements-windows.txt.in
256 wrapt==1.12.1 \
256 wrapt==1.12.1 \
257 --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 \
257 --hash=sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7 \
258 # via vcrpy
258 # via vcrpy
259 yarl==1.6.3 \
259 yarl==1.6.3 \
260 --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \
260 --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \
261 --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \
261 --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \
262 --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \
262 --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \
263 --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \
263 --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \
264 --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \
264 --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \
265 --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \
265 --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \
266 --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \
266 --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \
267 --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \
267 --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \
268 --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \
268 --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \
269 --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \
269 --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \
270 --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \
270 --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \
271 --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \
271 --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \
272 --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \
272 --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \
273 --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \
273 --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \
274 --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \
274 --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \
275 --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \
275 --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \
276 --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \
276 --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \
277 --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \
277 --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \
278 --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \
278 --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \
279 --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 \
279 --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 \
280 --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \
280 --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \
281 --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \
281 --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \
282 --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \
282 --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \
283 --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \
283 --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \
284 --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \
284 --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \
285 --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \
285 --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \
286 --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \
286 --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \
287 --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \
287 --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \
288 --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \
288 --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \
289 --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \
289 --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \
290 --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \
290 --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \
291 --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \
291 --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \
292 --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \
292 --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \
293 --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \
293 --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \
294 --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \
294 --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \
295 --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \
295 --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \
296 --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 \
296 --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 \
297 # via vcrpy
297 # via vcrpy
298 zipp==3.4.0 \
298 zipp==3.4.0 \
299 --hash=sha256:102c24ef8f171fd729d46599845e95c7ab894a4cf45f5de11a44cc7444fb1108 \
299 --hash=sha256:102c24ef8f171fd729d46599845e95c7ab894a4cf45f5de11a44cc7444fb1108 \
300 --hash=sha256:ed5eee1974372595f9e416cc7bbeeb12335201d8081ca8a0743c954d4446e5cb \
300 --hash=sha256:ed5eee1974372595f9e416cc7bbeeb12335201d8081ca8a0743c954d4446e5cb \
301 # via importlib-metadata
301 # via importlib-metadata
@@ -1,565 +1,565 @@
# synthrepo.py - repo synthesis
#
# Copyright 2012 Facebook
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''synthesize structurally interesting change history

This extension is useful for creating a repository with properties
that are statistically similar to an existing repository. During
analysis, a simple probability table is constructed from the history
of an existing repository. During synthesis, these properties are
reconstructed.

Properties that are analyzed and synthesized include the following:

- Lines added or removed when an existing file is modified
- Number and sizes of files added
- Number of files removed
- Line lengths
- Topological distance to parent changeset(s)
- Probability of a commit being a merge
- Probability of a newly added file being added to a new directory
- Interarrival time, and time zone, of commits
- Number of files in each directory

A few obvious properties that are not currently handled realistically:

- Merges are treated as regular commits with two parents, which is not
  realistic
- Modifications are not treated as operations on hunks of lines, but
  as insertions and deletions of randomly chosen single lines
- Committer ID (always random)
- Executability of files
- Symlinks and binary files are ignored
'''
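# Hedged usage sketch (the paths are hypothetical; the extension must be
# enabled, e.g. with --config extensions.synthrepo=/path/to/synthrepo.py):
#
#   $ hg -R existing-repo analyze --output model.json
#   $ hg init synthetic
#   $ hg -R synthetic synthesize --initfiles 500 --count 1000 model.json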

from __future__ import absolute_import
import bisect
import collections
import itertools
import json
import os
import random
import sys
import time

from mercurial.i18n import _
from mercurial.node import (
    hex,
    nullid,
    nullrev,
    short,
)
from mercurial import (
    context,
    diffutil,
    error,
    hg,
    logcmdutil,
    patch,
    pycompat,
    registrar,
)
from mercurial.utils import dateutil

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

cmdtable = {}
command = registrar.command(cmdtable)

newfile = {'new fi', 'rename', 'copy f', 'copy t'}


def zerodict():
    return collections.defaultdict(lambda: 0)


def roundto(x, k):
    if x > k * 2:
        return int(round(x / float(k)) * k)
    return int(round(x))
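# A sketch of roundto's behavior: values above 2*k snap to the nearest
# multiple of k, while small values keep full resolution, e.g.
# roundto(12, 5) == 10 but roundto(7, 5) == 7.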


def parsegitdiff(lines):
    filename, mar, lineadd, lineremove = None, None, zerodict(), 0
    binary = False
    for line in lines:
        start = line[:6]
        if start == 'diff -':
            if filename:
                yield filename, mar, lineadd, lineremove, binary
            mar, lineadd, lineremove, binary = 'm', zerodict(), 0, False
            filename = patch.gitre.match(line).group(1)
        elif start in newfile:
            mar = 'a'
        elif start == 'GIT bi':
            binary = True
        elif start == 'delete':
            mar = 'r'
        elif start:
            s = start[0]
            if s == '-' and not line.startswith('--- '):
                lineremove += 1
            elif s == '+' and not line.startswith('+++ '):
                lineadd[roundto(len(line) - 1, 5)] += 1
    if filename:
        yield filename, mar, lineadd, lineremove, binary
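# parsegitdiff yields one (filename, mar, lineadd, lineremove, binary) tuple
# per file in a git-style diff: 'mar' marks the file as 'm'odified, 'a'dded,
# or 'r'emoved, and lineadd maps added-line lengths (bucketed with
# roundto(..., 5)) to counts. For a diff adding one 25-character line to an
# existing file it would yield roughly ('somefile', 'm', {25: 1}, 0, False);
# illustrative values only, and 'somefile' is hypothetical.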


@command(
    'analyze',
    [
        ('o', 'output', '', _('write output to given file'), _('FILE')),
        ('r', 'rev', [], _('analyze specified revisions'), _('REV')),
    ],
    _('hg analyze'),
    optionalrepo=True,
)
def analyze(ui, repo, *revs, **opts):
    """create a simple model of a repository to use for later synthesis

    This command examines every changeset in the given range (or all
    of history if none are specified) and creates a simple statistical
    model of the history of the repository. It also measures the directory
    structure of the repository as checked out.

    The model is written out to a JSON file, and can be used by
    :hg:`synthesize` to create or augment a repository with synthetic
    commits that have a structure that is statistically similar to the
    analyzed repository.
    """
    root = repo.root
    if not root.endswith(os.path.sep):
        root += os.path.sep

    revs = list(revs)
    revs.extend(opts['rev'])
    if not revs:
        revs = [':']

    output = opts['output']
    if not output:
        output = os.path.basename(root) + '.json'

    if output == '-':
        fp = sys.stdout
    else:
        fp = open(output, 'w')

    # Always obtain file counts of each directory in the given root directory.
    def onerror(e):
        ui.warn(_('error walking directory structure: %s\n') % e)

    dirs = {}
    rootprefixlen = len(root)
    for dirpath, dirnames, filenames in os.walk(root, onerror=onerror):
        dirpathfromroot = dirpath[rootprefixlen:]
        dirs[dirpathfromroot] = len(filenames)
        if '.hg' in dirnames:
            dirnames.remove('.hg')

    lineschanged = zerodict()
    children = zerodict()
    p1distance = zerodict()
    p2distance = zerodict()
    linesinfilesadded = zerodict()
    fileschanged = zerodict()
    filesadded = zerodict()
    filesremoved = zerodict()
    linelengths = zerodict()
    interarrival = zerodict()
    parents = zerodict()
    dirsadded = zerodict()
    tzoffset = zerodict()

    # If a mercurial repo is available, also model the commit history.
    if repo:
        revs = logcmdutil.revrange(repo, revs)
        revs.sort()

        progress = ui.makeprogress(
            _('analyzing'), unit=_('changesets'), total=len(revs)
        )
        for i, rev in enumerate(revs):
            progress.update(i)
            ctx = repo[rev]
            pl = ctx.parents()
            pctx = pl[0]
            prev = pctx.rev()
            children[prev] += 1
            p1distance[rev - prev] += 1
            parents[len(pl)] += 1
            tzoffset[ctx.date()[1]] += 1
            if len(pl) > 1:
                p2distance[rev - pl[1].rev()] += 1
            if prev == rev - 1:
                lastctx = pctx
            else:
                lastctx = repo[rev - 1]
            if lastctx.rev() != nullrev:
                timedelta = ctx.date()[0] - lastctx.date()[0]
                interarrival[roundto(timedelta, 300)] += 1
            diffopts = diffutil.diffallopts(ui, {'git': True})
            diff = sum(
                (d.splitlines() for d in ctx.diff(pctx, opts=diffopts)), []
            )
            fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0
            for filename, mar, lineadd, lineremove, isbin in parsegitdiff(diff):
                if isbin:
                    continue
                added = sum(pycompat.itervalues(lineadd), 0)
                if mar == 'm':
                    if added and lineremove:
                        lineschanged[
                            roundto(added, 5), roundto(lineremove, 5)
                        ] += 1
                    filechanges += 1
                elif mar == 'a':
                    fileadds += 1
                    if '/' in filename:
                        filedir = filename.rsplit('/', 1)[0]
                        if filedir not in pctx.dirs():
                            diradds += 1
                    linesinfilesadded[roundto(added, 5)] += 1
                elif mar == 'r':
                    fileremoves += 1
                for length, count in pycompat.iteritems(lineadd):
                    linelengths[length] += count
            fileschanged[filechanges] += 1
            filesadded[fileadds] += 1
            dirsadded[diradds] += 1
            filesremoved[fileremoves] += 1
        progress.complete()

    invchildren = zerodict()

    for rev, count in pycompat.iteritems(children):
        invchildren[count] += 1

    if output != '-':
        ui.status(_('writing output to %s\n') % output)

    def pronk(d):
        return sorted(pycompat.iteritems(d), key=lambda x: x[1], reverse=True)

    json.dump(
        {
            'revs': len(revs),
            'initdirs': pronk(dirs),
            'lineschanged': pronk(lineschanged),
            'children': pronk(invchildren),
            'fileschanged': pronk(fileschanged),
            'filesadded': pronk(filesadded),
            'linesinfilesadded': pronk(linesinfilesadded),
            'dirsadded': pronk(dirsadded),
            'filesremoved': pronk(filesremoved),
            'linelengths': pronk(linelengths),
            'parents': pronk(parents),
            'p1distance': pronk(p1distance),
            'p2distance': pronk(p2distance),
            'interarrival': pronk(interarrival),
            'tzoffset': pronk(tzoffset),
        },
        fp,
    )
    fp.close()


@command(
    'synthesize',
    [
        ('c', 'count', 0, _('create given number of commits'), _('COUNT')),
        ('', 'dict', '', _('path to a dictionary of words'), _('FILE')),
        ('', 'initfiles', 0, _('initial file count to create'), _('COUNT')),
    ],
    _('hg synthesize [OPTION].. DESCFILE'),
)
def synthesize(ui, repo, descpath, **opts):
    """synthesize commits based on a model of an existing repository

    The model must have been generated by :hg:`analyze`. Commits will
    be generated randomly according to the probabilities described in
    the model. If --initfiles is set, the repository will be seeded with
    the given number of files following the modeled repository's directory
    structure.

    When synthesizing new content, commit descriptions, and user
    names, words will be chosen randomly from a dictionary that is
    presumed to contain one word per line. Use --dict to specify the
    path to an alternate dictionary to use.
    """
    try:
        fp = hg.openpath(ui, descpath)
    except Exception as err:
        raise error.Abort('%s: %s' % (descpath, err))
    desc = json.load(fp)
    fp.close()

    def cdf(l):
        if not l:
            return [], []
        vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True))
        t = float(sum(probs, 0))
        s, cdfs = 0, []
        for v in probs:
            s += v
            cdfs.append(s / t)
        return vals, cdfs
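    # cdf turns observed counts into a cumulative sampling table, e.g.
    # cdf([('a', 3), ('b', 1)]) == (('a', 'b'), [0.75, 1.0]).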

    lineschanged = cdf(desc['lineschanged'])
    fileschanged = cdf(desc['fileschanged'])
    filesadded = cdf(desc['filesadded'])
    dirsadded = cdf(desc['dirsadded'])
    filesremoved = cdf(desc['filesremoved'])
    linelengths = cdf(desc['linelengths'])
    parents = cdf(desc['parents'])
    p1distance = cdf(desc['p1distance'])
    p2distance = cdf(desc['p2distance'])
    interarrival = cdf(desc['interarrival'])
    linesinfilesadded = cdf(desc['linesinfilesadded'])
    tzoffset = cdf(desc['tzoffset'])

    dictfile = opts.get('dict') or '/usr/share/dict/words'
    try:
        fp = open(dictfile, 'rU')
    except IOError as err:
        raise error.Abort('%s: %s' % (dictfile, err.strerror))
    words = fp.read().splitlines()
    fp.close()

    initdirs = {}
    if desc['initdirs']:
        for k, v in desc['initdirs']:
            initdirs[k.encode('utf-8').replace('.hg', '_hg')] = v
        initdirs = renamedirs(initdirs, words)
    initdirscdf = cdf(initdirs)

    def pick(cdf):
        return cdf[0][bisect.bisect_left(cdf[1], random.random())]
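    # pick draws a value from such a table: with the cdf example above,
    # a random.random() below 0.75 selects 'a', anything else selects 'b'.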

    def pickpath():
        return os.path.join(pick(initdirscdf), random.choice(words))

    def makeline(minimum=0):
        total = max(minimum, pick(linelengths))
        c, l = 0, []
        while c < total:
            w = random.choice(words)
            c += len(w) + 1
            l.append(w)
        return ' '.join(l)

    wlock = repo.wlock()
    lock = repo.lock()

    nevertouch = {'.hgsub', '.hgignore', '.hgtags'}

    _synthesizing = _('synthesizing')
    _files = _('initial files')
    _changesets = _('changesets')

    # Synthesize a single initial revision adding files to the repo according
    # to the modeled directory structure.
    initcount = int(opts['initfiles'])
    if initcount and initdirs:
        pctx = repo['.']
        dirs = set(pctx.dirs())
        files = {}

        def validpath(path):
            # Don't pick filenames which are already directory names.
            if path in dirs:
                return False
            # Don't pick directories which were used as file names.
            while path:
                if path in files:
                    return False
                path = os.path.dirname(path)
            return True

        progress = ui.makeprogress(_synthesizing, unit=_files, total=initcount)
        for i in pycompat.xrange(0, initcount):
            progress.update(i)

            path = pickpath()
            while not validpath(path):
                path = pickpath()
            data = '%s contents\n' % path
            files[path] = data
            dir = os.path.dirname(path)
            while dir and dir not in dirs:
                dirs.add(dir)
                dir = os.path.dirname(dir)

        def filectxfn(repo, memctx, path):
            return context.memfilectx(repo, memctx, path, files[path])

        progress.complete()
        message = 'synthesized wide repo with %d files' % (len(files),)
        mc = context.memctx(
            repo,
            [pctx.node(), nullid],
            message,
            files,
            filectxfn,
            ui.username(),
            '%d %d' % dateutil.makedate(),
        )
        initnode = mc.commit()
        if ui.debugflag:
            hexfn = hex
        else:
            hexfn = short
        ui.status(
            _('added commit %s with %d files\n') % (hexfn(initnode), len(files))
        )

    # Synthesize incremental revisions to the repository, adding repo depth.
    count = int(opts['count'])
    heads = set(map(repo.changelog.rev, repo.heads()))
    progress = ui.makeprogress(_synthesizing, unit=_changesets, total=count)
    for i in pycompat.xrange(count):
        progress.update(i)

        node = repo.changelog.node
        revs = len(repo)

        def pickhead(heads, distance):
            if heads:
                lheads = sorted(heads)
                rev = revs - min(pick(distance), revs)
                if rev < lheads[-1]:
                    rev = lheads[bisect.bisect_left(lheads, rev)]
                else:
                    rev = lheads[-1]
                return rev, node(rev)
            return nullrev, nullid

        r1 = revs - min(pick(p1distance), revs)
        p1 = node(r1)

        # the number of heads will grow without bound if we use a pure
        # model, so artificially constrain their proliferation
        toomanyheads = len(heads) > random.randint(1, 20)
        if p2distance[0] and (pick(parents) == 2 or toomanyheads):
            r2, p2 = pickhead(heads.difference([r1]), p2distance)
        else:
            r2, p2 = nullrev, nullid

        pl = [p1, p2]
        pctx = repo[r1]
        mf = pctx.manifest()
        mfk = mf.keys()
        changes = {}
        if mfk:
            for __ in pycompat.xrange(pick(fileschanged)):
                for __ in pycompat.xrange(10):
                    fctx = pctx.filectx(random.choice(mfk))
                    path = fctx.path()
                    if not (
                        path in nevertouch
                        or fctx.isbinary()
                        or 'l' in fctx.flags()
                    ):
                        break
                lines = fctx.data().splitlines()
                add, remove = pick(lineschanged)
                for __ in pycompat.xrange(remove):
                    if not lines:
                        break
                    del lines[random.randrange(0, len(lines))]
                for __ in pycompat.xrange(add):
                    lines.insert(random.randint(0, len(lines)), makeline())
                path = fctx.path()
                changes[path] = '\n'.join(lines) + '\n'
            for __ in pycompat.xrange(pick(filesremoved)):
                for __ in pycompat.xrange(10):
                    path = random.choice(mfk)
                    if path not in changes:
                        break
        if filesadded:
            dirs = list(pctx.dirs())
            dirs.insert(0, '')
        for __ in pycompat.xrange(pick(filesadded)):
            pathstr = ''
            while pathstr in dirs:
                path = [random.choice(dirs)]
                if pick(dirsadded):
                    path.append(random.choice(words))
                path.append(random.choice(words))
                pathstr = '/'.join(filter(None, path))
            data = (
                '\n'.join(
                    makeline()
                    for __ in pycompat.xrange(pick(linesinfilesadded))
                )
                + '\n'
            )
            changes[pathstr] = data

        def filectxfn(repo, memctx, path):
            if path not in changes:
                return None
            return context.memfilectx(repo, memctx, path, changes[path])

        if not changes:
            continue
        if revs:
            date = repo['tip'].date()[0] + pick(interarrival)
        else:
            date = time.time() - (86400 * count)
        # dates in mercurial must be positive, fit in 32-bit signed integers.
        date = min(0x7FFFFFFF, max(0, date))
        user = random.choice(words) + '@' + random.choice(words)
        mc = context.memctx(
            repo,
            pl,
            makeline(minimum=2),
            sorted(changes),
            filectxfn,
            user,
            '%d %d' % (date, pick(tzoffset)),
        )
        newnode = mc.commit()
        heads.add(repo.changelog.rev(newnode))
        heads.discard(r1)
        heads.discard(r2)
    progress.complete()

    lock.release()
    wlock.release()


def renamedirs(dirs, words):
    '''Randomly rename the directory names in the per-dir file count dict.'''
    wordgen = itertools.cycle(words)
    replacements = {'': ''}

    def rename(dirpath):
        """Recursively rename the directory and all path prefixes.

        The mapping from path to renamed path is stored for all path prefixes
        as in dynamic programming, ensuring linear runtime and consistent
        renaming regardless of iteration order through the model.
        """
        if dirpath in replacements:
            return replacements[dirpath]
        head, _ = os.path.split(dirpath)
        if head:
            head = rename(head)
        else:
            head = ''
        renamed = os.path.join(head, next(wordgen))
        replacements[dirpath] = renamed
        return renamed

    result = []
    for dirpath, count in pycompat.iteritems(dirs):
        result.append([rename(dirpath.lstrip(os.sep)), count])
    return result
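# Sketch: renamedirs({'a/b': 3}, ['x', 'y']) returns [['x/y', 3]], and any
# other directory sharing the prefix 'a' would be renamed under 'x' as well.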
@@ -1,119 +1,120 @@
# Copyright (C) 2015 - Mike Edgar <adgar@google.com>
#
# This extension enables removal of file content at a given revision,
# rewriting the data/metadata of successive revisions to preserve revision log
# integrity.

"""erase file content at a given revision

The censor command instructs Mercurial to erase all content of a file at a given
revision *without updating the changeset hash.* This allows existing history to
remain valid while preventing future clones/pulls from receiving the erased
data.

Typical uses for censor are due to security or legal requirements, including::

* Passwords, private keys, cryptographic material
* Licensed data/code/libraries for which the license has expired
* Personally Identifiable Information or other private data

Censored nodes can interrupt mercurial's typical operation whenever the excised
data needs to be materialized. Some commands, like ``hg cat``/``hg revert``,
simply fail when asked to produce censored data. Others, like ``hg verify`` and
``hg update``, must be capable of tolerating censored data to continue to
function in a meaningful way. Such commands only tolerate censored file
revisions if they are allowed by the "censor.policy=ignore" config option.

A few informative commands such as ``hg grep`` will unconditionally
ignore censored data and merely report that it was encountered.
"""

from __future__ import absolute_import

from mercurial.i18n import _
from mercurial.node import short

from mercurial import (
    error,
    logcmdutil,
    registrar,
    scmutil,
)

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'


@command(
    b'censor',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'censor file from specified revision'),
            _(b'REV'),
        ),
        (b't', b'tombstone', b'', _(b'replacement tombstone data'), _(b'TEXT')),
    ],
    _(b'-r REV [-t TEXT] [FILE]'),
    helpcategory=command.CATEGORY_MAINTENANCE,
)
def censor(ui, repo, path, rev=b'', tombstone=b'', **opts):
    with repo.wlock(), repo.lock():
        return _docensor(ui, repo, path, rev, tombstone, **opts)


def _docensor(ui, repo, path, rev=b'', tombstone=b'', **opts):
    if not path:
        raise error.Abort(_(b'must specify file path to censor'))
    if not rev:
        raise error.Abort(_(b'must specify revision to censor'))

    wctx = repo[None]

    m = scmutil.match(wctx, (path,))
    if m.anypats() or len(m.files()) != 1:
        raise error.Abort(_(b'can only specify an explicit filename'))
    path = m.files()[0]
    flog = repo.file(path)
    if not len(flog):
        raise error.Abort(_(b'cannot censor file with no history'))

87 rev = scmutil.revsingle(repo, rev, rev).rev()
88 rev = logcmdutil.revsingle(repo, rev, rev).rev()
88 try:
89 try:
89 ctx = repo[rev]
90 ctx = repo[rev]
90 except KeyError:
91 except KeyError:
91 raise error.Abort(_(b'invalid revision identifier %s') % rev)
92 raise error.Abort(_(b'invalid revision identifier %s') % rev)
92
93
93 try:
94 try:
94 fctx = ctx.filectx(path)
95 fctx = ctx.filectx(path)
95 except error.LookupError:
96 except error.LookupError:
96 raise error.Abort(_(b'file does not exist at revision %s') % rev)
97 raise error.Abort(_(b'file does not exist at revision %s') % rev)
97
98
98 fnode = fctx.filenode()
99 fnode = fctx.filenode()
99 heads = []
100 heads = []
100 for headnode in repo.heads():
101 for headnode in repo.heads():
101 hc = repo[headnode]
102 hc = repo[headnode]
102 if path in hc and hc.filenode(path) == fnode:
103 if path in hc and hc.filenode(path) == fnode:
103 heads.append(hc)
104 heads.append(hc)
104 if heads:
105 if heads:
105 headlist = b', '.join([short(c.node()) for c in heads])
106 headlist = b', '.join([short(c.node()) for c in heads])
106 raise error.Abort(
107 raise error.Abort(
107 _(b'cannot censor file in heads (%s)') % headlist,
108 _(b'cannot censor file in heads (%s)') % headlist,
108 hint=_(b'clean/delete and commit first'),
109 hint=_(b'clean/delete and commit first'),
109 )
110 )
110
111
111 wp = wctx.parents()
112 wp = wctx.parents()
112 if ctx.node() in [p.node() for p in wp]:
113 if ctx.node() in [p.node() for p in wp]:
113 raise error.Abort(
114 raise error.Abort(
114 _(b'cannot censor working directory'),
115 _(b'cannot censor working directory'),
115 hint=_(b'clean/delete/update first'),
116 hint=_(b'clean/delete/update first'),
116 )
117 )
117
118
118 with repo.transaction(b'censor') as tr:
119 with repo.transaction(b'censor') as tr:
119 flog.censorrevision(tr, fnode, tombstone=tombstone)
120 flog.censorrevision(tr, fnode, tombstone=tombstone)
@@ -1,84 +1,83 @@
# Mercurial extension to provide the 'hg children' command
#
# Copyright 2007 by Intevation GmbH <intevation@intevation.de>
#
# Author(s):
# Thomas Arendsen Hein <thomas@intevation.de>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''command to display child changesets (DEPRECATED)

This extension is deprecated. You should use :hg:`log -r
"children(REV)"` instead.
'''

from __future__ import absolute_import

from mercurial.i18n import _
from mercurial import (
    cmdutil,
    logcmdutil,
    pycompat,
    registrar,
)

templateopts = cmdutil.templateopts

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'


@command(
    b'children',
    [
        (
            b'r',
            b'rev',
            b'.',
            _(b'show children of the specified revision'),
            _(b'REV'),
        ),
    ]
    + templateopts,
    _(b'hg children [-r REV] [FILE]'),
    helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
    inferrepo=True,
)
def children(ui, repo, file_=None, **opts):
    """show the children of the given or working directory revision

    Print the children of the working directory's revisions. If a
    revision is given via -r/--rev, the children of that revision will
    be printed. If a file argument is given, the revision in which the
    file was last changed (after the working directory revision or the
    argument to --rev if given) is printed.

    Please use :hg:`log` instead::

      hg children => hg log -r "children(.)"
      hg children -r REV => hg log -r "children(REV)"

    See :hg:`help log` and :hg:`help revsets.children`.

    """
    opts = pycompat.byteskwargs(opts)
    rev = opts.get(b'rev')
    ctx = logcmdutil.revsingle(repo, rev)
    if file_:
        fctx = repo.filectx(file_, changeid=ctx.rev())
        childctxs = [fcctx.changectx() for fcctx in fctx.children()]
    else:
        childctxs = ctx.children()

    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    for cctx in childctxs:
        displayer.show(cctx)
    displayer.close()
@@ -1,95 +1,95 @@
# closehead.py - Close arbitrary heads without checking them out first
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''close arbitrary heads without checking them out first'''

from __future__ import absolute_import

from mercurial.i18n import _
from mercurial import (
    bookmarks,
    cmdutil,
    context,
    error,
    logcmdutil,
    pycompat,
    registrar,
)

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

commitopts = cmdutil.commitopts
commitopts2 = cmdutil.commitopts2
commitopts3 = [(b'r', b'rev', [], _(b'revision to check'), _(b'REV'))]


@command(
    b'close-head|close-heads',
    commitopts + commitopts2 + commitopts3,
    _(b'[OPTION]... [REV]...'),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
    inferrepo=True,
)
def close_branch(ui, repo, *revs, **opts):
    """close the given head revisions

    This is equivalent to checking out each revision in a clean tree and running
    ``hg commit --close-branch``, except that it doesn't change the working
    directory.

    The commit message must be specified with -l or -m.
    """

    def docommit(rev):
        cctx = context.memctx(
            repo,
            parents=[rev, None],
            text=message,
            files=[],
            filectxfn=None,
            user=opts.get(b'user'),
            date=opts.get(b'date'),
            extra=extra,
        )
        tr = repo.transaction(b'commit')
        ret = repo.commitctx(cctx, True)
        bookmarks.update(repo, [rev, None], ret)
        cctx.markcommitted(ret)
        tr.close()

    opts = pycompat.byteskwargs(opts)

    revs += tuple(opts.get(b'rev', []))
    revs = logcmdutil.revrange(repo, revs)

    if not revs:
        raise error.Abort(_(b'no revisions specified'))

    heads = []
    for branch in repo.branchmap():
        heads.extend(repo.branchheads(branch))
    heads = {repo[h].rev() for h in heads}
    for rev in revs:
        if rev not in heads:
            raise error.Abort(_(b'revision is not an open head: %d') % rev)

    message = cmdutil.logmessage(ui, opts)
    if not message:
        raise error.Abort(_(b"no commit message specified with -l or -m"))
    extra = {b'close': b'1'}

    with repo.wlock(), repo.lock():
        for rev in revs:
            r = repo[rev]
            branch = r.branch()
            extra[b'branch'] = branch
            docommit(r)
    return 0
@@ -1,732 +1,732 @@
# hg.py - hg backend for convert extension
#
# Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# Notes for hg->hg conversion:
#
# * Old versions of Mercurial didn't trim the whitespace from the ends
#   of commit messages, but new versions do. Changesets created by
#   those older versions, then converted, may thus have different
#   hashes for changesets that are otherwise identical.
#
# * Using "--config convert.hg.saverev=true" causes the source
#   identifier to be stored in the converted revision, which gives the
#   converted revision a different identity than the source.
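#
# Editor's note: an illustrative invocation of the option above (the
# repository paths are hypothetical):
#
#   $ hg convert --config convert.hg.saverev=true src-repo dst-repo
#
# With saverev enabled, putcommit() below records the source changeset in a
# "convert_revision" extra on each converted changeset.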
from __future__ import absolute_import

import os
import re
import time

from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial.node import (
    bin,
    hex,
    sha1nodeconstants,
)
from mercurial import (
    bookmarks,
    context,
    error,
    exchange,
    hg,
    lock as lockmod,
    logcmdutil,
    merge as mergemod,
    phases,
    pycompat,
    util,
)
from mercurial.utils import dateutil

stringio = util.stringio

from . import common

mapfile = common.mapfile
NoRepo = common.NoRepo

sha1re = re.compile(br'\b[0-9a-f]{12,40}\b')


class mercurial_sink(common.converter_sink):
    def __init__(self, ui, repotype, path):
        common.converter_sink.__init__(self, ui, repotype, path)
        self.branchnames = ui.configbool(b'convert', b'hg.usebranchnames')
        self.clonebranches = ui.configbool(b'convert', b'hg.clonebranches')
        self.tagsbranch = ui.config(b'convert', b'hg.tagsbranch')
        self.lastbranch = None
        if os.path.isdir(path) and len(os.listdir(path)) > 0:
            try:
                self.repo = hg.repository(self.ui, path)
                if not self.repo.local():
                    raise NoRepo(
                        _(b'%s is not a local Mercurial repository') % path
                    )
            except error.RepoError as err:
                ui.traceback()
                raise NoRepo(err.args[0])
        else:
            try:
                ui.status(_(b'initializing destination %s repository\n') % path)
                self.repo = hg.repository(self.ui, path, create=True)
                if not self.repo.local():
                    raise NoRepo(
                        _(b'%s is not a local Mercurial repository') % path
                    )
                self.created.append(path)
            except error.RepoError:
                ui.traceback()
                raise NoRepo(
                    _(b"could not create hg repository %s as sink") % path
                )
        self.lock = None
        self.wlock = None
        self.filemapmode = False
        self.subrevmaps = {}

    def before(self):
        self.ui.debug(b'run hg sink pre-conversion action\n')
        self.wlock = self.repo.wlock()
        self.lock = self.repo.lock()

    def after(self):
        self.ui.debug(b'run hg sink post-conversion action\n')
        if self.lock:
            self.lock.release()
        if self.wlock:
            self.wlock.release()

    def revmapfile(self):
        return self.repo.vfs.join(b"shamap")

    def authorfile(self):
        return self.repo.vfs.join(b"authormap")

    def setbranch(self, branch, pbranches):
        if not self.clonebranches:
            return

        setbranch = branch != self.lastbranch
        self.lastbranch = branch
        if not branch:
            branch = b'default'
        pbranches = [(b[0], b[1] and b[1] or b'default') for b in pbranches]

        branchpath = os.path.join(self.path, branch)
        if setbranch:
            self.after()
            try:
                self.repo = hg.repository(self.ui, branchpath)
            except Exception:
                self.repo = hg.repository(self.ui, branchpath, create=True)
            self.before()

        # pbranches may bring revisions from other branches (merge parents)
        # Make sure we have them, or pull them.
        missings = {}
        for b in pbranches:
            try:
                self.repo.lookup(b[0])
            except Exception:
                missings.setdefault(b[1], []).append(b[0])

        if missings:
            self.after()
            for pbranch, heads in sorted(pycompat.iteritems(missings)):
                pbranchpath = os.path.join(self.path, pbranch)
                prepo = hg.peer(self.ui, {}, pbranchpath)
                self.ui.note(
                    _(b'pulling from %s into %s\n') % (pbranch, branch)
                )
                exchange.pull(
                    self.repo, prepo, heads=[prepo.lookup(h) for h in heads]
                )
            self.before()

    def _rewritetags(self, source, revmap, data):
        fp = stringio()
        for line in data.splitlines():
            s = line.split(b' ', 1)
            if len(s) != 2:
                self.ui.warn(_(b'invalid tag entry: "%s"\n') % line)
                fp.write(b'%s\n' % line)  # Bogus, but keep for hash stability
                continue
            revid = revmap.get(source.lookuprev(s[0]))
            if not revid:
                if s[0] == sha1nodeconstants.nullhex:
                    revid = s[0]
                else:
                    # missing, but keep for hash stability
                    self.ui.warn(_(b'missing tag entry: "%s"\n') % line)
                    fp.write(b'%s\n' % line)
                    continue
            fp.write(b'%s %s\n' % (revid, s[1]))
        return fp.getvalue()

    def _rewritesubstate(self, source, data):
        fp = stringio()
        for line in data.splitlines():
            s = line.split(b' ', 1)
            if len(s) != 2:
                continue

            revid = s[0]
            subpath = s[1]
            if revid != sha1nodeconstants.nullhex:
                revmap = self.subrevmaps.get(subpath)
                if revmap is None:
                    revmap = mapfile(
                        self.ui, self.repo.wjoin(subpath, b'.hg/shamap')
                    )
                    self.subrevmaps[subpath] = revmap

                    # It is reasonable that one or more of the subrepos don't
                    # need to be converted, in which case they can be cloned
                    # into place instead of converted. Therefore, only warn
                    # once.
                    msg = _(b'no ".hgsubstate" updates will be made for "%s"\n')
                    if len(revmap) == 0:
                        sub = self.repo.wvfs.reljoin(subpath, b'.hg')

                        if self.repo.wvfs.exists(sub):
                            self.ui.warn(msg % subpath)

                newid = revmap.get(revid)
                if not newid:
                    if len(revmap) > 0:
                        self.ui.warn(
                            _(b"%s is missing from %s/.hg/shamap\n")
                            % (revid, subpath)
                        )
                else:
                    revid = newid

            fp.write(b'%s %s\n' % (revid, subpath))

        return fp.getvalue()

    def _calculatemergedfiles(self, source, p1ctx, p2ctx):
        """Calculates the files from p2 that we need to pull in when merging p1
        and p2, given that the merge is coming from the given source.

        This prevents us from losing files that only exist in the target p2 and
        that don't come from the source repo (like if you're merging multiple
        repositories together).
        """
        anc = [p1ctx.ancestor(p2ctx)]
        # Calculate what files are coming from p2
        # TODO: mresult.commitinfo might be able to get that info
        mresult = mergemod.calculateupdates(
            self.repo,
            p1ctx,
            p2ctx,
            anc,
            branchmerge=True,
            force=True,
            acceptremote=False,
            followcopies=False,
        )

        for file, (action, info, msg) in mresult.filemap():
            if source.targetfilebelongstosource(file):
                # If the file belongs to the source repo, ignore the p2
                # since it will be covered by the existing fileset.
                continue

            # If the file requires actual merging, abort. We don't have enough
            # context to resolve merges correctly.
            if action in [b'm', b'dm', b'cd', b'dc']:
                raise error.Abort(
                    _(
                        b"unable to convert merge commit "
                        b"since target parents do not merge cleanly (file "
                        b"%s, parents %s and %s)"
                    )
                    % (file, p1ctx, p2ctx)
                )
            elif action == b'k':
                # 'keep' means nothing changed from p1
                continue
            else:
                # Any other change means we want to take the p2 version
                yield file

    def putcommit(
        self, files, copies, parents, commit, source, revmap, full, cleanp2
    ):
        files = dict(files)

        def getfilectx(repo, memctx, f):
            if p2ctx and f in p2files and f not in copies:
                self.ui.debug(b'reusing %s from p2\n' % f)
                try:
                    return p2ctx[f]
                except error.ManifestLookupError:
                    # If the file doesn't exist in p2, then we're syncing a
                    # delete, so just return None.
                    return None
            try:
                v = files[f]
            except KeyError:
                return None
            data, mode = source.getfile(f, v)
            if data is None:
                return None
            if f == b'.hgtags':
                data = self._rewritetags(source, revmap, data)
            if f == b'.hgsubstate':
                data = self._rewritesubstate(source, data)
            return context.memfilectx(
                self.repo,
                memctx,
                f,
                data,
                b'l' in mode,
                b'x' in mode,
                copies.get(f),
            )

        pl = []
        for p in parents:
            if p not in pl:
                pl.append(p)
        parents = pl
        nparents = len(parents)
        if self.filemapmode and nparents == 1:
            m1node = self.repo.changelog.read(bin(parents[0]))[0]
            parent = parents[0]

        if len(parents) < 2:
            parents.append(self.repo.nullid)
        if len(parents) < 2:
            parents.append(self.repo.nullid)
        p2 = parents.pop(0)

        text = commit.desc

        sha1s = re.findall(sha1re, text)
        for sha1 in sha1s:
            oldrev = source.lookuprev(sha1)
            newrev = revmap.get(oldrev)
            if newrev is not None:
                text = text.replace(sha1, newrev[: len(sha1)])

        extra = commit.extra.copy()

        sourcename = self.repo.ui.config(b'convert', b'hg.sourcename')
        if sourcename:
            extra[b'convert_source'] = sourcename

        for label in (
            b'source',
            b'transplant_source',
            b'rebase_source',
            b'intermediate-source',
        ):
            node = extra.get(label)

            if node is None:
                continue

            # Only transplant stores its reference in binary
            if label == b'transplant_source':
                node = hex(node)

            newrev = revmap.get(node)
            if newrev is not None:
                if label == b'transplant_source':
                    newrev = bin(newrev)

                extra[label] = newrev

        if self.branchnames and commit.branch:
            extra[b'branch'] = commit.branch
        if commit.rev and commit.saverev:
            extra[b'convert_revision'] = commit.rev

        while parents:
            p1 = p2
            p2 = parents.pop(0)
            p1ctx = self.repo[p1]
            p2ctx = None
            if p2 != self.repo.nullid:
                p2ctx = self.repo[p2]
            fileset = set(files)
            if full:
                fileset.update(self.repo[p1])
                fileset.update(self.repo[p2])

            if p2ctx:
                p2files = set(cleanp2)
                for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
                    p2files.add(file)
                    fileset.add(file)

            ctx = context.memctx(
                self.repo,
                (p1, p2),
                text,
                fileset,
                getfilectx,
                commit.author,
                commit.date,
                extra,
            )

            # We won't know if the conversion changes the node until after the
            # commit, so copy the source's phase for now.
            self.repo.ui.setconfig(
                b'phases',
                b'new-commit',
                phases.phasenames[commit.phase],
                b'convert',
            )

            with self.repo.transaction(b"convert") as tr:
                if self.repo.ui.config(b'convert', b'hg.preserve-hash'):
                    origctx = commit.ctx
                else:
                    origctx = None
                node = hex(self.repo.commitctx(ctx, origctx=origctx))

                # If the node value has changed, but the phase is lower than
                # draft, set it back to draft since it hasn't been exposed
                # anywhere.
                if commit.rev != node:
                    ctx = self.repo[node]
                    if ctx.phase() < phases.draft:
                        phases.registernew(
                            self.repo, tr, phases.draft, [ctx.rev()]
                        )

            text = b"(octopus merge fixup)\n"
            p2 = node

        if self.filemapmode and nparents == 1:
            man = self.repo.manifestlog.getstorage(b'')
            mnode = self.repo.changelog.read(bin(p2))[0]
            closed = b'close' in commit.extra
            if not closed and not man.cmp(m1node, man.revision(mnode)):
                self.ui.status(_(b"filtering out empty revision\n"))
                self.repo.rollback(force=True)
                return parent
        return p2

    def puttags(self, tags):
        tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
        tagparent = tagparent or self.repo.nullid

        oldlines = set()
        for branch, heads in pycompat.iteritems(self.repo.branchmap()):
            for h in heads:
                if b'.hgtags' in self.repo[h]:
                    oldlines.update(
                        set(self.repo[h][b'.hgtags'].data().splitlines(True))
                    )
        oldlines = sorted(list(oldlines))

        newlines = sorted([(b"%s %s\n" % (tags[tag], tag)) for tag in tags])
        if newlines == oldlines:
            return None, None

        # if the old and new tags match, then there is nothing to update
        oldtags = set()
        newtags = set()
        for line in oldlines:
            s = line.strip().split(b' ', 1)
            if len(s) != 2:
                continue
            oldtags.add(s[1])
        for line in newlines:
            s = line.strip().split(b' ', 1)
            if len(s) != 2:
                continue
            if s[1] not in oldtags:
                newtags.add(s[1].strip())

        if not newtags:
            return None, None

        data = b"".join(newlines)

        def getfilectx(repo, memctx, f):
            return context.memfilectx(repo, memctx, f, data, False, False, None)

        self.ui.status(_(b"updating tags\n"))
        date = b"%d 0" % int(time.mktime(time.gmtime()))
        extra = {b'branch': self.tagsbranch}
        ctx = context.memctx(
            self.repo,
            (tagparent, None),
            b"update tags",
            [b".hgtags"],
            getfilectx,
            b"convert-repo",
            date,
            extra,
        )
        node = self.repo.commitctx(ctx)
        return hex(node), hex(tagparent)

    def setfilemapmode(self, active):
        self.filemapmode = active

    def putbookmarks(self, updatedbookmark):
        if not len(updatedbookmark):
            return
        wlock = lock = tr = None
        try:
            wlock = self.repo.wlock()
            lock = self.repo.lock()
            tr = self.repo.transaction(b'bookmark')
            self.ui.status(_(b"updating bookmarks\n"))
            destmarks = self.repo._bookmarks
            changes = [
                (bookmark, bin(updatedbookmark[bookmark]))
                for bookmark in updatedbookmark
            ]
            destmarks.applychanges(self.repo, tr, changes)
            tr.close()
        finally:
            lockmod.release(lock, wlock, tr)

    def hascommitfrommap(self, rev):
        # the exact semantics of clonebranches is unclear so we can't say no
        return rev in self.repo or self.clonebranches

    def hascommitforsplicemap(self, rev):
        if rev not in self.repo and self.clonebranches:
            raise error.Abort(
                _(
                    b'revision %s not found in destination '
                    b'repository (lookups with clonebranches=true '
                    b'are not implemented)'
                )
                % rev
            )
        return rev in self.repo


class mercurial_source(common.converter_source):
    def __init__(self, ui, repotype, path, revs=None):
        common.converter_source.__init__(self, ui, repotype, path, revs)
        self.ignoreerrors = ui.configbool(b'convert', b'hg.ignoreerrors')
        self.ignored = set()
        self.saverev = ui.configbool(b'convert', b'hg.saverev')
        try:
            self.repo = hg.repository(self.ui, path)
            # try to provoke an exception if this isn't really a hg
            # repo, but some other bogus compatible-looking url
            if not self.repo.local():
                raise error.RepoError
        except error.RepoError:
            ui.traceback()
            raise NoRepo(_(b"%s is not a local Mercurial repository") % path)
        self.lastrev = None
        self.lastctx = None
        self._changescache = None, None
        self.convertfp = None
        # Restrict converted revisions to startrev descendants
        startnode = ui.config(b'convert', b'hg.startrev')
        hgrevs = ui.config(b'convert', b'hg.revs')
        if hgrevs is None:
            if startnode is not None:
                try:
                    startnode = self.repo.lookup(startnode)
                except error.RepoError:
                    raise error.Abort(
                        _(b'%s is not a valid start revision') % startnode
                    )
                startrev = self.repo.changelog.rev(startnode)
                children = {startnode: 1}
                for r in self.repo.changelog.descendants([startrev]):
                    children[self.repo.changelog.node(r)] = 1
                self.keep = children.__contains__
            else:
                self.keep = util.always
            if revs:
                self._heads = [self.repo.lookup(r) for r in revs]
            else:
                self._heads = self.repo.heads()
        else:
            if revs or startnode is not None:
                raise error.Abort(
                    _(
                        b'hg.revs cannot be combined with '
                        b'hg.startrev or --rev'
                    )
                )
            nodes = set()
            parents = set()
            for r in logcmdutil.revrange(self.repo, [hgrevs]):
                ctx = self.repo[r]
                nodes.add(ctx.node())
                parents.update(p.node() for p in ctx.parents())
            self.keep = nodes.__contains__
            self._heads = nodes - parents

    def _changectx(self, rev):
        if self.lastrev != rev:
            self.lastctx = self.repo[rev]
            self.lastrev = rev
        return self.lastctx

    def _parents(self, ctx):
        return [p for p in ctx.parents() if p and self.keep(p.node())]

    def getheads(self):
        return [hex(h) for h in self._heads if self.keep(h)]

    def getfile(self, name, rev):
        try:
            fctx = self._changectx(rev)[name]
            return fctx.data(), fctx.flags()
        except error.LookupError:
            return None, None

    def _changedfiles(self, ctx1, ctx2):
        ma, r = [], []
        maappend = ma.append
        rappend = r.append
        d = ctx1.manifest().diff(ctx2.manifest())
        for f, ((node1, flag1), (node2, flag2)) in pycompat.iteritems(d):
            if node2 is None:
                rappend(f)
            else:
                maappend(f)
        return ma, r

    def getchanges(self, rev, full):
        ctx = self._changectx(rev)
        parents = self._parents(ctx)
        if full or not parents:
            files = copyfiles = ctx.manifest()
        if parents:
            if self._changescache[0] == rev:
                ma, r = self._changescache[1]
            else:
                ma, r = self._changedfiles(parents[0], ctx)
            if not full:
                files = ma + r
                copyfiles = ma
        # _getcopies() is also run for roots and before filtering so missing
        # revlogs are detected early
        copies = self._getcopies(ctx, parents, copyfiles)
        cleanp2 = set()
        if len(parents) == 2:
            d = parents[1].manifest().diff(ctx.manifest(), clean=True)
            for f, value in pycompat.iteritems(d):
                if value is None:
                    cleanp2.add(f)
        changes = [(f, rev) for f in files if f not in self.ignored]
        changes.sort()
        return changes, copies, cleanp2

    def _getcopies(self, ctx, parents, files):
        copies = {}
        for name in files:
            if name in self.ignored:
                continue
            try:
                copysource = ctx.filectx(name).copysource()
                if copysource in self.ignored:
                    continue
                # Ignore copy sources not in parent revisions
                if not any(copysource in p for p in parents):
                    continue
                copies[name] = copysource
            except TypeError:
                pass
            except error.LookupError as e:
                if not self.ignoreerrors:
                    raise
                self.ignored.add(name)
                self.ui.warn(_(b'ignoring: %s\n') % e)
        return copies

    def getcommit(self, rev):
        ctx = self._changectx(rev)
        _parents = self._parents(ctx)
        parents = [p.hex() for p in _parents]
        optparents = [p.hex() for p in ctx.parents() if p and p not in _parents]
        crev = rev

        return common.commit(
            author=ctx.user(),
            date=dateutil.datestr(ctx.date(), b'%Y-%m-%d %H:%M:%S %1%2'),
            desc=ctx.description(),
            rev=crev,
            parents=parents,
            optparents=optparents,
            branch=ctx.branch(),
            extra=ctx.extra(),
            sortkey=ctx.rev(),
            saverev=self.saverev,
            phase=ctx.phase(),
            ctx=ctx,
        )

    def numcommits(self):
        return len(self.repo)

    def gettags(self):
        # This will get written to .hgtags, filter non global tags out.
        tags = [
            t
            for t in self.repo.tagslist()
            if self.repo.tagtype(t[0]) == b'global'
        ]
        return {name: hex(node) for name, node in tags if self.keep(node)}

    def getchangedfiles(self, rev, i):
        ctx = self._changectx(rev)
        parents = self._parents(ctx)
        if not parents and i is None:
            i = 0
            ma, r = ctx.manifest().keys(), []
        else:
            i = i or 0
            ma, r = self._changedfiles(parents[i], ctx)
        ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)]

        if i == 0:
            self._changescache = (rev, (ma, r))

        return ma + r

    def converted(self, rev, destrev):
        if self.convertfp is None:
            self.convertfp = open(self.repo.vfs.join(b'shamap'), b'ab')
        self.convertfp.write(util.tonativeeol(b'%s %s\n' % (destrev, rev)))
        self.convertfp.flush()

    def before(self):
        self.ui.debug(b'run hg source pre-conversion action\n')

    def after(self):
        self.ui.debug(b'run hg source post-conversion action\n')

    def hasnativeorder(self):
        return True

    def hasnativeclose(self):
        return True

    def lookuprev(self, rev):
        try:
            return hex(self.repo.lookup(rev))
        except (error.RepoError, error.LookupError):
            return None

    def getbookmarks(self):
        return bookmarks.listbookmarks(self.repo)

    def checkrevformat(self, revstr, mapname=b'splicemap'):
        """Mercurial, revision string is a 40 byte hex"""
        self.checkhexformat(revstr, mapname)
@@ -1,480 +1,480 @@
1 """automatically manage newlines in repository files
1 """automatically manage newlines in repository files
2
2
3 This extension allows you to manage the type of line endings (CRLF or
3 This extension allows you to manage the type of line endings (CRLF or
4 LF) that are used in the repository and in the local working
4 LF) that are used in the repository and in the local working
5 directory. That way you can get CRLF line endings on Windows and LF on
5 directory. That way you can get CRLF line endings on Windows and LF on
6 Unix/Mac, thereby letting everybody use their OS native line endings.
6 Unix/Mac, thereby letting everybody use their OS native line endings.
7
7
8 The extension reads its configuration from a versioned ``.hgeol``
8 The extension reads its configuration from a versioned ``.hgeol``
9 configuration file found in the root of the working directory. The
9 configuration file found in the root of the working directory. The
10 ``.hgeol`` file use the same syntax as all other Mercurial
10 ``.hgeol`` file use the same syntax as all other Mercurial
11 configuration files. It uses two sections, ``[patterns]`` and
11 configuration files. It uses two sections, ``[patterns]`` and
12 ``[repository]``.
12 ``[repository]``.
13
13
14 The ``[patterns]`` section specifies how line endings should be
14 The ``[patterns]`` section specifies how line endings should be
15 converted between the working directory and the repository. The format is
15 converted between the working directory and the repository. The format is
16 specified by a file pattern. The first match is used, so put more
16 specified by a file pattern. The first match is used, so put more
17 specific patterns first. The available line endings are ``LF``,
17 specific patterns first. The available line endings are ``LF``,
18 ``CRLF``, and ``BIN``.
18 ``CRLF``, and ``BIN``.
19
19
20 Files with the declared format of ``CRLF`` or ``LF`` are always
20 Files with the declared format of ``CRLF`` or ``LF`` are always
21 checked out and stored in the repository in that format and files
21 checked out and stored in the repository in that format and files
22 declared to be binary (``BIN``) are left unchanged. Additionally,
22 declared to be binary (``BIN``) are left unchanged. Additionally,
23 ``native`` is an alias for checking out in the platform's default line
23 ``native`` is an alias for checking out in the platform's default line
24 ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
24 ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
25 Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
25 Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
26 default behavior; it is only required if you need to override a later,
26 default behavior; it is only required if you need to override a later,
27 more general pattern.
27 more general pattern.
28
28
29 The optional ``[repository]`` section specifies the line endings to
29 The optional ``[repository]`` section specifies the line endings to
30 use for files stored in the repository. It has a single setting,
30 use for files stored in the repository. It has a single setting,
31 ``native``, which determines the storage line endings for files
31 ``native``, which determines the storage line endings for files
32 declared as ``native`` in the ``[patterns]`` section. It can be set to
32 declared as ``native`` in the ``[patterns]`` section. It can be set to
33 ``LF`` or ``CRLF``. The default is ``LF``. For example, this means
33 ``LF`` or ``CRLF``. The default is ``LF``. For example, this means
34 that on Windows, files configured as ``native`` (``CRLF`` by default)
34 that on Windows, files configured as ``native`` (``CRLF`` by default)
35 will be converted to ``LF`` when stored in the repository. Files
35 will be converted to ``LF`` when stored in the repository. Files
36 declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
36 declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
37 are always stored as-is in the repository.
37 are always stored as-is in the repository.
38
38
39 Example versioned ``.hgeol`` file::
39 Example versioned ``.hgeol`` file::
40
40
41 [patterns]
41 [patterns]
42 **.py = native
42 **.py = native
43 **.vcproj = CRLF
43 **.vcproj = CRLF
44 **.txt = native
44 **.txt = native
45 Makefile = LF
45 Makefile = LF
46 **.jpg = BIN
46 **.jpg = BIN
47
47
48 [repository]
48 [repository]
49 native = LF
49 native = LF
50
50
51 .. note::
51 .. note::
52
52
53 The rules will first apply when files are touched in the working
53 The rules will first apply when files are touched in the working
54 directory, e.g. by updating to null and back to tip to touch all files.
54 directory, e.g. by updating to null and back to tip to touch all files.
55
55
56 The extension uses an optional ``[eol]`` section read from both the
56 The extension uses an optional ``[eol]`` section read from both the
57 normal Mercurial configuration files and the ``.hgeol`` file, with the
57 normal Mercurial configuration files and the ``.hgeol`` file, with the
58 latter overriding the former. You can use that section to control the
58 latter overriding the former. You can use that section to control the
59 overall behavior. There are three settings:
59 overall behavior. There are three settings:
60
60
61 - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
61 - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
62 ``CRLF`` to override the default interpretation of ``native`` for
62 ``CRLF`` to override the default interpretation of ``native`` for
63 checkout. This can be used with :hg:`archive` on Unix, say, to
63 checkout. This can be used with :hg:`archive` on Unix, say, to
64 generate an archive where files have line endings for Windows.
64 generate an archive where files have line endings for Windows.
65
65
66 - ``eol.only-consistent`` (default True) can be set to False to make
66 - ``eol.only-consistent`` (default True) can be set to False to make
67 the extension convert files with inconsistent EOLs. Inconsistent
67 the extension convert files with inconsistent EOLs. Inconsistent
68 means that there is both ``CRLF`` and ``LF`` present in the file.
68 means that there is both ``CRLF`` and ``LF`` present in the file.
69 Such files are normally not touched under the assumption that they
69 Such files are normally not touched under the assumption that they
70 have mixed EOLs on purpose.
70 have mixed EOLs on purpose.
71
71
72 - ``eol.fix-trailing-newline`` (default False) can be set to True to
72 - ``eol.fix-trailing-newline`` (default False) can be set to True to
73 ensure that converted files end with an EOL character (either ``\\n``
73 ensure that converted files end with an EOL character (either ``\\n``
74 or ``\\r\\n`` as per the configured patterns).
74 or ``\\r\\n`` as per the configured patterns).
75
75
76 The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
76 The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
77 like the deprecated win32text extension does. This means that you can
77 like the deprecated win32text extension does. This means that you can
78 disable win32text and enable eol and your filters will still work. You
78 disable win32text and enable eol and your filters will still work. You
79 only need these filters until you have prepared a ``.hgeol`` file.
79 only need these filters until you have prepared a ``.hgeol`` file.
80
80
81 The ``win32text.forbid*`` hooks provided by the win32text extension
81 The ``win32text.forbid*`` hooks provided by the win32text extension
82 have been unified into a single hook named ``eol.checkheadshook``. The
82 have been unified into a single hook named ``eol.checkheadshook``. The
83 hook will look up the expected line endings from the ``.hgeol`` file,
83 hook will look up the expected line endings from the ``.hgeol`` file,
84 which means you must migrate to a ``.hgeol`` file first before using
84 which means you must migrate to a ``.hgeol`` file first before using
85 the hook. ``eol.checkheadshook`` only checks heads; intermediate
85 the hook. ``eol.checkheadshook`` only checks heads; intermediate
86 invalid revisions will be pushed. To forbid them completely, use the
86 invalid revisions will be pushed. To forbid them completely, use the
87 ``eol.checkallhook`` hook. These hooks are best used as
87 ``eol.checkallhook`` hook. These hooks are best used as
88 ``pretxnchangegroup`` hooks.
88 ``pretxnchangegroup`` hooks.
89
89
90 See :hg:`help patterns` for more information about the glob patterns
90 See :hg:`help patterns` for more information about the glob patterns
91 used.
91 used.
92 """
92 """
93
93
94 from __future__ import absolute_import
94 from __future__ import absolute_import
95
95
96 import os
96 import os
97 import re
97 import re
98 from mercurial.i18n import _
98 from mercurial.i18n import _
99 from mercurial import (
99 from mercurial import (
100 config,
100 config,
101 error as errormod,
101 error as errormod,
102 extensions,
102 extensions,
103 match,
103 match,
104 pycompat,
104 pycompat,
105 registrar,
105 registrar,
106 scmutil,
106 scmutil,
107 util,
107 util,
108 )
108 )
109 from mercurial.utils import stringutil
109 from mercurial.utils import stringutil
110
110
111 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
111 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
112 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
112 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
113 # be specifying the version(s) of Mercurial they are tested with, or
113 # be specifying the version(s) of Mercurial they are tested with, or
114 # leave the attribute unspecified.
114 # leave the attribute unspecified.
115 testedwith = b'ships-with-hg-core'
115 testedwith = b'ships-with-hg-core'
116
116
117 configtable = {}
117 configtable = {}
118 configitem = registrar.configitem(configtable)
118 configitem = registrar.configitem(configtable)
119
119
120 configitem(
120 configitem(
121 b'eol',
121 b'eol',
122 b'fix-trailing-newline',
122 b'fix-trailing-newline',
123 default=False,
123 default=False,
124 )
124 )
125 configitem(
125 configitem(
126 b'eol',
126 b'eol',
127 b'native',
127 b'native',
128 default=pycompat.oslinesep,
128 default=pycompat.oslinesep,
129 )
129 )
130 configitem(
130 configitem(
131 b'eol',
131 b'eol',
132 b'only-consistent',
132 b'only-consistent',
133 default=True,
133 default=True,
134 )
134 )
135
135
136 # Matches a lone LF, i.e., one that is not part of CRLF.
136 # Matches a lone LF, i.e., one that is not part of CRLF.
137 singlelf = re.compile(b'(^|[^\r])\n')
137 singlelf = re.compile(b'(^|[^\r])\n')
138
138
139
139
140 def inconsistenteol(data):
140 def inconsistenteol(data):
141 return b'\r\n' in data and singlelf.search(data)
141 return b'\r\n' in data and singlelf.search(data)
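    # Illustration (not part of eol.py): a self-contained re-creation of the
    # check above, showing which byte strings count as inconsistent.
    import re
    _singlelf = re.compile(b'(^|[^\r])\n')  # an LF not preceded by CR

    def _inconsistent(data):
        return b'\r\n' in data and bool(_singlelf.search(data))

    assert not _inconsistent(b'a\r\nb\r\n')  # pure CRLF: consistent
    assert not _inconsistent(b'a\nb\n')      # pure LF: consistent
    assert _inconsistent(b'a\r\nb\n')        # CRLF plus a lone LF: inconsistent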
142
142
143
143
144 def tolf(s, params, ui, **kwargs):
144 def tolf(s, params, ui, **kwargs):
145 """Filter to convert to LF EOLs."""
145 """Filter to convert to LF EOLs."""
146 if stringutil.binary(s):
146 if stringutil.binary(s):
147 return s
147 return s
148 if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
148 if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
149 return s
149 return s
150 if (
150 if (
151 ui.configbool(b'eol', b'fix-trailing-newline')
151 ui.configbool(b'eol', b'fix-trailing-newline')
152 and s
152 and s
153 and not s.endswith(b'\n')
153 and not s.endswith(b'\n')
154 ):
154 ):
155 s = s + b'\n'
155 s = s + b'\n'
156 return util.tolf(s)
156 return util.tolf(s)
157
157
158
158
159 def tocrlf(s, params, ui, **kwargs):
159 def tocrlf(s, params, ui, **kwargs):
160 """Filter to convert to CRLF EOLs."""
160 """Filter to convert to CRLF EOLs."""
161 if stringutil.binary(s):
161 if stringutil.binary(s):
162 return s
162 return s
163 if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
163 if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
164 return s
164 return s
165 if (
165 if (
166 ui.configbool(b'eol', b'fix-trailing-newline')
166 ui.configbool(b'eol', b'fix-trailing-newline')
167 and s
167 and s
168 and not s.endswith(b'\n')
168 and not s.endswith(b'\n')
169 ):
169 ):
170 s = s + b'\n'
170 s = s + b'\n'
171 return util.tocrlf(s)
171 return util.tocrlf(s)
172
172
173
173
174 def isbinary(s, params, ui, **kwargs):
174 def isbinary(s, params, ui, **kwargs):
175 """Filter to do nothing with the file."""
175 """Filter to do nothing with the file."""
176 return s
176 return s
177
177
178
178
179 filters = {
179 filters = {
180 b'to-lf': tolf,
180 b'to-lf': tolf,
181 b'to-crlf': tocrlf,
181 b'to-crlf': tocrlf,
182 b'is-binary': isbinary,
182 b'is-binary': isbinary,
183 # The following provide backwards compatibility with win32text
183 # The following provide backwards compatibility with win32text
184 b'cleverencode:': tolf,
184 b'cleverencode:': tolf,
185 b'cleverdecode:': tocrlf,
185 b'cleverdecode:': tocrlf,
186 }
186 }
187
187
188
188
189 class eolfile(object):
189 class eolfile(object):
190 def __init__(self, ui, root, data):
190 def __init__(self, ui, root, data):
191 self._decode = {
191 self._decode = {
192 b'LF': b'to-lf',
192 b'LF': b'to-lf',
193 b'CRLF': b'to-crlf',
193 b'CRLF': b'to-crlf',
194 b'BIN': b'is-binary',
194 b'BIN': b'is-binary',
195 }
195 }
196 self._encode = {
196 self._encode = {
197 b'LF': b'to-lf',
197 b'LF': b'to-lf',
198 b'CRLF': b'to-crlf',
198 b'CRLF': b'to-crlf',
199 b'BIN': b'is-binary',
199 b'BIN': b'is-binary',
200 }
200 }
201
201
202 self.cfg = config.config()
202 self.cfg = config.config()
203 # Our files should not be touched. The pattern must be
203 # Our files should not be touched. The pattern must be
204 # inserted first to override a '** = native' pattern.
204 # inserted first to override a '** = native' pattern.
205 self.cfg.set(b'patterns', b'.hg*', b'BIN', b'eol')
205 self.cfg.set(b'patterns', b'.hg*', b'BIN', b'eol')
206 # We can then parse the user's patterns.
206 # We can then parse the user's patterns.
207 self.cfg.parse(b'.hgeol', data)
207 self.cfg.parse(b'.hgeol', data)
208
208
209 isrepolf = self.cfg.get(b'repository', b'native') != b'CRLF'
209 isrepolf = self.cfg.get(b'repository', b'native') != b'CRLF'
210 self._encode[b'NATIVE'] = isrepolf and b'to-lf' or b'to-crlf'
210 self._encode[b'NATIVE'] = isrepolf and b'to-lf' or b'to-crlf'
211 iswdlf = ui.config(b'eol', b'native') in (b'LF', b'\n')
211 iswdlf = ui.config(b'eol', b'native') in (b'LF', b'\n')
212 self._decode[b'NATIVE'] = iswdlf and b'to-lf' or b'to-crlf'
212 self._decode[b'NATIVE'] = iswdlf and b'to-lf' or b'to-crlf'
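    # 'cond and x or y' is the pre-ternary spelling of "x if cond else y";
    # safe here because neither filter name is falsy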
213
213
214 include = []
214 include = []
215 exclude = []
215 exclude = []
216 self.patterns = []
216 self.patterns = []
217 for pattern, style in self.cfg.items(b'patterns'):
217 for pattern, style in self.cfg.items(b'patterns'):
218 key = style.upper()
218 key = style.upper()
219 if key == b'BIN':
219 if key == b'BIN':
220 exclude.append(pattern)
220 exclude.append(pattern)
221 else:
221 else:
222 include.append(pattern)
222 include.append(pattern)
223 m = match.match(root, b'', [pattern])
223 m = match.match(root, b'', [pattern])
224 self.patterns.append((pattern, key, m))
224 self.patterns.append((pattern, key, m))
225 # This will match the files for which we need to care
225 # This will match the files for which we need to care
226 # about inconsistent newlines.
226 # about inconsistent newlines.
227 self.match = match.match(root, b'', [], include, exclude)
227 self.match = match.match(root, b'', [], include, exclude)
228
228
229 def copytoui(self, ui):
229 def copytoui(self, ui):
230 newpatterns = {pattern for pattern, key, m in self.patterns}
230 newpatterns = {pattern for pattern, key, m in self.patterns}
231 for section in (b'decode', b'encode'):
231 for section in (b'decode', b'encode'):
232 for oldpattern, _filter in ui.configitems(section):
232 for oldpattern, _filter in ui.configitems(section):
233 if oldpattern not in newpatterns:
233 if oldpattern not in newpatterns:
234 if ui.configsource(section, oldpattern) == b'eol':
234 if ui.configsource(section, oldpattern) == b'eol':
235 ui.setconfig(section, oldpattern, b'!', b'eol')
235 ui.setconfig(section, oldpattern, b'!', b'eol')
236 for pattern, key, m in self.patterns:
236 for pattern, key, m in self.patterns:
237 try:
237 try:
238 ui.setconfig(b'decode', pattern, self._decode[key], b'eol')
238 ui.setconfig(b'decode', pattern, self._decode[key], b'eol')
239 ui.setconfig(b'encode', pattern, self._encode[key], b'eol')
239 ui.setconfig(b'encode', pattern, self._encode[key], b'eol')
240 except KeyError:
240 except KeyError:
241 ui.warn(
241 ui.warn(
242 _(b"ignoring unknown EOL style '%s' from %s\n")
242 _(b"ignoring unknown EOL style '%s' from %s\n")
243 % (key, self.cfg.source(b'patterns', pattern))
243 % (key, self.cfg.source(b'patterns', pattern))
244 )
244 )
245 # eol.only-consistent can be specified in ~/.hgrc or .hgeol
245 # eol.only-consistent can be specified in ~/.hgrc or .hgeol
246 for k, v in self.cfg.items(b'eol'):
246 for k, v in self.cfg.items(b'eol'):
247 ui.setconfig(b'eol', k, v, b'eol')
247 ui.setconfig(b'eol', k, v, b'eol')
248
248
249 def checkrev(self, repo, ctx, files):
249 def checkrev(self, repo, ctx, files):
250 failed = []
250 failed = []
251 for f in files or ctx.files():
251 for f in files or ctx.files():
252 if f not in ctx:
252 if f not in ctx:
253 continue
253 continue
254 for pattern, key, m in self.patterns:
254 for pattern, key, m in self.patterns:
255 if not m(f):
255 if not m(f):
256 continue
256 continue
257 target = self._encode[key]
257 target = self._encode[key]
258 data = ctx[f].data()
258 data = ctx[f].data()
259 if (
259 if (
260 target == b"to-lf"
260 target == b"to-lf"
261 and b"\r\n" in data
261 and b"\r\n" in data
262 or target == b"to-crlf"
262 or target == b"to-crlf"
263 and singlelf.search(data)
263 and singlelf.search(data)
264 ):
264 ):
265 failed.append((f, target, bytes(ctx)))
265 failed.append((f, target, bytes(ctx)))
266 break
266 break
267 return failed
267 return failed
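    # Restated for clarity (reusing the module-level singlelf pattern): the
    # failure test above reduces to two byte-level checks.
    def _violates(target, data):
        if target == b'to-lf':
            return b'\r\n' in data  # CRLF stored where LF is expected
        if target == b'to-crlf':
            return bool(singlelf.search(data))  # lone LF where CRLF is expected
        return False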
268
268
269
269
270 def parseeol(ui, repo, nodes):
270 def parseeol(ui, repo, nodes):
271 try:
271 try:
272 for node in nodes:
272 for node in nodes:
273 try:
273 try:
274 if node is None:
274 if node is None:
275 # Cannot use workingctx.data() since it would load
275 # Cannot use workingctx.data() since it would load
276 # and cache the filters before we configure them.
276 # and cache the filters before we configure them.
277 data = repo.wvfs(b'.hgeol').read()
277 data = repo.wvfs(b'.hgeol').read()
278 else:
278 else:
279 data = repo[node][b'.hgeol'].data()
279 data = repo[node][b'.hgeol'].data()
280 return eolfile(ui, repo.root, data)
280 return eolfile(ui, repo.root, data)
281 except (IOError, LookupError):
281 except (IOError, LookupError):
282 pass
282 pass
283 except errormod.ConfigError as inst:
283 except errormod.ConfigError as inst:
284 ui.warn(
284 ui.warn(
285 _(
285 _(
286 b"warning: ignoring .hgeol file due to parse error "
286 b"warning: ignoring .hgeol file due to parse error "
287 b"at %s: %s\n"
287 b"at %s: %s\n"
288 )
288 )
289 % (inst.location, inst.message)
289 % (inst.location, inst.message)
290 )
290 )
291 return None
291 return None
292
292
293
293
294 def ensureenabled(ui):
294 def ensureenabled(ui):
295 """make sure the extension is enabled when used as hook
295 """make sure the extension is enabled when used as hook
296
296
297 When eol is used through hooks, the extension is never formally loaded and
297 When eol is used through hooks, the extension is never formally loaded and
298 enabled. This has some side effects; for example, the config declaration is
298 enabled. This has some side effects; for example, the config declaration is
299 never loaded. This function ensures the extension is enabled when running
299 never loaded. This function ensures the extension is enabled when running
300 hooks.
300 hooks.
301 """
301 """
302 if b'eol' in ui._knownconfig:
302 if b'eol' in ui._knownconfig:
303 return
303 return
304 ui.setconfig(b'extensions', b'eol', b'', source=b'internal')
304 ui.setconfig(b'extensions', b'eol', b'', source=b'internal')
305 extensions.loadall(ui, [b'eol'])
305 extensions.loadall(ui, [b'eol'])
306
306
307
307
308 def _checkhook(ui, repo, node, headsonly):
308 def _checkhook(ui, repo, node, headsonly):
309 # Get revisions to check and touched files at the same time
309 # Get revisions to check and touched files at the same time
310 ensureenabled(ui)
310 ensureenabled(ui)
311 files = set()
311 files = set()
312 revs = set()
312 revs = set()
313 for rev in pycompat.xrange(repo[node].rev(), len(repo)):
313 for rev in pycompat.xrange(repo[node].rev(), len(repo)):
314 revs.add(rev)
314 revs.add(rev)
315 if headsonly:
315 if headsonly:
316 ctx = repo[rev]
316 ctx = repo[rev]
317 files.update(ctx.files())
317 files.update(ctx.files())
318 for pctx in ctx.parents():
318 for pctx in ctx.parents():
319 revs.discard(pctx.rev())
319 revs.discard(pctx.rev())
320 failed = []
320 failed = []
321 for rev in revs:
321 for rev in revs:
322 ctx = repo[rev]
322 ctx = repo[rev]
323 eol = parseeol(ui, repo, [ctx.node()])
323 eol = parseeol(ui, repo, [ctx.node()])
324 if eol:
324 if eol:
325 failed.extend(eol.checkrev(repo, ctx, files))
325 failed.extend(eol.checkrev(repo, ctx, files))
326
326
327 if failed:
327 if failed:
328 eols = {b'to-lf': b'CRLF', b'to-crlf': b'LF'}
328 eols = {b'to-lf': b'CRLF', b'to-crlf': b'LF'}
329 msgs = []
329 msgs = []
330 for f, target, node in sorted(failed):
330 for f, target, node in sorted(failed):
331 msgs.append(
331 msgs.append(
332 _(b" %s in %s should not have %s line endings")
332 _(b" %s in %s should not have %s line endings")
333 % (f, node, eols[target])
333 % (f, node, eols[target])
334 )
334 )
335 raise errormod.Abort(
335 raise errormod.Abort(
336 _(b"end-of-line check failed:\n") + b"\n".join(msgs)
336 _(b"end-of-line check failed:\n") + b"\n".join(msgs)
337 )
337 )
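    # Illustration (toy data, not repo objects): heads-only mode selects every
    # revision from the incoming node to tip, then discards each revision that
    # is a parent of another selected one, leaving only the heads.
    revs = {0, 1, 2, 3}
    parents = {0: [], 1: [0], 2: [1], 3: [1]}  # 2 and 3 are heads
    selected = set(revs)
    for rev in revs:
        for p in parents[rev]:
            selected.discard(p)
    print(sorted(selected))  # [2, 3]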
338
338
339
339
340 def checkallhook(ui, repo, node, hooktype, **kwargs):
340 def checkallhook(ui, repo, node, hooktype, **kwargs):
341 """verify that files have expected EOLs"""
341 """verify that files have expected EOLs"""
342 _checkhook(ui, repo, node, False)
342 _checkhook(ui, repo, node, False)
343
343
344
344
345 def checkheadshook(ui, repo, node, hooktype, **kwargs):
345 def checkheadshook(ui, repo, node, hooktype, **kwargs):
346 """verify that files have expected EOLs"""
346 """verify that files have expected EOLs"""
347 _checkhook(ui, repo, node, True)
347 _checkhook(ui, repo, node, True)
348
348
349
349
350 # "checkheadshook" used to be called "hook"
350 # "checkheadshook" used to be called "hook"
351 hook = checkheadshook
351 hook = checkheadshook
352
352
353
353
354 def preupdate(ui, repo, hooktype, parent1, parent2):
354 def preupdate(ui, repo, hooktype, parent1, parent2):
355 p1node = scmutil.resolvehexnodeidprefix(repo, parent1)
355 p1node = scmutil.resolvehexnodeidprefix(repo, parent1)
356 repo.loadeol([p1node])
356 repo.loadeol([p1node])
357 return False
357 return False
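    # a false return value means the hook did not veto, so the update proceeds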
358
358
359
359
360 def uisetup(ui):
360 def uisetup(ui):
361 ui.setconfig(b'hooks', b'preupdate.eol', preupdate, b'eol')
361 ui.setconfig(b'hooks', b'preupdate.eol', preupdate, b'eol')
362
362
363
363
364 def extsetup(ui):
364 def extsetup(ui):
365 try:
365 try:
366 extensions.find(b'win32text')
366 extensions.find(b'win32text')
367 ui.warn(
367 ui.warn(
368 _(
368 _(
369 b"the eol extension is incompatible with the "
369 b"the eol extension is incompatible with the "
370 b"win32text extension\n"
370 b"win32text extension\n"
371 )
371 )
372 )
372 )
373 except KeyError:
373 except KeyError:
374 pass
374 pass
375
375
376
376
377 def reposetup(ui, repo):
377 def reposetup(ui, repo):
378 uisetup(repo.ui)
378 uisetup(repo.ui)
379
379
380 if not repo.local():
380 if not repo.local():
381 return
381 return
382 for name, fn in pycompat.iteritems(filters):
382 for name, fn in pycompat.iteritems(filters):
383 repo.adddatafilter(name, fn)
383 repo.adddatafilter(name, fn)
384
384
385 ui.setconfig(b'patch', b'eol', b'auto', b'eol')
385 ui.setconfig(b'patch', b'eol', b'auto', b'eol')
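    # patch.eol=auto makes imported patches preserve the line endings already
    # present in each patched file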
386
386
387 class eolrepo(repo.__class__):
387 class eolrepo(repo.__class__):
388 def loadeol(self, nodes):
388 def loadeol(self, nodes):
389 eol = parseeol(self.ui, self, nodes)
389 eol = parseeol(self.ui, self, nodes)
390 if eol is None:
390 if eol is None:
391 return None
391 return None
392 eol.copytoui(self.ui)
392 eol.copytoui(self.ui)
393 return eol.match
393 return eol.match
394
394
395 def _hgcleardirstate(self):
395 def _hgcleardirstate(self):
396 self._eolmatch = self.loadeol([None])
396 self._eolmatch = self.loadeol([None])
397 if not self._eolmatch:
397 if not self._eolmatch:
398 self._eolmatch = util.never
398 self._eolmatch = util.never
399 return
399 return
400
400
401 oldeol = None
401 oldeol = None
402 try:
402 try:
403 cachemtime = os.path.getmtime(self.vfs.join(b"eol.cache"))
403 cachemtime = os.path.getmtime(self.vfs.join(b"eol.cache"))
404 except OSError:
404 except OSError:
405 cachemtime = 0
405 cachemtime = 0
406 else:
406 else:
407 olddata = self.vfs.read(b"eol.cache")
407 olddata = self.vfs.read(b"eol.cache")
408 if olddata:
408 if olddata:
409 oldeol = eolfile(self.ui, self.root, olddata)
409 oldeol = eolfile(self.ui, self.root, olddata)
410
410
411 try:
411 try:
412 eolmtime = os.path.getmtime(self.wjoin(b".hgeol"))
412 eolmtime = os.path.getmtime(self.wjoin(b".hgeol"))
413 except OSError:
413 except OSError:
414 eolmtime = 0
414 eolmtime = 0
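    # eolmtime == 0 means there is no .hgeol in the working copy; the '>='
    # below treats equal timestamps as a change, a conservative choice given
    # filesystem mtime granularity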
415
415
416 if eolmtime >= cachemtime and eolmtime > 0:
416 if eolmtime >= cachemtime and eolmtime > 0:
417 self.ui.debug(b"eol: detected change in .hgeol\n")
417 self.ui.debug(b"eol: detected change in .hgeol\n")
418
418
419 hgeoldata = self.wvfs.read(b'.hgeol')
419 hgeoldata = self.wvfs.read(b'.hgeol')
420 neweol = eolfile(self.ui, self.root, hgeoldata)
420 neweol = eolfile(self.ui, self.root, hgeoldata)
421
421
422 wlock = None
422 wlock = None
423 try:
423 try:
424 wlock = self.wlock()
424 wlock = self.wlock()
425 for f in self.dirstate:
425 for f in self.dirstate:
426 if self.dirstate[f] != b'n':
426 if not self.dirstate.get_entry(f).maybe_clean:
427 continue
427 continue
428 if oldeol is not None:
428 if oldeol is not None:
429 if not oldeol.match(f) and not neweol.match(f):
429 if not oldeol.match(f) and not neweol.match(f):
430 continue
430 continue
431 oldkey = None
431 oldkey = None
432 for pattern, key, m in oldeol.patterns:
432 for pattern, key, m in oldeol.patterns:
433 if m(f):
433 if m(f):
434 oldkey = key
434 oldkey = key
435 break
435 break
436 newkey = None
436 newkey = None
437 for pattern, key, m in neweol.patterns:
437 for pattern, key, m in neweol.patterns:
438 if m(f):
438 if m(f):
439 newkey = key
439 newkey = key
440 break
440 break
441 if oldkey == newkey:
441 if oldkey == newkey:
442 continue
442 continue
443 # all normal files need to be looked at again since
443 # all normal files need to be looked at again since
444 # the new .hgeol file specifies a different filter
444 # the new .hgeol file specifies a different filter
445 self.dirstate.set_possibly_dirty(f)
445 self.dirstate.set_possibly_dirty(f)
446 # Write the cache to update mtime and cache .hgeol
446 # Write the cache to update mtime and cache .hgeol
447 with self.vfs(b"eol.cache", b"w") as f:
447 with self.vfs(b"eol.cache", b"w") as f:
448 f.write(hgeoldata)
448 f.write(hgeoldata)
449 except errormod.LockUnavailable:
449 except errormod.LockUnavailable:
450 # If we cannot lock the repository and clear the
450 # If we cannot lock the repository and clear the
451 # dirstate, then a commit might not see all files
451 # dirstate, then a commit might not see all files
452 # as modified. But if we cannot lock the
452 # as modified. But if we cannot lock the
453 # repository, then we can also not make a commit,
453 # repository, then we can also not make a commit,
454 # so ignore the error.
454 # so ignore the error.
455 pass
455 pass
456 finally:
456 finally:
457 if wlock is not None:
457 if wlock is not None:
458 wlock.release()
458 wlock.release()
459
459
460 def commitctx(self, ctx, error=False, origctx=None):
460 def commitctx(self, ctx, error=False, origctx=None):
461 for f in sorted(ctx.added() + ctx.modified()):
461 for f in sorted(ctx.added() + ctx.modified()):
462 if not self._eolmatch(f):
462 if not self._eolmatch(f):
463 continue
463 continue
464 fctx = ctx[f]
464 fctx = ctx[f]
465 if fctx is None:
465 if fctx is None:
466 continue
466 continue
467 data = fctx.data()
467 data = fctx.data()
468 if stringutil.binary(data):
468 if stringutil.binary(data):
469 # We should not abort here, since the user should
469 # We should not abort here, since the user should
470 # be able to say "** = native" to automatically
470 # be able to say "** = native" to automatically
471 # have all non-binary files taken care of.
471 # have all non-binary files taken care of.
472 continue
472 continue
473 if inconsistenteol(data):
473 if inconsistenteol(data):
474 raise errormod.Abort(
474 raise errormod.Abort(
475 _(b"inconsistent newline style in %s\n") % f
475 _(b"inconsistent newline style in %s\n") % f
476 )
476 )
477 return super(eolrepo, self).commitctx(ctx, error, origctx)
477 return super(eolrepo, self).commitctx(ctx, error, origctx)
478
478
479 repo.__class__ = eolrepo
479 repo.__class__ = eolrepo
480 repo._hgcleardirstate()
480 repo._hgcleardirstate()
@@ -1,803 +1,804 @@
1 # extdiff.py - external diff program support for mercurial
1 # extdiff.py - external diff program support for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to allow external programs to compare revisions
8 '''command to allow external programs to compare revisions
9
9
10 The extdiff Mercurial extension allows you to use external programs
10 The extdiff Mercurial extension allows you to use external programs
11 to compare revisions, or a revision with the working directory. The external
11 to compare revisions, or a revision with the working directory. The external
12 diff programs are called with a configurable set of options and two
12 diff programs are called with a configurable set of options and two
13 non-option arguments: paths to directories containing snapshots of
13 non-option arguments: paths to directories containing snapshots of
14 files to compare.
14 files to compare.
15
15
16 If there is more than one file being compared and the "child" revision
16 If there is more than one file being compared and the "child" revision
17 is the working directory, any modifications made in the external diff
17 is the working directory, any modifications made in the external diff
18 program will be copied back to the working directory from the temporary
18 program will be copied back to the working directory from the temporary
19 directory.
19 directory.
20
20
21 The extdiff extension also allows you to configure new diff commands, so
21 The extdiff extension also allows you to configure new diff commands, so
22 you do not always need to type :hg:`extdiff -p kdiff3`. ::
22 you do not always need to type :hg:`extdiff -p kdiff3`. ::
23
23
24 [extdiff]
24 [extdiff]
25 # add new command that runs GNU diff(1) in 'context diff' mode
25 # add new command that runs GNU diff(1) in 'context diff' mode
26 cdiff = gdiff -Nprc5
26 cdiff = gdiff -Nprc5
27 ## or the old way:
27 ## or the old way:
28 #cmd.cdiff = gdiff
28 #cmd.cdiff = gdiff
29 #opts.cdiff = -Nprc5
29 #opts.cdiff = -Nprc5
30
30
31 # add new command called meld, runs meld (no need to name twice). If
31 # add new command called meld, runs meld (no need to name twice). If
32 # the meld executable is not available, the meld tool in [merge-tools]
32 # the meld executable is not available, the meld tool in [merge-tools]
33 # will be used, if available
33 # will be used, if available
34 meld =
34 meld =
35
35
36 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
36 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
37 # (see http://www.vim.org/scripts/script.php?script_id=102). Non-English
37 # (see http://www.vim.org/scripts/script.php?script_id=102). Non-English
38 # users should be sure to put "let g:DirDiffDynamicDiffText = 1" in
38 # users should be sure to put "let g:DirDiffDynamicDiffText = 1" in
39 # your .vimrc
39 # your .vimrc
40 vimdiff = gvim -f "+next" \\
40 vimdiff = gvim -f "+next" \\
41 "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
41 "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
42
42
43 Tool arguments can include variables that are expanded at runtime::
43 Tool arguments can include variables that are expanded at runtime::
44
44
45 $parent1, $plabel1 - filename, descriptive label of first parent
45 $parent1, $plabel1 - filename, descriptive label of first parent
46 $child, $clabel - filename, descriptive label of child revision
46 $child, $clabel - filename, descriptive label of child revision
47 $parent2, $plabel2 - filename, descriptive label of second parent
47 $parent2, $plabel2 - filename, descriptive label of second parent
48 $root - repository root
48 $root - repository root
49 $parent is an alias for $parent1.
49 $parent is an alias for $parent1.
50
50
51 The extdiff extension will look in your [diff-tools] and [merge-tools]
51 The extdiff extension will look in your [diff-tools] and [merge-tools]
52 sections for diff tool arguments when none are specified in [extdiff].
52 sections for diff tool arguments when none are specified in [extdiff].
53
53
54 ::
54 ::
55
55
56 [extdiff]
56 [extdiff]
57 kdiff3 =
57 kdiff3 =
58
58
59 [diff-tools]
59 [diff-tools]
60 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
60 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
61
61
62 If a program has a graphical interface, it is useful to tell
62 If a program has a graphical interface, it is useful to tell
63 Mercurial about it. It will prevent the program from being mistakenly
63 Mercurial about it. It will prevent the program from being mistakenly
64 used in a terminal-only environment (such as an SSH terminal session),
64 used in a terminal-only environment (such as an SSH terminal session),
65 and will make :hg:`extdiff --per-file` open multiple file diffs at once
65 and will make :hg:`extdiff --per-file` open multiple file diffs at once
66 instead of one by one (if you still want to open file diffs one by one,
66 instead of one by one (if you still want to open file diffs one by one,
67 you can use the --confirm option).
67 you can use the --confirm option).
68
68
69 Declaring that a tool has a graphical interface can be done with the
69 Declaring that a tool has a graphical interface can be done with the
70 ``gui`` flag next to where ``diffargs`` are specified:
70 ``gui`` flag next to where ``diffargs`` are specified:
71
71
72 ::
72 ::
73
73
74 [diff-tools]
74 [diff-tools]
75 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
75 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
76 kdiff3.gui = true
76 kdiff3.gui = true
77
77
78 You can use -I/-X and a list of file or directory names as with the normal
78 You can use -I/-X and a list of file or directory names as with the normal
79 :hg:`diff` command. The extdiff extension makes snapshots of only
79 :hg:`diff` command. The extdiff extension makes snapshots of only
80 needed files, so running the external diff program will actually be
80 needed files, so running the external diff program will actually be
81 pretty fast (at least faster than having to compare the entire tree).
81 pretty fast (at least faster than having to compare the entire tree).
82 '''
82 '''
83
83
84 from __future__ import absolute_import
84 from __future__ import absolute_import
85
85
86 import os
86 import os
87 import re
87 import re
88 import shutil
88 import shutil
89 import stat
89 import stat
90 import subprocess
90 import subprocess
91
91
92 from mercurial.i18n import _
92 from mercurial.i18n import _
93 from mercurial.node import (
93 from mercurial.node import (
94 nullrev,
94 nullrev,
95 short,
95 short,
96 )
96 )
97 from mercurial import (
97 from mercurial import (
98 archival,
98 archival,
99 cmdutil,
99 cmdutil,
100 encoding,
100 encoding,
101 error,
101 error,
102 filemerge,
102 filemerge,
103 formatter,
103 formatter,
104 logcmdutil,
104 pycompat,
105 pycompat,
105 registrar,
106 registrar,
106 scmutil,
107 scmutil,
107 util,
108 util,
108 )
109 )
109 from mercurial.utils import (
110 from mercurial.utils import (
110 procutil,
111 procutil,
111 stringutil,
112 stringutil,
112 )
113 )
113
114
114 cmdtable = {}
115 cmdtable = {}
115 command = registrar.command(cmdtable)
116 command = registrar.command(cmdtable)
116
117
117 configtable = {}
118 configtable = {}
118 configitem = registrar.configitem(configtable)
119 configitem = registrar.configitem(configtable)
119
120
120 configitem(
121 configitem(
121 b'extdiff',
122 b'extdiff',
122 br'opts\..*',
123 br'opts\..*',
123 default=b'',
124 default=b'',
124 generic=True,
125 generic=True,
125 )
126 )
126
127
127 configitem(
128 configitem(
128 b'extdiff',
129 b'extdiff',
129 br'gui\..*',
130 br'gui\..*',
130 generic=True,
131 generic=True,
131 )
132 )
132
133
133 configitem(
134 configitem(
134 b'diff-tools',
135 b'diff-tools',
135 br'.*\.diffargs$',
136 br'.*\.diffargs$',
136 default=None,
137 default=None,
137 generic=True,
138 generic=True,
138 )
139 )
139
140
140 configitem(
141 configitem(
141 b'diff-tools',
142 b'diff-tools',
142 br'.*\.gui$',
143 br'.*\.gui$',
143 generic=True,
144 generic=True,
144 )
145 )
145
146
146 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
147 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
147 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
148 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
148 # be specifying the version(s) of Mercurial they are tested with, or
149 # be specifying the version(s) of Mercurial they are tested with, or
149 # leave the attribute unspecified.
150 # leave the attribute unspecified.
150 testedwith = b'ships-with-hg-core'
151 testedwith = b'ships-with-hg-core'
151
152
152
153
153 def snapshot(ui, repo, files, node, tmproot, listsubrepos):
154 def snapshot(ui, repo, files, node, tmproot, listsubrepos):
154 """snapshot files as of some revision
155 """snapshot files as of some revision
155 if snapshots are not used, -I/-X does not work and a recursive diff
155 if snapshots are not used, -I/-X does not work and a recursive diff
156 in tools like kdiff3 and meld displays too many files."""
157 in tools like kdiff3 and meld displays too many files."""
157 dirname = os.path.basename(repo.root)
158 dirname = os.path.basename(repo.root)
158 if dirname == b"":
159 if dirname == b"":
159 dirname = b"root"
160 dirname = b"root"
160 if node is not None:
161 if node is not None:
161 dirname = b'%s.%s' % (dirname, short(node))
162 dirname = b'%s.%s' % (dirname, short(node))
162 base = os.path.join(tmproot, dirname)
163 base = os.path.join(tmproot, dirname)
163 os.mkdir(base)
164 os.mkdir(base)
164 fnsandstat = []
165 fnsandstat = []
165
166
166 if node is not None:
167 if node is not None:
167 ui.note(
168 ui.note(
168 _(b'making snapshot of %d files from rev %s\n')
169 _(b'making snapshot of %d files from rev %s\n')
169 % (len(files), short(node))
170 % (len(files), short(node))
170 )
171 )
171 else:
172 else:
172 ui.note(
173 ui.note(
173 _(b'making snapshot of %d files from working directory\n')
174 _(b'making snapshot of %d files from working directory\n')
174 % (len(files))
175 % (len(files))
175 )
176 )
176
177
177 if files:
178 if files:
178 repo.ui.setconfig(b"ui", b"archivemeta", False)
179 repo.ui.setconfig(b"ui", b"archivemeta", False)
179
180
180 archival.archive(
181 archival.archive(
181 repo,
182 repo,
182 base,
183 base,
183 node,
184 node,
184 b'files',
185 b'files',
185 match=scmutil.matchfiles(repo, files),
186 match=scmutil.matchfiles(repo, files),
186 subrepos=listsubrepos,
187 subrepos=listsubrepos,
187 )
188 )
188
189
189 for fn in sorted(files):
190 for fn in sorted(files):
190 wfn = util.pconvert(fn)
191 wfn = util.pconvert(fn)
191 ui.note(b' %s\n' % wfn)
192 ui.note(b' %s\n' % wfn)
192
193
193 if node is None:
194 if node is None:
194 dest = os.path.join(base, wfn)
195 dest = os.path.join(base, wfn)
195
196
196 fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest)))
197 fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest)))
197 return dirname, fnsandstat
198 return dirname, fnsandstat
198
199
199
200
200 def formatcmdline(
201 def formatcmdline(
201 cmdline,
202 cmdline,
202 repo_root,
203 repo_root,
203 do3way,
204 do3way,
204 parent1,
205 parent1,
205 plabel1,
206 plabel1,
206 parent2,
207 parent2,
207 plabel2,
208 plabel2,
208 child,
209 child,
209 clabel,
210 clabel,
210 ):
211 ):
211 # Function to quote file/dir names in the argument string.
212 # Function to quote file/dir names in the argument string.
212 # When not operating in 3-way mode, an empty string is
213 # When not operating in 3-way mode, an empty string is
213 # returned for parent2
214 # returned for parent2
214 replace = {
215 replace = {
215 b'parent': parent1,
216 b'parent': parent1,
216 b'parent1': parent1,
217 b'parent1': parent1,
217 b'parent2': parent2,
218 b'parent2': parent2,
218 b'plabel1': plabel1,
219 b'plabel1': plabel1,
219 b'plabel2': plabel2,
220 b'plabel2': plabel2,
220 b'child': child,
221 b'child': child,
221 b'clabel': clabel,
222 b'clabel': clabel,
222 b'root': repo_root,
223 b'root': repo_root,
223 }
224 }
224
225
225 def quote(match):
226 def quote(match):
226 pre = match.group(2)
227 pre = match.group(2)
227 key = match.group(3)
228 key = match.group(3)
228 if not do3way and key == b'parent2':
229 if not do3way and key == b'parent2':
229 return pre
230 return pre
230 return pre + procutil.shellquote(replace[key])
231 return pre + procutil.shellquote(replace[key])
231
232
232 # Match parent2 first, so 'parent1?' will match both parent1 and parent
233 # Match parent2 first, so 'parent1?' will match both parent1 and parent
233 regex = (
234 regex = (
234 br'''(['"]?)([^\s'"$]*)'''
235 br'''(['"]?)([^\s'"$]*)'''
235 br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1'
236 br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1'
236 )
237 )
237 if not do3way and not re.search(regex, cmdline):
238 if not do3way and not re.search(regex, cmdline):
238 cmdline += b' $parent1 $child'
239 cmdline += b' $parent1 $child'
239 return re.sub(regex, quote, cmdline)
240 return re.sub(regex, quote, cmdline)
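    # Illustration with a made-up tool and paths: b'mydiff' contains no
    # $variables, so b' $parent1 $child' is appended before substitution and
    # each value is shell-quoted as needed.
    line = formatcmdline(
        b'mydiff',
        b'/repo',
        do3way=False,
        parent1=b'/tmp/my repo/a',
        plabel1=b'a@1',
        parent2=b'',
        plabel2=b'',
        child=b'/tmp/b',
        clabel=b'b@2',
    )
    # roughly b"mydiff '/tmp/my repo/a' /tmp/b" on POSIX: the path containing
    # a space is quoted, the plain one may pass through unquoted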
240
241
241
242
242 def _systembackground(cmd, environ=None, cwd=None):
243 def _systembackground(cmd, environ=None, cwd=None):
243 """like 'procutil.system', but returns the Popen object directly
244 """like 'procutil.system', but returns the Popen object directly
244 so we don't have to wait on it.
245 so we don't have to wait on it.
245 """
246 """
246 env = procutil.shellenviron(environ)
247 env = procutil.shellenviron(environ)
247 proc = subprocess.Popen(
248 proc = subprocess.Popen(
248 procutil.tonativestr(cmd),
249 procutil.tonativestr(cmd),
249 shell=True,
250 shell=True,
250 close_fds=procutil.closefds,
251 close_fds=procutil.closefds,
251 env=procutil.tonativeenv(env),
252 env=procutil.tonativeenv(env),
252 cwd=pycompat.rapply(procutil.tonativestr, cwd),
253 cwd=pycompat.rapply(procutil.tonativestr, cwd),
253 )
254 )
254 return proc
255 return proc
255
256
256
257
257 def _runperfilediff(
258 def _runperfilediff(
258 cmdline,
259 cmdline,
259 repo_root,
260 repo_root,
260 ui,
261 ui,
261 guitool,
262 guitool,
262 do3way,
263 do3way,
263 confirm,
264 confirm,
264 commonfiles,
265 commonfiles,
265 tmproot,
266 tmproot,
266 dir1a,
267 dir1a,
267 dir1b,
268 dir1b,
268 dir2,
269 dir2,
269 rev1a,
270 rev1a,
270 rev1b,
271 rev1b,
271 rev2,
272 rev2,
272 ):
273 ):
273 # Note that we need to sort the list of files because it was
274 # Note that we need to sort the list of files because it was
274 # built in an "unstable" way and it's annoying to get files in a
275 # built in an "unstable" way and it's annoying to get files in a
275 # random order, especially when "confirm" mode is enabled.
276 # random order, especially when "confirm" mode is enabled.
276 waitprocs = []
277 waitprocs = []
277 totalfiles = len(commonfiles)
278 totalfiles = len(commonfiles)
278 for idx, commonfile in enumerate(sorted(commonfiles)):
279 for idx, commonfile in enumerate(sorted(commonfiles)):
279 path1a = os.path.join(dir1a, commonfile)
280 path1a = os.path.join(dir1a, commonfile)
280 label1a = commonfile + rev1a
281 label1a = commonfile + rev1a
281 if not os.path.isfile(path1a):
282 if not os.path.isfile(path1a):
282 path1a = pycompat.osdevnull
283 path1a = pycompat.osdevnull
283
284
284 path1b = b''
285 path1b = b''
285 label1b = b''
286 label1b = b''
286 if do3way:
287 if do3way:
287 path1b = os.path.join(dir1b, commonfile)
288 path1b = os.path.join(dir1b, commonfile)
288 label1b = commonfile + rev1b
289 label1b = commonfile + rev1b
289 if not os.path.isfile(path1b):
290 if not os.path.isfile(path1b):
290 path1b = pycompat.osdevnull
291 path1b = pycompat.osdevnull
291
292
292 path2 = os.path.join(dir2, commonfile)
293 path2 = os.path.join(dir2, commonfile)
293 label2 = commonfile + rev2
294 label2 = commonfile + rev2
294
295
295 if confirm:
296 if confirm:
296 # Prompt before showing this diff
297 # Prompt before showing this diff
297 difffiles = _(b'diff %s (%d of %d)') % (
298 difffiles = _(b'diff %s (%d of %d)') % (
298 commonfile,
299 commonfile,
299 idx + 1,
300 idx + 1,
300 totalfiles,
301 totalfiles,
301 )
302 )
302 responses = _(
303 responses = _(
303 b'[Yns?]'
304 b'[Yns?]'
304 b'$$ &Yes, show diff'
305 b'$$ &Yes, show diff'
305 b'$$ &No, skip this diff'
306 b'$$ &No, skip this diff'
306 b'$$ &Skip remaining diffs'
307 b'$$ &Skip remaining diffs'
307 b'$$ &? (display help)'
308 b'$$ &? (display help)'
308 )
309 )
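    # ui.promptchoice() splits the prompt on '$$'; the '&' in each choice
    # marks its keyboard shortcut, and the zero-based index of the selected
    # answer is returned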
309 r = ui.promptchoice(b'%s %s' % (difffiles, responses))
310 r = ui.promptchoice(b'%s %s' % (difffiles, responses))
310 if r == 3: # ?
311 if r == 3: # ?
311 while r == 3:
312 while r == 3:
312 for c, t in ui.extractchoices(responses)[1]:
313 for c, t in ui.extractchoices(responses)[1]:
313 ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
314 ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
314 r = ui.promptchoice(b'%s %s' % (difffiles, responses))
315 r = ui.promptchoice(b'%s %s' % (difffiles, responses))
315 if r == 0: # yes
316 if r == 0: # yes
316 pass
317 pass
317 elif r == 1: # no
318 elif r == 1: # no
318 continue
319 continue
319 elif r == 2: # skip
320 elif r == 2: # skip
320 break
321 break
321
322
322 curcmdline = formatcmdline(
323 curcmdline = formatcmdline(
323 cmdline,
324 cmdline,
324 repo_root,
325 repo_root,
325 do3way=do3way,
326 do3way=do3way,
326 parent1=path1a,
327 parent1=path1a,
327 plabel1=label1a,
328 plabel1=label1a,
328 parent2=path1b,
329 parent2=path1b,
329 plabel2=label1b,
330 plabel2=label1b,
330 child=path2,
331 child=path2,
331 clabel=label2,
332 clabel=label2,
332 )
333 )
333
334
334 if confirm or not guitool:
335 if confirm or not guitool:
335 # Run the comparison program and wait for it to exit
336 # Run the comparison program and wait for it to exit
336 # before we show the next file.
337 # before we show the next file.
337 # This is because either we need to wait for confirmation
338 # This is because either we need to wait for confirmation
338 # from the user between each invocation, or because, as far
339 # from the user between each invocation, or because, as far
339 # as we know, the tool doesn't have a GUI, in which case
340 # as we know, the tool doesn't have a GUI, in which case
340 # we can't run multiple CLI programs at the same time.
341 # we can't run multiple CLI programs at the same time.
341 ui.debug(
342 ui.debug(
342 b'running %r in %s\n' % (pycompat.bytestr(curcmdline), tmproot)
343 b'running %r in %s\n' % (pycompat.bytestr(curcmdline), tmproot)
343 )
344 )
344 ui.system(curcmdline, cwd=tmproot, blockedtag=b'extdiff')
345 ui.system(curcmdline, cwd=tmproot, blockedtag=b'extdiff')
345 else:
346 else:
346 # Run the comparison program but don't wait, as we're
347 # Run the comparison program but don't wait, as we're
347 # going to rapid-fire each file diff and then wait on
348 # going to rapid-fire each file diff and then wait on
348 # the whole group.
349 # the whole group.
349 ui.debug(
350 ui.debug(
350 b'running %r in %s (backgrounded)\n'
351 b'running %r in %s (backgrounded)\n'
351 % (pycompat.bytestr(curcmdline), tmproot)
352 % (pycompat.bytestr(curcmdline), tmproot)
352 )
353 )
353 proc = _systembackground(curcmdline, cwd=tmproot)
354 proc = _systembackground(curcmdline, cwd=tmproot)
354 waitprocs.append(proc)
355 waitprocs.append(proc)
355
356
356 if waitprocs:
357 if waitprocs:
357 with ui.timeblockedsection(b'extdiff'):
358 with ui.timeblockedsection(b'extdiff'):
358 for proc in waitprocs:
359 for proc in waitprocs:
359 proc.wait()
360 proc.wait()
360
361
361
362
362 def diffpatch(ui, repo, node1, node2, tmproot, matcher, cmdline):
363 def diffpatch(ui, repo, node1, node2, tmproot, matcher, cmdline):
363 template = b'hg-%h.patch'
364 template = b'hg-%h.patch'
364 # write patches to temporary files
365 # write patches to temporary files
365 with formatter.nullformatter(ui, b'extdiff', {}) as fm:
366 with formatter.nullformatter(ui, b'extdiff', {}) as fm:
366 cmdutil.export(
367 cmdutil.export(
367 repo,
368 repo,
368 [repo[node1].rev(), repo[node2].rev()],
369 [repo[node1].rev(), repo[node2].rev()],
369 fm,
370 fm,
370 fntemplate=repo.vfs.reljoin(tmproot, template),
371 fntemplate=repo.vfs.reljoin(tmproot, template),
371 match=matcher,
372 match=matcher,
372 )
373 )
373 label1 = cmdutil.makefilename(repo[node1], template)
374 label1 = cmdutil.makefilename(repo[node1], template)
374 label2 = cmdutil.makefilename(repo[node2], template)
375 label2 = cmdutil.makefilename(repo[node2], template)
375 file1 = repo.vfs.reljoin(tmproot, label1)
376 file1 = repo.vfs.reljoin(tmproot, label1)
376 file2 = repo.vfs.reljoin(tmproot, label2)
377 file2 = repo.vfs.reljoin(tmproot, label2)
377 cmdline = formatcmdline(
378 cmdline = formatcmdline(
378 cmdline,
379 cmdline,
379 repo.root,
380 repo.root,
380 # no 3way while comparing patches
381 # no 3way while comparing patches
381 do3way=False,
382 do3way=False,
382 parent1=file1,
383 parent1=file1,
383 plabel1=label1,
384 plabel1=label1,
384 # while comparing patches, there is no second parent
385 # while comparing patches, there is no second parent
385 parent2=None,
386 parent2=None,
386 plabel2=None,
387 plabel2=None,
387 child=file2,
388 child=file2,
388 clabel=label2,
389 clabel=label2,
389 )
390 )
390 ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
391 ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
391 ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
392 ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
392 return 1
393 return 1
393
394
394
395
395 def diffrevs(
396 def diffrevs(
396 ui,
397 ui,
397 repo,
398 repo,
398 ctx1a,
399 ctx1a,
399 ctx1b,
400 ctx1b,
400 ctx2,
401 ctx2,
401 matcher,
402 matcher,
402 tmproot,
403 tmproot,
403 cmdline,
404 cmdline,
404 do3way,
405 do3way,
405 guitool,
406 guitool,
406 opts,
407 opts,
407 ):
408 ):
408
409
409 subrepos = opts.get(b'subrepos')
410 subrepos = opts.get(b'subrepos')
410
411
411 # calculate list of files changed between both revs
412 # calculate list of files changed between both revs
412 st = ctx1a.status(ctx2, matcher, listsubrepos=subrepos)
413 st = ctx1a.status(ctx2, matcher, listsubrepos=subrepos)
413 mod_a, add_a, rem_a = set(st.modified), set(st.added), set(st.removed)
414 mod_a, add_a, rem_a = set(st.modified), set(st.added), set(st.removed)
414 if do3way:
415 if do3way:
415 stb = ctx1b.status(ctx2, matcher, listsubrepos=subrepos)
416 stb = ctx1b.status(ctx2, matcher, listsubrepos=subrepos)
416 mod_b, add_b, rem_b = (
417 mod_b, add_b, rem_b = (
417 set(stb.modified),
418 set(stb.modified),
418 set(stb.added),
419 set(stb.added),
419 set(stb.removed),
420 set(stb.removed),
420 )
421 )
421 else:
422 else:
422 mod_b, add_b, rem_b = set(), set(), set()
423 mod_b, add_b, rem_b = set(), set(), set()
423 modadd = mod_a | add_a | mod_b | add_b
424 modadd = mod_a | add_a | mod_b | add_b
424 common = modadd | rem_a | rem_b
425 common = modadd | rem_a | rem_b
425 if not common:
426 if not common:
426 return 0
427 return 0
427
428
428 # Always make a copy of ctx1a (and ctx1b, if applicable)
429 # Always make a copy of ctx1a (and ctx1b, if applicable)
429 # dir1a should contain files which are:
430 # dir1a should contain files which are:
430 # * modified or removed from ctx1a to ctx2
431 # * modified or removed from ctx1a to ctx2
431 # * modified or added from ctx1b to ctx2
432 # * modified or added from ctx1b to ctx2
432 # (except files added from ctx1a to ctx2, as they were not present in
432 # (except files added from ctx1a to ctx2, as they were not present in
433 # ctx1a)
434 # ctx1a)
434 dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
435 dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
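    # worked example with hypothetical files: mod_a={f1}, rem_a={f2},
    # add_a={f3}, mod_b=set(), add_b={f3, f4} gives
    # {f1} | {f2} | ({f3, f4} - {f3}) == {f1, f2, f4}; f3 is skipped because
    # it did not exist in ctx1a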
435 dir1a = snapshot(ui, repo, dir1a_files, ctx1a.node(), tmproot, subrepos)[0]
436 dir1a = snapshot(ui, repo, dir1a_files, ctx1a.node(), tmproot, subrepos)[0]
436 rev1a = b'' if ctx1a.rev() is None else b'@%d' % ctx1a.rev()
437 rev1a = b'' if ctx1a.rev() is None else b'@%d' % ctx1a.rev()
437 if do3way:
438 if do3way:
438 # file calculation criteria same as dir1a
439 # file calculation criteria same as dir1a
439 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
440 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
440 dir1b = snapshot(
441 dir1b = snapshot(
441 ui, repo, dir1b_files, ctx1b.node(), tmproot, subrepos
442 ui, repo, dir1b_files, ctx1b.node(), tmproot, subrepos
442 )[0]
443 )[0]
443 rev1b = b'@%d' % ctx1b.rev()
444 rev1b = b'@%d' % ctx1b.rev()
444 else:
445 else:
445 dir1b = None
446 dir1b = None
446 rev1b = b''
447 rev1b = b''
447
448
448 fnsandstat = []
449 fnsandstat = []
449
450
450 # If ctx2 is not the wc or there is >1 change, copy it
451 # If ctx2 is not the wc or there is >1 change, copy it
451 dir2root = b''
452 dir2root = b''
452 rev2 = b''
453 rev2 = b''
453 if ctx2.node() is not None:
454 if ctx2.node() is not None:
454 dir2 = snapshot(ui, repo, modadd, ctx2.node(), tmproot, subrepos)[0]
455 dir2 = snapshot(ui, repo, modadd, ctx2.node(), tmproot, subrepos)[0]
455 rev2 = b'@%d' % ctx2.rev()
456 rev2 = b'@%d' % ctx2.rev()
456 elif len(common) > 1:
457 elif len(common) > 1:
457 # we only actually need to get the files to copy back to
458 # we only actually need to get the files to copy back to
458 # the working dir in this case (because the other cases
459 # the working dir in this case (because the other cases
459 # are: diffing 2 revisions or single file -- in which case
460 # are: diffing 2 revisions or single file -- in which case
460 # the file is already directly passed to the diff tool).
461 # the file is already directly passed to the diff tool).
461 dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot, subrepos)
462 dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot, subrepos)
462 else:
463 else:
463 # This lets the diff tool open the changed file directly
464 # This lets the diff tool open the changed file directly
464 dir2 = b''
465 dir2 = b''
465 dir2root = repo.root
466 dir2root = repo.root
466
467
467 label1a = rev1a
468 label1a = rev1a
468 label1b = rev1b
469 label1b = rev1b
469 label2 = rev2
470 label2 = rev2
470
471
471 if not opts.get(b'per_file'):
472 if not opts.get(b'per_file'):
472 # If only one change, diff the files instead of the directories
473 # If only one change, diff the files instead of the directories
473 # Handle bogus modifies correctly by checking if the files exist
474 # Handle bogus modifies correctly by checking if the files exist
474 if len(common) == 1:
475 if len(common) == 1:
475 common_file = util.localpath(common.pop())
476 common_file = util.localpath(common.pop())
476 dir1a = os.path.join(tmproot, dir1a, common_file)
477 dir1a = os.path.join(tmproot, dir1a, common_file)
477 label1a = common_file + rev1a
478 label1a = common_file + rev1a
478 if not os.path.isfile(dir1a):
479 if not os.path.isfile(dir1a):
479 dir1a = pycompat.osdevnull
480 dir1a = pycompat.osdevnull
480 if do3way:
481 if do3way:
481 dir1b = os.path.join(tmproot, dir1b, common_file)
482 dir1b = os.path.join(tmproot, dir1b, common_file)
482 label1b = common_file + rev1b
483 label1b = common_file + rev1b
483 if not os.path.isfile(dir1b):
484 if not os.path.isfile(dir1b):
484 dir1b = pycompat.osdevnull
485 dir1b = pycompat.osdevnull
485 dir2 = os.path.join(dir2root, dir2, common_file)
486 dir2 = os.path.join(dir2root, dir2, common_file)
486 label2 = common_file + rev2
487 label2 = common_file + rev2
487
488
488 # Run the external tool on the 2 temp directories or the patches
489 # Run the external tool on the 2 temp directories or the patches
489 cmdline = formatcmdline(
490 cmdline = formatcmdline(
490 cmdline,
491 cmdline,
491 repo.root,
492 repo.root,
492 do3way=do3way,
493 do3way=do3way,
493 parent1=dir1a,
494 parent1=dir1a,
494 plabel1=label1a,
495 plabel1=label1a,
495 parent2=dir1b,
496 parent2=dir1b,
496 plabel2=label1b,
497 plabel2=label1b,
497 child=dir2,
498 child=dir2,
498 clabel=label2,
499 clabel=label2,
499 )
500 )
500 ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
501 ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
501 ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
502 ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
502 else:
503 else:
503 # Run the external tool once for each pair of files
504 # Run the external tool once for each pair of files
504 _runperfilediff(
505 _runperfilediff(
505 cmdline,
506 cmdline,
506 repo.root,
507 repo.root,
507 ui,
508 ui,
508 guitool=guitool,
509 guitool=guitool,
509 do3way=do3way,
510 do3way=do3way,
510 confirm=opts.get(b'confirm'),
511 confirm=opts.get(b'confirm'),
511 commonfiles=common,
512 commonfiles=common,
512 tmproot=tmproot,
513 tmproot=tmproot,
513 dir1a=os.path.join(tmproot, dir1a),
514 dir1a=os.path.join(tmproot, dir1a),
514 dir1b=os.path.join(tmproot, dir1b) if do3way else None,
515 dir1b=os.path.join(tmproot, dir1b) if do3way else None,
515 dir2=os.path.join(dir2root, dir2),
516 dir2=os.path.join(dir2root, dir2),
516 rev1a=rev1a,
517 rev1a=rev1a,
517 rev1b=rev1b,
518 rev1b=rev1b,
518 rev2=rev2,
519 rev2=rev2,
519 )
520 )
520
521
521 for copy_fn, working_fn, st in fnsandstat:
522 for copy_fn, working_fn, st in fnsandstat:
522 cpstat = os.lstat(copy_fn)
523 cpstat = os.lstat(copy_fn)
523 # Some tools copy the file and attributes, so mtime may not detect
524 # Some tools copy the file and attributes, so mtime may not detect
524 # all changes. A size check will detect more cases, but not all.
525 # all changes. A size check will detect more cases, but not all.
525 # The only certain way to detect every case is to diff all files,
526 # The only certain way to detect every case is to diff all files,
526 # which could be expensive.
527 # which could be expensive.
527 # copyfile() carries over the permission, so the mode check could
528 # copyfile() carries over the permission, so the mode check could
528 # be in an 'elif' branch, but it is kept separate for the case
529 # be in an 'elif' branch, but it is kept separate for the case
529 # where the file has changed without affecting mtime or size.
530 # where the file has changed without affecting mtime or size.
530 if (
531 if (
531 cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
532 cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
532 or cpstat.st_size != st.st_size
533 or cpstat.st_size != st.st_size
533 or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)
534 or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)
534 ):
535 ):
535 ui.debug(
536 ui.debug(
536 b'file changed while diffing. '
537 b'file changed while diffing. '
537 b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn)
538 b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn)
538 )
539 )
539 util.copyfile(copy_fn, working_fn)
540 util.copyfile(copy_fn, working_fn)
540
541
541 return 1
542 return 1
542
543
543
544
544 def dodiff(ui, repo, cmdline, pats, opts, guitool=False):
545 def dodiff(ui, repo, cmdline, pats, opts, guitool=False):
545 """Do the actual diff:
546 """Do the actual diff:
546
547
547 - copy to a temp structure if diffing 2 internal revisions
548 - copy to a temp structure if diffing 2 internal revisions
548 - copy to a temp structure if diffing working revision with
549 - copy to a temp structure if diffing working revision with
549 another one and more than 1 file is changed
550 another one and more than 1 file is changed
550 - just invoke the diff for a single file in the working dir
551 - just invoke the diff for a single file in the working dir
551 """
552 """
552
553
553 cmdutil.check_at_most_one_arg(opts, b'rev', b'change')
554 cmdutil.check_at_most_one_arg(opts, b'rev', b'change')
554 revs = opts.get(b'rev')
555 revs = opts.get(b'rev')
555 from_rev = opts.get(b'from')
556 from_rev = opts.get(b'from')
556 to_rev = opts.get(b'to')
557 to_rev = opts.get(b'to')
557 change = opts.get(b'change')
558 change = opts.get(b'change')
558 do3way = b'$parent2' in cmdline
559 do3way = b'$parent2' in cmdline
559
560
560 if change:
561 if change:
561 ctx2 = scmutil.revsingle(repo, change, None)
562 ctx2 = logcmdutil.revsingle(repo, change, None)
562 ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
563 ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
563 elif from_rev or to_rev:
564 elif from_rev or to_rev:
564 repo = scmutil.unhidehashlikerevs(
565 repo = scmutil.unhidehashlikerevs(
565 repo, [from_rev] + [to_rev], b'nowarn'
566 repo, [from_rev] + [to_rev], b'nowarn'
566 )
567 )
567 ctx1a = scmutil.revsingle(repo, from_rev, None)
568 ctx1a = logcmdutil.revsingle(repo, from_rev, None)
568 ctx1b = repo[nullrev]
569 ctx1b = repo[nullrev]
569 ctx2 = scmutil.revsingle(repo, to_rev, None)
570 ctx2 = logcmdutil.revsingle(repo, to_rev, None)
570 else:
571 else:
571 ctx1a, ctx2 = scmutil.revpair(repo, revs)
572 ctx1a, ctx2 = logcmdutil.revpair(repo, revs)
572 if not revs:
573 if not revs:
573 ctx1b = repo[None].p2()
574 ctx1b = repo[None].p2()
574 else:
575 else:
575 ctx1b = repo[nullrev]
576 ctx1b = repo[nullrev]
576
577
577 # Disable 3-way merge if there is only one parent
578 # Disable 3-way merge if there is only one parent
578 if do3way:
579 if do3way:
579 if ctx1b.rev() == nullrev:
580 if ctx1b.rev() == nullrev:
580 do3way = False
581 do3way = False
581
582
582 matcher = scmutil.match(ctx2, pats, opts)
583 matcher = scmutil.match(ctx2, pats, opts)
583
584
584 if opts.get(b'patch'):
585 if opts.get(b'patch'):
585 if opts.get(b'subrepos'):
586 if opts.get(b'subrepos'):
586 raise error.Abort(_(b'--patch cannot be used with --subrepos'))
587 raise error.Abort(_(b'--patch cannot be used with --subrepos'))
587 if opts.get(b'per_file'):
588 if opts.get(b'per_file'):
588 raise error.Abort(_(b'--patch cannot be used with --per-file'))
589 raise error.Abort(_(b'--patch cannot be used with --per-file'))
589 if ctx2.node() is None:
590 if ctx2.node() is None:
590 raise error.Abort(_(b'--patch requires two revisions'))
591 raise error.Abort(_(b'--patch requires two revisions'))
591
592
592 tmproot = pycompat.mkdtemp(prefix=b'extdiff.')
593 tmproot = pycompat.mkdtemp(prefix=b'extdiff.')
593 try:
594 try:
594 if opts.get(b'patch'):
595 if opts.get(b'patch'):
595 return diffpatch(
596 return diffpatch(
596 ui, repo, ctx1a.node(), ctx2.node(), tmproot, matcher, cmdline
597 ui, repo, ctx1a.node(), ctx2.node(), tmproot, matcher, cmdline
597 )
598 )
598
599
599 return diffrevs(
600 return diffrevs(
600 ui,
601 ui,
601 repo,
602 repo,
602 ctx1a,
603 ctx1a,
603 ctx1b,
604 ctx1b,
604 ctx2,
605 ctx2,
605 matcher,
606 matcher,
606 tmproot,
607 tmproot,
607 cmdline,
608 cmdline,
608 do3way,
609 do3way,
609 guitool,
610 guitool,
610 opts,
611 opts,
611 )
612 )
612
613
613 finally:
614 finally:
614 ui.note(_(b'cleaning up temp directory\n'))
615 ui.note(_(b'cleaning up temp directory\n'))
615 shutil.rmtree(tmproot)
616 shutil.rmtree(tmproot)
616
617
617
618
618 extdiffopts = (
619 extdiffopts = (
619 [
620 [
620 (
621 (
621 b'o',
622 b'o',
622 b'option',
623 b'option',
623 [],
624 [],
624 _(b'pass option to comparison program'),
625 _(b'pass option to comparison program'),
625 _(b'OPT'),
626 _(b'OPT'),
626 ),
627 ),
627 (b'r', b'rev', [], _(b'revision (DEPRECATED)'), _(b'REV')),
628 (b'r', b'rev', [], _(b'revision (DEPRECATED)'), _(b'REV')),
628 (b'', b'from', b'', _(b'revision to diff from'), _(b'REV1')),
629 (b'', b'from', b'', _(b'revision to diff from'), _(b'REV1')),
629 (b'', b'to', b'', _(b'revision to diff to'), _(b'REV2')),
630 (b'', b'to', b'', _(b'revision to diff to'), _(b'REV2')),
630 (b'c', b'change', b'', _(b'change made by revision'), _(b'REV')),
631 (b'c', b'change', b'', _(b'change made by revision'), _(b'REV')),
631 (
632 (
632 b'',
633 b'',
633 b'per-file',
634 b'per-file',
634 False,
635 False,
635 _(b'compare each file instead of revision snapshots'),
636 _(b'compare each file instead of revision snapshots'),
636 ),
637 ),
637 (
638 (
638 b'',
639 b'',
639 b'confirm',
640 b'confirm',
640 False,
641 False,
641 _(b'prompt user before each external program invocation'),
642 _(b'prompt user before each external program invocation'),
642 ),
643 ),
643 (b'', b'patch', None, _(b'compare patches for two revisions')),
644 (b'', b'patch', None, _(b'compare patches for two revisions')),
644 ]
645 ]
645 + cmdutil.walkopts
646 + cmdutil.walkopts
646 + cmdutil.subrepoopts
647 + cmdutil.subrepoopts
647 )
648 )
648
649
649
650
650 @command(
651 @command(
651 b'extdiff',
652 b'extdiff',
652 [
653 [
653 (b'p', b'program', b'', _(b'comparison program to run'), _(b'CMD')),
654 (b'p', b'program', b'', _(b'comparison program to run'), _(b'CMD')),
654 ]
655 ]
655 + extdiffopts,
656 + extdiffopts,
656 _(b'hg extdiff [OPT]... [FILE]...'),
657 _(b'hg extdiff [OPT]... [FILE]...'),
657 helpcategory=command.CATEGORY_FILE_CONTENTS,
658 helpcategory=command.CATEGORY_FILE_CONTENTS,
658 inferrepo=True,
659 inferrepo=True,
659 )
660 )
660 def extdiff(ui, repo, *pats, **opts):
661 def extdiff(ui, repo, *pats, **opts):
661 """use external program to diff repository (or selected files)
662 """use external program to diff repository (or selected files)
662
663
663 Show differences between revisions for the specified files, using
664 Show differences between revisions for the specified files, using
664 an external program. The default program used is diff, with
665 an external program. The default program used is diff, with
665 default options "-Npru".
666 default options "-Npru".
666
667
667 To select a different program, use the -p/--program option. The
668 To select a different program, use the -p/--program option. The
668 program will be passed the names of two directories to compare,
669 program will be passed the names of two directories to compare,
669 unless the --per-file option is specified (see below). To pass
670 unless the --per-file option is specified (see below). To pass
670 additional options to the program, use -o/--option. These will be
671 additional options to the program, use -o/--option. These will be
671 passed before the names of the directories or files to compare.
672 passed before the names of the directories or files to compare.
672
673
673 The --from, --to, and --change options work the same way they do for
674 The --from, --to, and --change options work the same way they do for
674 :hg:`diff`.
675 :hg:`diff`.
675
676
676 The --per-file option runs the external program repeatedly on each
677 The --per-file option runs the external program repeatedly on each
677 file to diff, instead of once on two directories. By default,
678 file to diff, instead of once on two directories. By default,
678 this happens one by one, where the next file diff is opened in the
679 this happens one by one, where the next file diff is opened in the
679 external program only once the previous external program (for the
680 external program only once the previous external program (for the
680 previous file diff) has exited. If the external program has a
681 previous file diff) has exited. If the external program has a
681 graphical interface, it can open all the file diffs at once instead
682 graphical interface, it can open all the file diffs at once instead
682 of one by one. See :hg:`help -e extdiff` for information about how
683 of one by one. See :hg:`help -e extdiff` for information about how
683 to tell Mercurial that a given program has a graphical interface.
684 to tell Mercurial that a given program has a graphical interface.
684
685
685 The --confirm option will prompt the user before each invocation of
686 The --confirm option will prompt the user before each invocation of
686 the external program. It is ignored if --per-file isn't specified.
687 the external program. It is ignored if --per-file isn't specified.
687 """
688 """
688 opts = pycompat.byteskwargs(opts)
689 opts = pycompat.byteskwargs(opts)
689 program = opts.get(b'program')
690 program = opts.get(b'program')
690 option = opts.get(b'option')
691 option = opts.get(b'option')
691 if not program:
692 if not program:
692 program = b'diff'
693 program = b'diff'
693 option = option or [b'-Npru']
694 option = option or [b'-Npru']
694 cmdline = b' '.join(map(procutil.shellquote, [program] + option))
695 cmdline = b' '.join(map(procutil.shellquote, [program] + option))
695 return dodiff(ui, repo, cmdline, pats, opts)
696 return dodiff(ui, repo, cmdline, pats, opts)
696
697
697
698
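As a usage sketch for the command above (the tool name and revisions are
illustrative, not anything the extension mandates), extdiff can be driven
ad hoc with -p/-o, or saved as its own command through the [extdiff]
section that uisetup() below turns into real commands::

    $ hg extdiff -p meld --from 1.0 --to tip

    # in hgrc, the same tool saved as an "hg meld" command:
    [extdiff]
    cmd.meld =
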
698 class savedcmd(object):
699 class savedcmd(object):
699 """use external program to diff repository (or selected files)
700 """use external program to diff repository (or selected files)
700
701
701 Show differences between revisions for the specified files, using
702 Show differences between revisions for the specified files, using
702 the following program::
703 the following program::
703
704
704 %(path)s
705 %(path)s
705
706
706 When two revision arguments are given, then changes are shown
707 When two revision arguments are given, then changes are shown
707 between those revisions. If only one revision is specified then
708 between those revisions. If only one revision is specified then
708 that revision is compared to the working directory, and, when no
709 that revision is compared to the working directory, and, when no
709 revisions are specified, the working directory files are compared
710 revisions are specified, the working directory files are compared
710 to its parent.
711 to its parent.
711 """
712 """
712
713
713 def __init__(self, path, cmdline, isgui):
714 def __init__(self, path, cmdline, isgui):
714 # We can't pass non-ASCII through docstrings (and path is
715 # We can't pass non-ASCII through docstrings (and path is
715 # in an unknown encoding anyway); also avoid double separators on
716 # in an unknown encoding anyway); also avoid double separators on
716 # Windows
717 # Windows
717 docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\')
718 docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\')
718 self.__doc__ %= {'path': pycompat.sysstr(stringutil.uirepr(docpath))}
719 self.__doc__ %= {'path': pycompat.sysstr(stringutil.uirepr(docpath))}
719 self._cmdline = cmdline
720 self._cmdline = cmdline
720 self._isgui = isgui
721 self._isgui = isgui
721
722
722 def __call__(self, ui, repo, *pats, **opts):
723 def __call__(self, ui, repo, *pats, **opts):
723 opts = pycompat.byteskwargs(opts)
724 opts = pycompat.byteskwargs(opts)
724 options = b' '.join(map(procutil.shellquote, opts[b'option']))
725 options = b' '.join(map(procutil.shellquote, opts[b'option']))
725 if options:
726 if options:
726 options = b' ' + options
727 options = b' ' + options
727 return dodiff(
728 return dodiff(
728 ui, repo, self._cmdline + options, pats, opts, guitool=self._isgui
729 ui, repo, self._cmdline + options, pats, opts, guitool=self._isgui
729 )
730 )
730
731
731
732
732 def _gettooldetails(ui, cmd, path):
733 def _gettooldetails(ui, cmd, path):
733 """
734 """
734 returns the following things for a
735 returns the following things for a
735 ```
736 ```
736 [extdiff]
737 [extdiff]
737 <cmd> = <path>
738 <cmd> = <path>
738 ```
739 ```
739 entry:
740 entry:
740
741
741 cmd: command/tool name
742 cmd: command/tool name
742 path: path to the tool
743 path: path to the tool
743 cmdline: the command which should be run
744 cmdline: the command which should be run
744 isgui: whether the tool uses GUI or not
745 isgui: whether the tool uses GUI or not
745
746
746 Reads all external-tool related configs, whether from the extdiff
747 Reads all external-tool related configs, whether from the extdiff
747 section or the diff-tools or merge-tools sections, and whether it is
748 section or the diff-tools or merge-tools sections, and whether it is
748 specified in the old format or the latest format.
749 specified in the old format or the latest format.
749 """
750 """
750 path = util.expandpath(path)
751 path = util.expandpath(path)
751 if cmd.startswith(b'cmd.'):
752 if cmd.startswith(b'cmd.'):
752 cmd = cmd[4:]
753 cmd = cmd[4:]
753 if not path:
754 if not path:
754 path = procutil.findexe(cmd)
755 path = procutil.findexe(cmd)
755 if path is None:
756 if path is None:
756 path = filemerge.findexternaltool(ui, cmd) or cmd
757 path = filemerge.findexternaltool(ui, cmd) or cmd
757 diffopts = ui.config(b'extdiff', b'opts.' + cmd)
758 diffopts = ui.config(b'extdiff', b'opts.' + cmd)
758 cmdline = procutil.shellquote(path)
759 cmdline = procutil.shellquote(path)
759 if diffopts:
760 if diffopts:
760 cmdline += b' ' + diffopts
761 cmdline += b' ' + diffopts
761 isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
762 isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
762 else:
763 else:
763 if path:
764 if path:
764 # case "cmd = path opts"
765 # case "cmd = path opts"
765 cmdline = path
766 cmdline = path
766 diffopts = len(pycompat.shlexsplit(cmdline)) > 1
767 diffopts = len(pycompat.shlexsplit(cmdline)) > 1
767 else:
768 else:
768 # case "cmd ="
769 # case "cmd ="
769 path = procutil.findexe(cmd)
770 path = procutil.findexe(cmd)
770 if path is None:
771 if path is None:
771 path = filemerge.findexternaltool(ui, cmd) or cmd
772 path = filemerge.findexternaltool(ui, cmd) or cmd
772 cmdline = procutil.shellquote(path)
773 cmdline = procutil.shellquote(path)
773 diffopts = False
774 diffopts = False
774 isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
775 isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
775 # look for diff arguments in [diff-tools] then [merge-tools]
776 # look for diff arguments in [diff-tools] then [merge-tools]
776 if not diffopts:
777 if not diffopts:
777 key = cmd + b'.diffargs'
778 key = cmd + b'.diffargs'
778 for section in (b'diff-tools', b'merge-tools'):
779 for section in (b'diff-tools', b'merge-tools'):
779 args = ui.config(section, key)
780 args = ui.config(section, key)
780 if args:
781 if args:
781 cmdline += b' ' + args
782 cmdline += b' ' + args
782 if isgui is None:
783 if isgui is None:
783 isgui = ui.configbool(section, cmd + b'.gui') or False
784 isgui = ui.configbool(section, cmd + b'.gui') or False
784 break
785 break
785 return cmd, path, cmdline, isgui
786 return cmd, path, cmdline, isgui
786
787
787
788
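For reference, a hedged sketch of the configuration shapes this helper
accepts (tool names and paths are placeholders)::

    [extdiff]
    # latest format: cmd.<name> / opts.<name> / gui.<name>
    cmd.mytool = /usr/bin/mytool
    opts.mytool = -Npru
    gui.mytool = false

    # old format: "<name> = <path> <options>"
    mydiff = diff -Npru

    # old format: "<name> =", the path is then found via findexe()
    mydiff2 =

    # diff arguments may also come from [diff-tools] or [merge-tools]:
    [diff-tools]
    mytool.diffargs = -Npru
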
788 def uisetup(ui):
789 def uisetup(ui):
789 for cmd, path in ui.configitems(b'extdiff'):
790 for cmd, path in ui.configitems(b'extdiff'):
790 if cmd.startswith(b'opts.') or cmd.startswith(b'gui.'):
791 if cmd.startswith(b'opts.') or cmd.startswith(b'gui.'):
791 continue
792 continue
792 cmd, path, cmdline, isgui = _gettooldetails(ui, cmd, path)
793 cmd, path, cmdline, isgui = _gettooldetails(ui, cmd, path)
793 command(
794 command(
794 cmd,
795 cmd,
795 extdiffopts[:],
796 extdiffopts[:],
796 _(b'hg %s [OPTION]... [FILE]...') % cmd,
797 _(b'hg %s [OPTION]... [FILE]...') % cmd,
797 helpcategory=command.CATEGORY_FILE_CONTENTS,
798 helpcategory=command.CATEGORY_FILE_CONTENTS,
798 inferrepo=True,
799 inferrepo=True,
799 )(savedcmd(path, cmdline, isgui))
800 )(savedcmd(path, cmdline, isgui))
800
801
801
802
802 # tell hggettext to extract docstrings from these functions:
803 # tell hggettext to extract docstrings from these functions:
803 i18nfunctions = [savedcmd]
804 i18nfunctions = [savedcmd]
@@ -1,357 +1,358 @@
1 # Copyright 2016-present Facebook. All Rights Reserved.
1 # Copyright 2016-present Facebook. All Rights Reserved.
2 #
2 #
3 # commands: fastannotate commands
3 # commands: fastannotate commands
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import (
13 from mercurial import (
14 commands,
14 commands,
15 encoding,
15 encoding,
16 error,
16 error,
17 extensions,
17 extensions,
18 logcmdutil,
18 patch,
19 patch,
19 pycompat,
20 pycompat,
20 registrar,
21 registrar,
21 scmutil,
22 scmutil,
22 util,
23 util,
23 )
24 )
24
25
25 from . import (
26 from . import (
26 context as facontext,
27 context as facontext,
27 error as faerror,
28 error as faerror,
28 formatter as faformatter,
29 formatter as faformatter,
29 )
30 )
30
31
31 cmdtable = {}
32 cmdtable = {}
32 command = registrar.command(cmdtable)
33 command = registrar.command(cmdtable)
33
34
34
35
35 def _matchpaths(repo, rev, pats, opts, aopts=facontext.defaultopts):
36 def _matchpaths(repo, rev, pats, opts, aopts=facontext.defaultopts):
36 """generate paths matching given patterns"""
37 """generate paths matching given patterns"""
37 perfhack = repo.ui.configbool(b'fastannotate', b'perfhack')
38 perfhack = repo.ui.configbool(b'fastannotate', b'perfhack')
38
39
39 # disable perfhack if:
40 # disable perfhack if:
40 # a) any walkopt is used
41 # a) any walkopt is used
41 # b) if we treat pats as plain file names, some of them do not have
42 # b) if we treat pats as plain file names, some of them do not have
42 # corresponding linelog files
43 # corresponding linelog files
43 if perfhack:
44 if perfhack:
44 # cwd relative to reporoot
45 # cwd relative to reporoot
45 reporoot = os.path.dirname(repo.path)
46 reporoot = os.path.dirname(repo.path)
46 reldir = os.path.relpath(encoding.getcwd(), reporoot)
47 reldir = os.path.relpath(encoding.getcwd(), reporoot)
47 if reldir == b'.':
48 if reldir == b'.':
48 reldir = b''
49 reldir = b''
49 if any(opts.get(o[1]) for o in commands.walkopts): # a)
50 if any(opts.get(o[1]) for o in commands.walkopts): # a)
50 perfhack = False
51 perfhack = False
51 else: # b)
52 else: # b)
52 relpats = [
53 relpats = [
53 os.path.relpath(p, reporoot) if os.path.isabs(p) else p
54 os.path.relpath(p, reporoot) if os.path.isabs(p) else p
54 for p in pats
55 for p in pats
55 ]
56 ]
56 # disable perfhack on '..' since it allows escaping from the repo
57 # disable perfhack on '..' since it allows escaping from the repo
57 if any(
58 if any(
58 (
59 (
59 b'..' in f
60 b'..' in f
60 or not os.path.isfile(
61 or not os.path.isfile(
61 facontext.pathhelper(repo, f, aopts).linelogpath
62 facontext.pathhelper(repo, f, aopts).linelogpath
62 )
63 )
63 )
64 )
64 for f in relpats
65 for f in relpats
65 ):
66 ):
66 perfhack = False
67 perfhack = False
67
68
68 # perfhack: emit paths directly without checking with the manifest
69 # perfhack: emit paths directly without checking with the manifest
69 # this can be incorrect if the rev does not have the file.
70 # this can be incorrect if the rev does not have the file.
70 if perfhack:
71 if perfhack:
71 for p in relpats:
72 for p in relpats:
72 yield os.path.join(reldir, p)
73 yield os.path.join(reldir, p)
73 else:
74 else:
74
75
75 def bad(x, y):
76 def bad(x, y):
76 raise error.Abort(b"%s: %s" % (x, y))
77 raise error.Abort(b"%s: %s" % (x, y))
77
78
78 ctx = scmutil.revsingle(repo, rev)
79 ctx = logcmdutil.revsingle(repo, rev)
79 m = scmutil.match(ctx, pats, opts, badfn=bad)
80 m = scmutil.match(ctx, pats, opts, badfn=bad)
80 for p in ctx.walk(m):
81 for p in ctx.walk(m):
81 yield p
82 yield p
82
83
83
84
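A hedged configuration sketch for the perfhack fast path above (the key
mirrors the configbool call; the value is illustrative)::

    [fastannotate]
    perfhack = true
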
84 fastannotatecommandargs = {
85 fastannotatecommandargs = {
85 'options': [
86 'options': [
86 (b'r', b'rev', b'.', _(b'annotate the specified revision'), _(b'REV')),
87 (b'r', b'rev', b'.', _(b'annotate the specified revision'), _(b'REV')),
87 (b'u', b'user', None, _(b'list the author (long with -v)')),
88 (b'u', b'user', None, _(b'list the author (long with -v)')),
88 (b'f', b'file', None, _(b'list the filename')),
89 (b'f', b'file', None, _(b'list the filename')),
89 (b'd', b'date', None, _(b'list the date (short with -q)')),
90 (b'd', b'date', None, _(b'list the date (short with -q)')),
90 (b'n', b'number', None, _(b'list the revision number (default)')),
91 (b'n', b'number', None, _(b'list the revision number (default)')),
91 (b'c', b'changeset', None, _(b'list the changeset')),
92 (b'c', b'changeset', None, _(b'list the changeset')),
92 (
93 (
93 b'l',
94 b'l',
94 b'line-number',
95 b'line-number',
95 None,
96 None,
96 _(b'show line number at the first appearance'),
97 _(b'show line number at the first appearance'),
97 ),
98 ),
98 (
99 (
99 b'e',
100 b'e',
100 b'deleted',
101 b'deleted',
101 None,
102 None,
102 _(b'show deleted lines (slow) (EXPERIMENTAL)'),
103 _(b'show deleted lines (slow) (EXPERIMENTAL)'),
103 ),
104 ),
104 (
105 (
105 b'',
106 b'',
106 b'no-content',
107 b'no-content',
107 None,
108 None,
108 _(b'do not show file content (EXPERIMENTAL)'),
109 _(b'do not show file content (EXPERIMENTAL)'),
109 ),
110 ),
110 (b'', b'no-follow', None, _(b"don't follow copies and renames")),
111 (b'', b'no-follow', None, _(b"don't follow copies and renames")),
111 (
112 (
112 b'',
113 b'',
113 b'linear',
114 b'linear',
114 None,
115 None,
115 _(
116 _(
116 b'enforce linear history, ignore second parent '
117 b'enforce linear history, ignore second parent '
117 b'of merges (EXPERIMENTAL)'
118 b'of merges (EXPERIMENTAL)'
118 ),
119 ),
119 ),
120 ),
120 (
121 (
121 b'',
122 b'',
122 b'long-hash',
123 b'long-hash',
123 None,
124 None,
124 _(b'show long changeset hash (EXPERIMENTAL)'),
125 _(b'show long changeset hash (EXPERIMENTAL)'),
125 ),
126 ),
126 (
127 (
127 b'',
128 b'',
128 b'rebuild',
129 b'rebuild',
129 None,
130 None,
130 _(b'rebuild cache even if it exists (EXPERIMENTAL)'),
131 _(b'rebuild cache even if it exists (EXPERIMENTAL)'),
131 ),
132 ),
132 ]
133 ]
133 + commands.diffwsopts
134 + commands.diffwsopts
134 + commands.walkopts
135 + commands.walkopts
135 + commands.formatteropts,
136 + commands.formatteropts,
136 'synopsis': _(b'[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
137 'synopsis': _(b'[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
137 'inferrepo': True,
138 'inferrepo': True,
138 }
139 }
139
140
140
141
141 def fastannotate(ui, repo, *pats, **opts):
142 def fastannotate(ui, repo, *pats, **opts):
142 """show changeset information by line for each file
143 """show changeset information by line for each file
143
144
144 List changes in files, showing the revision id responsible for each line.
145 List changes in files, showing the revision id responsible for each line.
145
146
146 This command is useful for discovering when a change was made and by whom.
147 This command is useful for discovering when a change was made and by whom.
147
148
148 By default this command prints revision numbers. If you include --file,
149 By default this command prints revision numbers. If you include --file,
149 --user, or --date, the revision number is suppressed unless you also
150 --user, or --date, the revision number is suppressed unless you also
150 include --number. The default format can also be customized by setting
151 include --number. The default format can also be customized by setting
151 fastannotate.defaultformat.
152 fastannotate.defaultformat.
152
153
153 Returns 0 on success.
154 Returns 0 on success.
154
155
155 .. container:: verbose
156 .. container:: verbose
156
157
157 This command uses an implementation different from the vanilla annotate
158 This command uses an implementation different from the vanilla annotate
158 command, which may produce slightly different (while still reasonable)
159 command, which may produce slightly different (while still reasonable)
159 outputs for some cases.
160 outputs for some cases.
160
161
161 Unlike the vanilla annotate, fastannotate follows renames regardless of
162 Unlike the vanilla annotate, fastannotate follows renames regardless of
162 the existence of --file.
163 the existence of --file.
163
164
164 For the best performance when running on a full repo, use -c, -l,
165 For the best performance when running on a full repo, use -c, -l,
165 avoid -u, -d, -n. Use --linear and --no-content to make it even faster.
166 avoid -u, -d, -n. Use --linear and --no-content to make it even faster.
166
167
167 For the best performance when running on a shallow (remotefilelog)
168 For the best performance when running on a shallow (remotefilelog)
168 repo, avoid --linear, --no-follow, and any diff options, as the server
169 repo, avoid --linear, --no-follow, and any diff options, as the server
169 won't be able to populate the annotate cache when non-default options
170 won't be able to populate the annotate cache when non-default options
170 that affect the results are used.
171 that affect the results are used.
171 """
172 """
172 if not pats:
173 if not pats:
173 raise error.Abort(_(b'at least one filename or pattern is required'))
174 raise error.Abort(_(b'at least one filename or pattern is required'))
174
175
175 # performance hack: filtered repo can be slow. unfilter by default.
176 # performance hack: filtered repo can be slow. unfilter by default.
176 if ui.configbool(b'fastannotate', b'unfilteredrepo'):
177 if ui.configbool(b'fastannotate', b'unfilteredrepo'):
177 repo = repo.unfiltered()
178 repo = repo.unfiltered()
178
179
179 opts = pycompat.byteskwargs(opts)
180 opts = pycompat.byteskwargs(opts)
180
181
181 rev = opts.get(b'rev', b'.')
182 rev = opts.get(b'rev', b'.')
182 rebuild = opts.get(b'rebuild', False)
183 rebuild = opts.get(b'rebuild', False)
183
184
184 diffopts = patch.difffeatureopts(
185 diffopts = patch.difffeatureopts(
185 ui, opts, section=b'annotate', whitespace=True
186 ui, opts, section=b'annotate', whitespace=True
186 )
187 )
187 aopts = facontext.annotateopts(
188 aopts = facontext.annotateopts(
188 diffopts=diffopts,
189 diffopts=diffopts,
189 followmerge=not opts.get(b'linear', False),
190 followmerge=not opts.get(b'linear', False),
190 followrename=not opts.get(b'no_follow', False),
191 followrename=not opts.get(b'no_follow', False),
191 )
192 )
192
193
193 if not any(
194 if not any(
194 opts.get(s)
195 opts.get(s)
195 for s in [b'user', b'date', b'file', b'number', b'changeset']
196 for s in [b'user', b'date', b'file', b'number', b'changeset']
196 ):
197 ):
197 # default 'number' for compatibility. But fastannotate is more
198 # default 'number' for compatibility. But fastannotate is more
198 # efficient with "changeset", "line-number" and "no-content".
199 # efficient with "changeset", "line-number" and "no-content".
199 for name in ui.configlist(
200 for name in ui.configlist(
200 b'fastannotate', b'defaultformat', [b'number']
201 b'fastannotate', b'defaultformat', [b'number']
201 ):
202 ):
202 opts[name] = True
203 opts[name] = True
203
204
204 ui.pager(b'fastannotate')
205 ui.pager(b'fastannotate')
205 template = opts.get(b'template')
206 template = opts.get(b'template')
206 if template == b'json':
207 if template == b'json':
207 formatter = faformatter.jsonformatter(ui, repo, opts)
208 formatter = faformatter.jsonformatter(ui, repo, opts)
208 else:
209 else:
209 formatter = faformatter.defaultformatter(ui, repo, opts)
210 formatter = faformatter.defaultformatter(ui, repo, opts)
210 showdeleted = opts.get(b'deleted', False)
211 showdeleted = opts.get(b'deleted', False)
211 showlines = not bool(opts.get(b'no_content'))
212 showlines = not bool(opts.get(b'no_content'))
212 showpath = opts.get(b'file', False)
213 showpath = opts.get(b'file', False)
213
214
214 # find the head of the main (master) branch
215 # find the head of the main (master) branch
215 master = ui.config(b'fastannotate', b'mainbranch') or rev
216 master = ui.config(b'fastannotate', b'mainbranch') or rev
216
217
217 # paths will be used for prefetching and the real annotating
218 # paths will be used for prefetching and the real annotating
218 paths = list(_matchpaths(repo, rev, pats, opts, aopts))
219 paths = list(_matchpaths(repo, rev, pats, opts, aopts))
219
220
220 # for client, prefetch from the server
221 # for client, prefetch from the server
221 if util.safehasattr(repo, 'prefetchfastannotate'):
222 if util.safehasattr(repo, 'prefetchfastannotate'):
222 repo.prefetchfastannotate(paths)
223 repo.prefetchfastannotate(paths)
223
224
224 for path in paths:
225 for path in paths:
225 result = lines = existinglines = None
226 result = lines = existinglines = None
226 while True:
227 while True:
227 try:
228 try:
228 with facontext.annotatecontext(repo, path, aopts, rebuild) as a:
229 with facontext.annotatecontext(repo, path, aopts, rebuild) as a:
229 result = a.annotate(
230 result = a.annotate(
230 rev,
231 rev,
231 master=master,
232 master=master,
232 showpath=showpath,
233 showpath=showpath,
233 showlines=(showlines and not showdeleted),
234 showlines=(showlines and not showdeleted),
234 )
235 )
235 if showdeleted:
236 if showdeleted:
236 existinglines = {(l[0], l[1]) for l in result}
237 existinglines = {(l[0], l[1]) for l in result}
237 result = a.annotatealllines(
238 result = a.annotatealllines(
238 rev, showpath=showpath, showlines=showlines
239 rev, showpath=showpath, showlines=showlines
239 )
240 )
240 break
241 break
241 except (faerror.CannotReuseError, faerror.CorruptedFileError):
242 except (faerror.CannotReuseError, faerror.CorruptedFileError):
242 # happens if master moves backwards, or the file was deleted
243 # happens if master moves backwards, or the file was deleted
243 # and readded, or renamed to an existing name, or corrupted.
244 # and readded, or renamed to an existing name, or corrupted.
244 if rebuild: # give up since we have tried rebuild already
245 if rebuild: # give up since we have tried rebuild already
245 raise
246 raise
246 else: # try a second time rebuilding the cache (slow)
247 else: # try a second time rebuilding the cache (slow)
247 rebuild = True
248 rebuild = True
248 continue
249 continue
249
250
250 if showlines:
251 if showlines:
251 result, lines = result
252 result, lines = result
252
253
253 formatter.write(result, lines, existinglines=existinglines)
254 formatter.write(result, lines, existinglines=existinglines)
254 formatter.end()
255 formatter.end()
255
256
256
257
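To make the defaults discussed in the docstring concrete, here is a
minimal client sketch; the section and key names mirror the config reads
in this file, while the values and the file name are illustrative::

    [fastannotate]
    mainbranch = default
    # option names accepted by the command, applied when no -u/-d/-f/-n/-c
    defaultformat = changeset, line-number

    $ hg fastannotate -c -l mercurial/commands.py
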
257 _newopts = set()
258 _newopts = set()
258 _knownopts = {
259 _knownopts = {
259 opt[1].replace(b'-', b'_')
260 opt[1].replace(b'-', b'_')
260 for opt in (fastannotatecommandargs['options'] + commands.globalopts)
261 for opt in (fastannotatecommandargs['options'] + commands.globalopts)
261 }
262 }
262
263
263
264
264 def _annotatewrapper(orig, ui, repo, *pats, **opts):
265 def _annotatewrapper(orig, ui, repo, *pats, **opts):
265 """used by wrapdefault"""
266 """used by wrapdefault"""
266 # we need this hack until the obsstore has 0.0 seconds perf impact
267 # we need this hack until the obsstore has 0.0 seconds perf impact
267 if ui.configbool(b'fastannotate', b'unfilteredrepo'):
268 if ui.configbool(b'fastannotate', b'unfilteredrepo'):
268 repo = repo.unfiltered()
269 repo = repo.unfiltered()
269
270
270 # treat the file as text (skip the isbinary check)
271 # treat the file as text (skip the isbinary check)
271 if ui.configbool(b'fastannotate', b'forcetext'):
272 if ui.configbool(b'fastannotate', b'forcetext'):
272 opts['text'] = True
273 opts['text'] = True
273
274
274 # check if we need to do prefetch (client-side)
275 # check if we need to do prefetch (client-side)
275 rev = opts.get('rev')
276 rev = opts.get('rev')
276 if util.safehasattr(repo, 'prefetchfastannotate') and rev is not None:
277 if util.safehasattr(repo, 'prefetchfastannotate') and rev is not None:
277 paths = list(_matchpaths(repo, rev, pats, pycompat.byteskwargs(opts)))
278 paths = list(_matchpaths(repo, rev, pats, pycompat.byteskwargs(opts)))
278 repo.prefetchfastannotate(paths)
279 repo.prefetchfastannotate(paths)
279
280
280 return orig(ui, repo, *pats, **opts)
281 return orig(ui, repo, *pats, **opts)
281
282
282
283
283 def registercommand():
284 def registercommand():
284 """register the fastannotate command"""
285 """register the fastannotate command"""
285 name = b'fastannotate|fastblame|fa'
286 name = b'fastannotate|fastblame|fa'
286 command(name, helpbasic=True, **fastannotatecommandargs)(fastannotate)
287 command(name, helpbasic=True, **fastannotatecommandargs)(fastannotate)
287
288
288
289
289 def wrapdefault():
290 def wrapdefault():
290 """wrap the default annotate command, to be aware of the protocol"""
291 """wrap the default annotate command, to be aware of the protocol"""
291 extensions.wrapcommand(commands.table, b'annotate', _annotatewrapper)
292 extensions.wrapcommand(commands.table, b'annotate', _annotatewrapper)
292
293
293
294
294 @command(
295 @command(
295 b'debugbuildannotatecache',
296 b'debugbuildannotatecache',
296 [(b'r', b'rev', b'', _(b'build up to the specific revision'), _(b'REV'))]
297 [(b'r', b'rev', b'', _(b'build up to the specific revision'), _(b'REV'))]
297 + commands.walkopts,
298 + commands.walkopts,
298 _(b'[-r REV] FILE...'),
299 _(b'[-r REV] FILE...'),
299 )
300 )
300 def debugbuildannotatecache(ui, repo, *pats, **opts):
301 def debugbuildannotatecache(ui, repo, *pats, **opts):
301 """incrementally build fastannotate cache up to REV for specified files
302 """incrementally build fastannotate cache up to REV for specified files
302
303
303 If REV is not specified, use the config 'fastannotate.mainbranch'.
304 If REV is not specified, use the config 'fastannotate.mainbranch'.
304
305
305 If fastannotate.client is True, download the annotate cache from the
306 If fastannotate.client is True, download the annotate cache from the
306 server. Otherwise, build the annotate cache locally.
307 server. Otherwise, build the annotate cache locally.
307
308
308 The annotate cache will be built using the default diff and follow
309 The annotate cache will be built using the default diff and follow
309 options and lives in '.hg/fastannotate/default'.
310 options and lives in '.hg/fastannotate/default'.
310 """
311 """
311 opts = pycompat.byteskwargs(opts)
312 opts = pycompat.byteskwargs(opts)
312 rev = opts.get(b'REV') or ui.config(b'fastannotate', b'mainbranch')
313 rev = opts.get(b'REV') or ui.config(b'fastannotate', b'mainbranch')
313 if not rev:
314 if not rev:
314 raise error.Abort(
315 raise error.Abort(
315 _(b'you need to provide a revision'),
316 _(b'you need to provide a revision'),
316 hint=_(b'set fastannotate.mainbranch or use --rev'),
317 hint=_(b'set fastannotate.mainbranch or use --rev'),
317 )
318 )
318 if ui.configbool(b'fastannotate', b'unfilteredrepo'):
319 if ui.configbool(b'fastannotate', b'unfilteredrepo'):
319 repo = repo.unfiltered()
320 repo = repo.unfiltered()
320 ctx = scmutil.revsingle(repo, rev)
321 ctx = logcmdutil.revsingle(repo, rev)
321 m = scmutil.match(ctx, pats, opts)
322 m = scmutil.match(ctx, pats, opts)
322 paths = list(ctx.walk(m))
323 paths = list(ctx.walk(m))
323 if util.safehasattr(repo, 'prefetchfastannotate'):
324 if util.safehasattr(repo, 'prefetchfastannotate'):
324 # client
325 # client
325 if opts.get(b'REV'):
326 if opts.get(b'REV'):
326 raise error.Abort(_(b'--rev cannot be used for client'))
327 raise error.Abort(_(b'--rev cannot be used for client'))
327 repo.prefetchfastannotate(paths)
328 repo.prefetchfastannotate(paths)
328 else:
329 else:
329 # server, or full repo
330 # server, or full repo
330 progress = ui.makeprogress(_(b'building'), total=len(paths))
331 progress = ui.makeprogress(_(b'building'), total=len(paths))
331 for i, path in enumerate(paths):
332 for i, path in enumerate(paths):
332 progress.update(i)
333 progress.update(i)
333 with facontext.annotatecontext(repo, path) as actx:
334 with facontext.annotatecontext(repo, path) as actx:
334 try:
335 try:
335 if actx.isuptodate(rev):
336 if actx.isuptodate(rev):
336 continue
337 continue
337 actx.annotate(rev, rev)
338 actx.annotate(rev, rev)
338 except (faerror.CannotReuseError, faerror.CorruptedFileError):
339 except (faerror.CannotReuseError, faerror.CorruptedFileError):
339 # the cache is broken (could happen with renaming so the
340 # the cache is broken (could happen with renaming so the
340 # file history gets invalidated). rebuild and try again.
341 # file history gets invalidated). rebuild and try again.
341 ui.debug(
342 ui.debug(
342 b'fastannotate: %s: rebuilding broken cache\n' % path
343 b'fastannotate: %s: rebuilding broken cache\n' % path
343 )
344 )
344 actx.rebuild()
345 actx.rebuild()
345 try:
346 try:
346 actx.annotate(rev, rev)
347 actx.annotate(rev, rev)
347 except Exception as ex:
348 except Exception as ex:
348 # possibly a bug, but should not stop us from building
349 # possibly a bug, but should not stop us from building
349 # cache for other files.
350 # cache for other files.
350 ui.warn(
351 ui.warn(
351 _(
352 _(
352 b'fastannotate: %s: failed to '
353 b'fastannotate: %s: failed to '
353 b'build cache: %r\n'
354 b'build cache: %r\n'
354 )
355 )
355 % (path, ex)
356 % (path, ex)
356 )
357 )
357 progress.complete()
358 progress.complete()
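
A hedged server-side example of priming the cache with the command above
(the revision and the file pattern are illustrative)::

    $ hg debugbuildannotatecache --rev default 'glob:**.py'
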
@@ -1,263 +1,261 @@
1 # Copyright 2016-present Facebook. All Rights Reserved.
1 # Copyright 2016-present Facebook. All Rights Reserved.
2 #
2 #
3 # protocol: logic for a server providing fastannotate support
3 # protocol: logic for a server providing fastannotate support
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import contextlib
9 import contextlib
10 import os
10 import os
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial.pycompat import open
13 from mercurial.pycompat import open
14 from mercurial import (
14 from mercurial import (
15 error,
15 error,
16 extensions,
16 extensions,
17 hg,
17 hg,
18 pycompat,
18 pycompat,
19 util,
19 util,
20 wireprotov1peer,
20 wireprotov1peer,
21 wireprotov1server,
21 wireprotov1server,
22 )
22 )
23 from mercurial.utils import (
23 from mercurial.utils import (
24 urlutil,
24 urlutil,
25 )
25 )
26 from . import context
26 from . import context
27
27
28 # common
28 # common
29
29
30
30
31 def _getmaster(ui):
31 def _getmaster(ui):
32 """get the mainbranch, and enforce it is set"""
32 """get the mainbranch, and enforce it is set"""
33 master = ui.config(b'fastannotate', b'mainbranch')
33 master = ui.config(b'fastannotate', b'mainbranch')
34 if not master:
34 if not master:
35 raise error.Abort(
35 raise error.Abort(
36 _(
36 _(
37 b'fastannotate.mainbranch is required '
37 b'fastannotate.mainbranch is required '
38 b'for both the client and the server'
38 b'for both the client and the server'
39 )
39 )
40 )
40 )
41 return master
41 return master
42
42
43
43
44 # server-side
44 # server-side
45
45
46
46
47 def _capabilities(orig, repo, proto):
47 def _capabilities(orig, repo, proto):
48 result = orig(repo, proto)
48 result = orig(repo, proto)
49 result.append(b'getannotate')
49 result.append(b'getannotate')
50 return result
50 return result
51
51
52
52
53 def _getannotate(repo, proto, path, lastnode):
53 def _getannotate(repo, proto, path, lastnode):
54 # output:
54 # output:
55 # FILE := vfspath + '\0' + str(size) + '\0' + content
55 # FILE := vfspath + '\0' + str(size) + '\0' + content
56 # OUTPUT := '' | FILE + OUTPUT
56 # OUTPUT := '' | FILE + OUTPUT
57 result = b''
57 result = b''
58 buildondemand = repo.ui.configbool(
58 buildondemand = repo.ui.configbool(
59 b'fastannotate', b'serverbuildondemand', True
59 b'fastannotate', b'serverbuildondemand', True
60 )
60 )
61 with context.annotatecontext(repo, path) as actx:
61 with context.annotatecontext(repo, path) as actx:
62 if buildondemand:
62 if buildondemand:
63 # update before responding to the client
63 # update before responding to the client
64 master = _getmaster(repo.ui)
64 master = _getmaster(repo.ui)
65 try:
65 try:
66 if not actx.isuptodate(master):
66 if not actx.isuptodate(master):
67 actx.annotate(master, master)
67 actx.annotate(master, master)
68 except Exception:
68 except Exception:
69 # non-fast-forward move or corrupted. rebuild automatically.
69 # non-fast-forward move or corrupted. rebuild automatically.
70 actx.rebuild()
70 actx.rebuild()
71 try:
71 try:
72 actx.annotate(master, master)
72 actx.annotate(master, master)
73 except Exception:
73 except Exception:
74 actx.rebuild() # delete files
74 actx.rebuild() # delete files
75 finally:
75 finally:
76 # although the "with" context will also do a close/flush, we
76 # although the "with" context will also do a close/flush, we
77 # need to do it early so we can send the correct response to the
77 # need to do it early so we can send the correct response to the
78 # client.
78 # client.
79 actx.close()
79 actx.close()
80 # send back the full content of revmap and linelog; in the future we
80 # send back the full content of revmap and linelog; in the future we
81 # may want to do some rsync-like fancy updating.
81 # may want to do some rsync-like fancy updating.
82 # the lastnode check is not necessary if the client and the server
82 # the lastnode check is not necessary if the client and the server
83 # agree where the main branch is.
83 # agree where the main branch is.
84 if actx.lastnode != lastnode:
84 if actx.lastnode != lastnode:
85 for p in [actx.revmappath, actx.linelogpath]:
85 for p in [actx.revmappath, actx.linelogpath]:
86 if not os.path.exists(p):
86 if not os.path.exists(p):
87 continue
87 continue
88 with open(p, b'rb') as f:
88 with open(p, b'rb') as f:
89 content = f.read()
89 content = f.read()
90 vfsbaselen = len(repo.vfs.base + b'/')
90 vfsbaselen = len(repo.vfs.base + b'/')
91 relpath = p[vfsbaselen:]
91 relpath = p[vfsbaselen:]
92 result += b'%s\0%d\0%s' % (relpath, len(content), content)
92 result += b'%s\0%d\0%s' % (relpath, len(content), content)
93 return result
93 return result
94
94
95
95
96 def _registerwireprotocommand():
96 def _registerwireprotocommand():
97 if b'getannotate' in wireprotov1server.commands:
97 if b'getannotate' in wireprotov1server.commands:
98 return
98 return
99 wireprotov1server.wireprotocommand(b'getannotate', b'path lastnode')(
99 wireprotov1server.wireprotocommand(b'getannotate', b'path lastnode')(
100 _getannotate
100 _getannotate
101 )
101 )
102
102
103
103
104 def serveruisetup(ui):
104 def serveruisetup(ui):
105 _registerwireprotocommand()
105 _registerwireprotocommand()
106 extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities)
106 extensions.wrapfunction(wireprotov1server, b'_capabilities', _capabilities)
107
107
108
108
109 # client-side
109 # client-side
110
110
111
111
112 def _parseresponse(payload):
112 def _parseresponse(payload):
113 result = {}
113 result = {}
114 i = 0
114 i = 0
115 l = len(payload) - 1
115 l = len(payload) - 1
116 state = 0 # 0: vfspath, 1: size
116 state = 0 # 0: vfspath, 1: size
117 vfspath = size = b''
117 vfspath = size = b''
118 while i < l:
118 while i < l:
119 ch = payload[i : i + 1]
119 ch = payload[i : i + 1]
120 if ch == b'\0':
120 if ch == b'\0':
121 if state == 1:
121 if state == 1:
122 result[vfspath] = payload[i + 1 : i + 1 + int(size)]
122 result[vfspath] = payload[i + 1 : i + 1 + int(size)]
123 i += int(size)
123 i += int(size)
124 state = 0
124 state = 0
125 vfspath = size = b''
125 vfspath = size = b''
126 elif state == 0:
126 elif state == 0:
127 state = 1
127 state = 1
128 else:
128 else:
129 if state == 1:
129 if state == 1:
130 size += ch
130 size += ch
131 elif state == 0:
131 elif state == 0:
132 vfspath += ch
132 vfspath += ch
133 i += 1
133 i += 1
134 return result
134 return result
135
135
136
136
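To illustrate the framing that _getannotate() produces and
_parseresponse() consumes, a minimal sketch (paths and contents are
invented; real payloads carry revmap and linelog files under
fastannotate/)::

    payload = b''
    for vfspath, content in [
        (b'fastannotate/default/a.py.l', b'LINELOG'),
        (b'fastannotate/default/a.py.m', b'REVMAP'),
    ]:
        # FILE := vfspath + '\0' + str(size) + '\0' + content
        payload += b'%s\0%d\0%s' % (vfspath, len(content), content)

    # _parseresponse(payload) then returns:
    # {b'fastannotate/default/a.py.l': b'LINELOG',
    #  b'fastannotate/default/a.py.m': b'REVMAP'}
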
137 def peersetup(ui, peer):
137 def peersetup(ui, peer):
138 class fastannotatepeer(peer.__class__):
138 class fastannotatepeer(peer.__class__):
139 @wireprotov1peer.batchable
139 @wireprotov1peer.batchable
140 def getannotate(self, path, lastnode=None):
140 def getannotate(self, path, lastnode=None):
141 if not self.capable(b'getannotate'):
141 if not self.capable(b'getannotate'):
142 ui.warn(_(b'remote peer cannot provide annotate cache\n'))
142 ui.warn(_(b'remote peer cannot provide annotate cache\n'))
143 yield None, None
143 return None, None
144 else:
144 else:
145 args = {b'path': path, b'lastnode': lastnode or b''}
145 args = {b'path': path, b'lastnode': lastnode or b''}
146 f = wireprotov1peer.future()
146 return args, _parseresponse
147 yield args, f
148 yield _parseresponse(f.value)
149
147
150 peer.__class__ = fastannotatepeer
148 peer.__class__ = fastannotatepeer
151
149
152
150
153 @contextlib.contextmanager
151 @contextlib.contextmanager
154 def annotatepeer(repo):
152 def annotatepeer(repo):
155 ui = repo.ui
153 ui = repo.ui
156
154
157 remotedest = ui.config(b'fastannotate', b'remotepath', b'default')
155 remotedest = ui.config(b'fastannotate', b'remotepath', b'default')
158 r = urlutil.get_unique_pull_path(b'fastannotate', repo, ui, remotedest)
156 r = urlutil.get_unique_pull_path(b'fastannotate', repo, ui, remotedest)
159 remotepath = r[0]
157 remotepath = r[0]
160 peer = hg.peer(ui, {}, remotepath)
158 peer = hg.peer(ui, {}, remotepath)
161
159
162 try:
160 try:
163 yield peer
161 yield peer
164 finally:
162 finally:
165 peer.close()
163 peer.close()
166
164
167
165
168 def clientfetch(repo, paths, lastnodemap=None, peer=None):
166 def clientfetch(repo, paths, lastnodemap=None, peer=None):
169 """download annotate cache from the server for paths"""
167 """download annotate cache from the server for paths"""
170 if not paths:
168 if not paths:
171 return
169 return
172
170
173 if peer is None:
171 if peer is None:
174 with annotatepeer(repo) as peer:
172 with annotatepeer(repo) as peer:
175 return clientfetch(repo, paths, lastnodemap, peer)
173 return clientfetch(repo, paths, lastnodemap, peer)
176
174
177 if lastnodemap is None:
175 if lastnodemap is None:
178 lastnodemap = {}
176 lastnodemap = {}
179
177
180 ui = repo.ui
178 ui = repo.ui
181 results = []
179 results = []
182 with peer.commandexecutor() as batcher:
180 with peer.commandexecutor() as batcher:
183 ui.debug(b'fastannotate: requesting %d files\n' % len(paths))
181 ui.debug(b'fastannotate: requesting %d files\n' % len(paths))
184 for p in paths:
182 for p in paths:
185 results.append(
183 results.append(
186 batcher.callcommand(
184 batcher.callcommand(
187 b'getannotate',
185 b'getannotate',
188 {b'path': p, b'lastnode': lastnodemap.get(p)},
186 {b'path': p, b'lastnode': lastnodemap.get(p)},
189 )
187 )
190 )
188 )
191
189
192 for result in results:
190 for result in results:
193 r = result.result()
191 r = result.result()
194 # TODO: pconvert these paths on the server?
192 # TODO: pconvert these paths on the server?
195 r = {util.pconvert(p): v for p, v in pycompat.iteritems(r)}
193 r = {util.pconvert(p): v for p, v in pycompat.iteritems(r)}
196 for path in sorted(r):
194 for path in sorted(r):
197 # ignore malicious paths
195 # ignore malicious paths
198 if not path.startswith(b'fastannotate/') or b'/../' in (
196 if not path.startswith(b'fastannotate/') or b'/../' in (
199 path + b'/'
197 path + b'/'
200 ):
198 ):
201 ui.debug(
199 ui.debug(
202 b'fastannotate: ignored malicious path %s\n' % path
200 b'fastannotate: ignored malicious path %s\n' % path
203 )
201 )
204 continue
202 continue
205 content = r[path]
203 content = r[path]
206 if ui.debugflag:
204 if ui.debugflag:
207 ui.debug(
205 ui.debug(
208 b'fastannotate: writing %d bytes to %s\n'
206 b'fastannotate: writing %d bytes to %s\n'
209 % (len(content), path)
207 % (len(content), path)
210 )
208 )
211 repo.vfs.makedirs(os.path.dirname(path))
209 repo.vfs.makedirs(os.path.dirname(path))
212 with repo.vfs(path, b'wb') as f:
210 with repo.vfs(path, b'wb') as f:
213 f.write(content)
211 f.write(content)
214
212
215
213
216 def _filterfetchpaths(repo, paths):
214 def _filterfetchpaths(repo, paths):
217 """return a subset of paths whose history is long and need to fetch linelog
215 """return a subset of paths whose history is long and need to fetch linelog
218 from the server. works with remotefilelog and non-remotefilelog repos.
216 from the server. works with remotefilelog and non-remotefilelog repos.
219 """
217 """
220 threshold = repo.ui.configint(b'fastannotate', b'clientfetchthreshold', 10)
218 threshold = repo.ui.configint(b'fastannotate', b'clientfetchthreshold', 10)
221 if threshold <= 0:
219 if threshold <= 0:
222 return paths
220 return paths
223
221
224 result = []
222 result = []
225 for path in paths:
223 for path in paths:
226 try:
224 try:
227 if len(repo.file(path)) >= threshold:
225 if len(repo.file(path)) >= threshold:
228 result.append(path)
226 result.append(path)
229 except Exception: # file not found etc.
227 except Exception: # file not found etc.
230 result.append(path)
228 result.append(path)
231
229
232 return result
230 return result
233
231
234
232
235 def localreposetup(ui, repo):
233 def localreposetup(ui, repo):
236 class fastannotaterepo(repo.__class__):
234 class fastannotaterepo(repo.__class__):
237 def prefetchfastannotate(self, paths, peer=None):
235 def prefetchfastannotate(self, paths, peer=None):
238 master = _getmaster(self.ui)
236 master = _getmaster(self.ui)
239 needupdatepaths = []
237 needupdatepaths = []
240 lastnodemap = {}
238 lastnodemap = {}
241 try:
239 try:
242 for path in _filterfetchpaths(self, paths):
240 for path in _filterfetchpaths(self, paths):
243 with context.annotatecontext(self, path) as actx:
241 with context.annotatecontext(self, path) as actx:
244 if not actx.isuptodate(master, strict=False):
242 if not actx.isuptodate(master, strict=False):
245 needupdatepaths.append(path)
243 needupdatepaths.append(path)
246 lastnodemap[path] = actx.lastnode
244 lastnodemap[path] = actx.lastnode
247 if needupdatepaths:
245 if needupdatepaths:
248 clientfetch(self, needupdatepaths, lastnodemap, peer)
246 clientfetch(self, needupdatepaths, lastnodemap, peer)
249 except Exception as ex:
247 except Exception as ex:
250 # could be directory not writable or so, not fatal
248 # could be directory not writable or so, not fatal
251 self.ui.debug(b'fastannotate: prefetch failed: %r\n' % ex)
249 self.ui.debug(b'fastannotate: prefetch failed: %r\n' % ex)
252
250
253 repo.__class__ = fastannotaterepo
251 repo.__class__ = fastannotaterepo
254
252
255
253
256 def clientreposetup(ui, repo):
254 def clientreposetup(ui, repo):
257 _registerwireprotocommand()
255 _registerwireprotocommand()
258 if repo.local():
256 if repo.local():
259 localreposetup(ui, repo)
257 localreposetup(ui, repo)
260 # TODO: this mutates global state, but only if at least one repo
258 # TODO: this mutates global state, but only if at least one repo
261 # has the extension enabled. This is probably bad for hgweb.
259 # has the extension enabled. This is probably bad for hgweb.
262 if peersetup not in hg.wirepeersetupfuncs:
260 if peersetup not in hg.wirepeersetupfuncs:
263 hg.wirepeersetupfuncs.append(peersetup)
261 hg.wirepeersetupfuncs.append(peersetup)
@@ -1,219 +1,220 @@
1 # Copyright 2020 Joerg Sonnenberger <joerg@bec.de>
1 # Copyright 2020 Joerg Sonnenberger <joerg@bec.de>
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5 """export repositories as git fast-import stream"""
5 """export repositories as git fast-import stream"""
6
6
7 # The format specification for fast-import streams can be found at
7 # The format specification for fast-import streams can be found at
8 # https://git-scm.com/docs/git-fast-import#_input_format
8 # https://git-scm.com/docs/git-fast-import#_input_format
9
9
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11 import re
11 import re
12
12
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14 from mercurial.node import hex, nullrev
14 from mercurial.node import hex, nullrev
15 from mercurial.utils import stringutil
15 from mercurial.utils import stringutil
16 from mercurial import (
16 from mercurial import (
17 error,
17 error,
18 logcmdutil,
18 pycompat,
19 pycompat,
19 registrar,
20 registrar,
20 scmutil,
21 scmutil,
21 )
22 )
22 from .convert import convcmd
23 from .convert import convcmd
23
24
24 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
25 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
25 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
26 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
26 # be specifying the version(s) of Mercurial they are tested with, or
27 # be specifying the version(s) of Mercurial they are tested with, or
27 # leave the attribute unspecified.
28 # leave the attribute unspecified.
28 testedwith = b"ships-with-hg-core"
29 testedwith = b"ships-with-hg-core"
29
30
30 cmdtable = {}
31 cmdtable = {}
31 command = registrar.command(cmdtable)
32 command = registrar.command(cmdtable)
32
33
33 GIT_PERSON_PROHIBITED = re.compile(b'[<>\n"]')
34 GIT_PERSON_PROHIBITED = re.compile(b'[<>\n"]')
34 GIT_EMAIL_PROHIBITED = re.compile(b"[<> \n]")
35 GIT_EMAIL_PROHIBITED = re.compile(b"[<> \n]")
35
36
36
37
37 def convert_to_git_user(authormap, user, rev):
38 def convert_to_git_user(authormap, user, rev):
38 mapped_user = authormap.get(user, user)
39 mapped_user = authormap.get(user, user)
39 user_person = stringutil.person(mapped_user)
40 user_person = stringutil.person(mapped_user)
40 user_email = stringutil.email(mapped_user)
41 user_email = stringutil.email(mapped_user)
41 if GIT_EMAIL_PROHIBITED.match(user_email) or GIT_PERSON_PROHIBITED.match(
42 if GIT_EMAIL_PROHIBITED.match(user_email) or GIT_PERSON_PROHIBITED.match(
42 user_person
43 user_person
43 ):
44 ):
44 raise error.Abort(
45 raise error.Abort(
45 _(b"Unable to parse user into person and email for revision %s")
46 _(b"Unable to parse user into person and email for revision %s")
46 % rev
47 % rev
47 )
48 )
48 if user_person:
49 if user_person:
49 return b'"' + user_person + b'" <' + user_email + b'>'
50 return b'"' + user_person + b'" <' + user_email + b'>'
50 else:
51 else:
51 return b"<" + user_email + b">"
52 return b"<" + user_email + b">"
52
53
53
54
54 def convert_to_git_date(date):
55 def convert_to_git_date(date):
55 timestamp, utcoff = date
56 timestamp, utcoff = date
56 tzsign = b"+" if utcoff <= 0 else b"-"
57 tzsign = b"+" if utcoff <= 0 else b"-"
57 if utcoff % 60 != 0:
58 if utcoff % 60 != 0:
58 raise error.Abort(
59 raise error.Abort(
59 _(b"UTC offset in %b is not an integer number of seconds") % (date,)
60 _(b"UTC offset in %b is not an integer number of seconds") % (date,)
60 )
61 )
61 utcoff = abs(utcoff) // 60
62 utcoff = abs(utcoff) // 60
62 tzh = utcoff // 60
63 tzh = utcoff // 60
63 tzmin = utcoff % 60
64 tzmin = utcoff % 60
64 return b"%d " % int(timestamp) + tzsign + b"%02d%02d" % (tzh, tzmin)
65 return b"%d " % int(timestamp) + tzsign + b"%02d%02d" % (tzh, tzmin)
65
66
66
67
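For intuition about the sign flip above: Mercurial stores the UTC offset in seconds *west* of UTC, while git expects a signed HHMM zone east of UTC. A minimal standalone sketch (using str instead of bytes, otherwise mirroring convert_to_git_date):

def to_git_date(date):
    # date is (unix_timestamp, seconds_west_of_utc), Mercurial's convention
    timestamp, utcoff = date
    sign = "+" if utcoff <= 0 else "-"  # west-of-UTC storage flips the sign
    minutes = abs(utcoff) // 60
    return "%d %s%02d%02d" % (int(timestamp), sign, minutes // 60, minutes % 60)

# Two hours east of UTC is stored as -7200 seconds west and renders as +0200.
assert to_git_date((1633024800, -7200)) == "1633024800 +0200"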
 def convert_to_git_ref(branch):
     # XXX filter/map depending on git restrictions
     return b"refs/heads/" + branch


 def write_data(buf, data, skip_newline):
     buf.append(b"data %d\n" % len(data))
     buf.append(data)
     if not skip_newline or data[-1:] != b"\n":
         buf.append(b"\n")

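The `data` command in a fast-import stream is a byte count followed by the raw payload; blobs always end with a newline, while a commit message keeps exactly the newline it already has. A quick sanity check of the helper above (reproduced here so the snippet runs standalone):

def write_data(buf, data, skip_newline):
    # Same logic as the helper in the diff above.
    buf.append(b"data %d\n" % len(data))
    buf.append(data)
    if not skip_newline or data[-1:] != b"\n":
        buf.append(b"\n")

buf = []
write_data(buf, b"hello", False)  # blob: trailing newline is added
assert b"".join(buf) == b"data 5\nhello\n"

buf = []
write_data(buf, b"a message\n", True)  # commit message: newline kept as-is
assert b"".join(buf) == b"data 10\na message\n"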
 def export_commit(ui, repo, rev, marks, authormap):
     ctx = repo[rev]
     revid = ctx.hex()
     if revid in marks:
         ui.debug(b"warning: revision %s already exported, skipped\n" % revid)
         return
     parents = [p for p in ctx.parents() if p.rev() != nullrev]
     for p in parents:
         if p.hex() not in marks:
             ui.warn(
                 _(b"warning: parent %s of %s has not been exported, skipped\n")
                 % (p, revid)
             )
             return

     # For all files modified by the commit, check if they have already
     # been exported and otherwise dump the blob with the new mark.
     for fname in ctx.files():
         if fname not in ctx:
             continue
         filectx = ctx.filectx(fname)
         filerev = hex(filectx.filenode())
         if filerev not in marks:
             mark = len(marks) + 1
             marks[filerev] = mark
             data = filectx.data()
             buf = [b"blob\n", b"mark :%d\n" % mark]
             write_data(buf, data, False)
             ui.write(*buf, keepprogressbar=True)
             del buf

     # Assign a mark for the current revision for references by
     # later merge commits.
     mark = len(marks) + 1
     marks[revid] = mark

     ref = convert_to_git_ref(ctx.branch())
     buf = [
         b"commit %s\n" % ref,
         b"mark :%d\n" % mark,
         b"committer %s %s\n"
         % (
             convert_to_git_user(authormap, ctx.user(), revid),
             convert_to_git_date(ctx.date()),
         ),
     ]
     write_data(buf, ctx.description(), True)
     if parents:
         buf.append(b"from :%d\n" % marks[parents[0].hex()])
     if len(parents) == 2:
         buf.append(b"merge :%d\n" % marks[parents[1].hex()])
         p0ctx = repo[parents[0]]
         files = ctx.manifest().diff(p0ctx.manifest())
     else:
         files = ctx.files()
     filebuf = []
     for fname in files:
         if fname not in ctx:
             filebuf.append((fname, b"D %s\n" % fname))
         else:
             filectx = ctx.filectx(fname)
             filerev = filectx.filenode()
             fileperm = b"755" if filectx.isexec() else b"644"
             changed = b"M %s :%d %s\n" % (fileperm, marks[hex(filerev)], fname)
             filebuf.append((fname, changed))
     filebuf.sort()
     buf.extend(changed for (fname, changed) in filebuf)
     del filebuf
     buf.append(b"\n")
     ui.write(*buf, keepprogressbar=True)
     del buf


 isrev = re.compile(b"^[0-9a-f]{40}$")


 @command(
     b"fastexport",
     [
         (b"r", b"rev", [], _(b"revisions to export"), _(b"REV")),
         (b"i", b"import-marks", b"", _(b"old marks file to read"), _(b"FILE")),
         (b"e", b"export-marks", b"", _(b"new marks file to write"), _(b"FILE")),
         (
             b"A",
             b"authormap",
             b"",
             _(b"remap usernames using this file"),
             _(b"FILE"),
         ),
     ],
     _(b"[OPTION]... [REV]..."),
     helpcategory=command.CATEGORY_IMPORT_EXPORT,
 )
 def fastexport(ui, repo, *revs, **opts):
     """export repository as git fast-import stream

     This command lets you dump a repository as a human-readable text stream.
     It can be piped into corresponding import routines like "git fast-import".
     Incremental dumps can be created by using marks files.
     """
     opts = pycompat.byteskwargs(opts)

     revs += tuple(opts.get(b"rev", []))
     if not revs:
         revs = scmutil.revrange(repo, [b":"])
     else:
-        revs = scmutil.revrange(repo, revs)
+        revs = logcmdutil.revrange(repo, revs)
     if not revs:
         raise error.Abort(_(b"no revisions matched"))
     authorfile = opts.get(b"authormap")
     if authorfile:
         authormap = convcmd.readauthormap(ui, authorfile)
     else:
         authormap = {}

     import_marks = opts.get(b"import_marks")
     marks = {}
     if import_marks:
         with open(import_marks, "rb") as import_marks_file:
             for line in import_marks_file:
                 line = line.strip()
                 if not isrev.match(line) or line in marks:
                     raise error.Abort(_(b"Corrupted marks file"))
                 marks[line] = len(marks) + 1

     revs.sort()
     with ui.makeprogress(
         _(b"exporting"), unit=_(b"revisions"), total=len(revs)
     ) as progress:
         for rev in revs:
             export_commit(ui, repo, rev, marks, authormap)
             progress.increment()

     export_marks = opts.get(b"export_marks")
     if export_marks:
         with open(export_marks, "wb") as export_marks_file:
             output_marks = [None] * len(marks)
             for k, v in marks.items():
                 output_marks[v - 1] = k
             for k in output_marks:
                 export_marks_file.write(k + b"\n")
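Marks files are what make the incremental dumps mentioned in the docstring work: each line is a 40-hex node, and its 1-based line number is the mark id. A standalone sketch of the round trip implemented by the import/export code above:

import re

ISREV = re.compile(rb"^[0-9a-f]{40}$")  # same shape as isrev above

def read_marks(data):
    marks = {}
    for line in data.splitlines():
        line = line.strip()
        if not ISREV.match(line) or line in marks:
            raise ValueError("corrupted marks file")
        marks[line] = len(marks) + 1  # mark ids are 1-based line numbers
    return marks

def write_marks(marks):
    out = [None] * len(marks)
    for node, mark in marks.items():
        out[mark - 1] = node
    return b"".join(node + b"\n" for node in out)

nodes = (b"a" * 40) + b"\n" + (b"b" * 40) + b"\n"
assert write_marks(read_marks(nodes)) == nodes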
@@ -1,939 +1,971 @@
 # fix - rewrite file content in changesets and working copy
 #
 # Copyright 2018 Google LLC.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 """rewrite file content in changesets or working copy (EXPERIMENTAL)

 Provides a command that runs configured tools on the contents of modified files,
 writing back any fixes to the working copy or replacing changesets.

 Here is an example configuration that causes :hg:`fix` to apply automatic
 formatting fixes to modified lines in C++ code::

   [fix]
   clang-format:command=clang-format --assume-filename={rootpath}
   clang-format:linerange=--lines={first}:{last}
   clang-format:pattern=set:**.cpp or **.hpp

 The :command suboption forms the first part of the shell command that will be
 used to fix a file. The content of the file is passed on standard input, and the
 fixed file content is expected on standard output. Any output on standard error
 will be displayed as a warning. If the exit status is not zero, the file will
 not be affected. A placeholder warning is displayed if there is a non-zero exit
 status but no standard error output. Some values may be substituted into the
 command::

   {rootpath}  The path of the file being fixed, relative to the repo root
   {basename}  The name of the file being fixed, without the directory path

 If the :linerange suboption is set, the tool will only be run if there are
 changed lines in a file. The value of this suboption is appended to the shell
 command once for every range of changed lines in the file. Some values may be
 substituted into the command::

   {first}   The 1-based line number of the first line in the modified range
   {last}    The 1-based line number of the last line in the modified range

 Deleted sections of a file will be ignored by :linerange, because there is no
 corresponding line range in the version being fixed.

 By default, tools that set :linerange will only be executed if there is at least
 one changed line range. This is meant to prevent accidents like running a code
 formatter in such a way that it unexpectedly reformats the whole file. If such a
 tool needs to operate on unchanged files, it should set the :skipclean suboption
 to false.

 The :pattern suboption determines which files will be passed through each
 configured tool. See :hg:`help patterns` for possible values. However, all
 patterns are relative to the repo root, even if that text says they are relative
 to the current working directory. If there are file arguments to :hg:`fix`, the
 intersection of these patterns is used.

 There is also a configurable limit for the maximum size of file that will be
 processed by :hg:`fix`::

   [fix]
   maxfilesize = 2MB

 Normally, execution of configured tools will continue after a failure (indicated
 by a non-zero exit status). It can also be configured to abort after the first
 such failure, so that no files will be affected if any tool fails. This abort
 will also cause :hg:`fix` to exit with a non-zero status::

   [fix]
   failure = abort

 When multiple tools are configured to affect a file, they execute in an order
 defined by the :priority suboption. The priority suboption has a default value
 of zero for each tool. Tools are executed in order of descending priority. The
 execution order of tools with equal priority is unspecified. For example, you
 could use the 'sort' and 'head' utilities to keep only the 10 smallest numbers
 in a text file by ensuring that 'sort' runs before 'head'::

   [fix]
   sort:command = sort -n
   head:command = head -n 10
   sort:pattern = numbers.txt
   head:pattern = numbers.txt
   sort:priority = 2
   head:priority = 1

 To account for changes made by each tool, the line numbers used for incremental
 formatting are recomputed before executing the next tool. So, each tool may see
 different values for the arguments added by the :linerange suboption.

 Each fixer tool is allowed to return some metadata in addition to the fixed file
 content. The metadata must be placed before the file content on stdout,
 separated from the file content by a zero byte. The metadata is parsed as a JSON
 value (so, it should be UTF-8 encoded and contain no zero bytes). A fixer tool
 is expected to produce this metadata encoding if and only if the :metadata
 suboption is true::

   [fix]
   tool:command = tool --prepend-json-metadata
   tool:metadata = true

 The metadata values are passed to hooks, which can be used to print summaries or
 perform other post-fixing work. The supported hooks are::

   "postfixfile"
     Run once for each file in each revision where any fixer tools made changes
     to the file content. Provides "$HG_REV" and "$HG_PATH" to identify the file,
     and "$HG_METADATA" with a map of fixer names to metadata values from fixer
     tools that affected the file. Fixer tools that didn't affect the file have a
     value of None. Only fixer tools that executed are present in the metadata.

   "postfix"
     Run once after all files and revisions have been handled. Provides
     "$HG_REPLACEMENTS" with information about what revisions were created and
     made obsolete. Provides a boolean "$HG_WDIRWRITTEN" to indicate whether any
     files in the working copy were updated. Provides a list "$HG_METADATA"
     mapping fixer tool names to lists of metadata values returned from
     executions that modified a file. This aggregates the same metadata
     previously passed to the "postfixfile" hook.

 Fixer tools are run in the repository's root directory. This allows them to read
 configuration files from the working copy, or even write to the working copy.
 The working copy is not updated to match the revision being fixed. In fact,
 several revisions may be fixed in parallel. Writes to the working copy are not
 amended into the revision being fixed; fixer tools should always write fixed
 file content back to stdout as documented above.
 """

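To make the :metadata contract described above concrete, here is a hypothetical fixer tool (its name and the metadata it reports are invented for illustration): it reads the file from stdin and writes UTF-8 JSON metadata, a zero byte, then the fixed content to stdout.

#!/usr/bin/env python3
# upperfixer.py - hypothetical fixer obeying the :metadata protocol above.
# Configure with, e.g.:
#   [fix]
#   upper:command = python3 upperfixer.py
#   upper:metadata = true
#   upper:pattern = set:**.txt
import json
import sys

data = sys.stdin.buffer.read()
fixed = data.upper()  # the "fix": upper-case everything
meta = {"changed": fixed != data, "bytes": len(fixed)}
out = sys.stdout.buffer
out.write(json.dumps(meta).encode("utf-8"))  # UTF-8 JSON, no zero bytes
out.write(b"\0")  # the zero-byte separator required by :metadata = true
out.write(fixed)  # the fixed file content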
 from __future__ import absolute_import

 import collections
 import itertools
 import os
 import re
 import subprocess

 from mercurial.i18n import _
 from mercurial.node import (
     nullid,
     nullrev,
     wdirrev,
 )

 from mercurial.utils import procutil

 from mercurial import (
     cmdutil,
     context,
     copies,
     error,
+    logcmdutil,
     match as matchmod,
     mdiff,
     merge,
     mergestate as mergestatemod,
     obsolete,
     pycompat,
     registrar,
     rewriteutil,
     scmutil,
     util,
     worker,
 )

 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = b'ships-with-hg-core'

 cmdtable = {}
 command = registrar.command(cmdtable)

 configtable = {}
 configitem = registrar.configitem(configtable)

 # Register the suboptions allowed for each configured fixer, and default values.
 FIXER_ATTRS = {
     b'command': None,
     b'linerange': None,
     b'pattern': None,
     b'priority': 0,
     b'metadata': False,
     b'skipclean': True,
     b'enabled': True,
 }

 for key, default in FIXER_ATTRS.items():
     configitem(b'fix', b'.*:%s$' % key, default=default, generic=True)

 # A good default size allows most source code files to be fixed, but avoids
 # letting fixer tools choke on huge inputs, which could be surprising to the
 # user.
 configitem(b'fix', b'maxfilesize', default=b'2MB')

 # Allow fix commands to exit non-zero if an executed fixer tool exits non-zero.
 # This helps users write shell scripts that stop when a fixer tool signals a
 # problem.
 configitem(b'fix', b'failure', default=b'continue')


 def checktoolfailureaction(ui, message, hint=None):
     """Abort with 'message' if fix.failure=abort"""
     action = ui.config(b'fix', b'failure')
     if action not in (b'continue', b'abort'):
         raise error.Abort(
             _(b'unknown fix.failure action: %s') % (action,),
             hint=_(b'use "continue" or "abort"'),
         )
     if action == b'abort':
         raise error.Abort(message, hint=hint)


 allopt = (b'', b'all', False, _(b'fix all non-public non-obsolete revisions'))
 baseopt = (
     b'',
     b'base',
     [],
     _(
         b'revisions to diff against (overrides automatic '
         b'selection, and applies to every revision being '
         b'fixed)'
     ),
     _(b'REV'),
 )
 revopt = (b'r', b'rev', [], _(b'revisions to fix (ADVANCED)'), _(b'REV'))
 sourceopt = (
     b's',
     b'source',
     [],
     _(b'fix the specified revisions and their descendants'),
     _(b'REV'),
 )
 wdiropt = (b'w', b'working-dir', False, _(b'fix the working directory'))
 wholeopt = (b'', b'whole', False, _(b'always fix every line of a file'))
 usage = _(b'[OPTION]... [FILE]...')


 @command(
     b'fix',
     [allopt, baseopt, revopt, sourceopt, wdiropt, wholeopt],
     usage,
     helpcategory=command.CATEGORY_FILE_CONTENTS,
 )
 def fix(ui, repo, *pats, **opts):
     """rewrite file content in changesets or working directory

     Runs any configured tools to fix the content of files. Only affects files
     with changes, unless file arguments are provided. Only affects changed lines
     of files, unless the --whole flag is used. Some tools may always affect the
     whole file regardless of --whole.

     If --working-dir is used, files with uncommitted changes in the working copy
     will be fixed. Note that no backups are made.

     If revisions are specified with --source, those revisions and their
     descendants will be checked, and they may be replaced with new revisions
     that have fixed file content. By automatically including the descendants,
     no merging, rebasing, or evolution will be required. If an ancestor of the
     working copy is included, then the working copy itself will also be fixed,
     and the working copy will be updated to the fixed parent.

     When determining what lines of each file to fix at each revision, the whole
     set of revisions being fixed is considered, so that fixes to earlier
     revisions are not forgotten in later ones. The --base flag can be used to
     override this default behavior, though it is not usually desirable to do so.
     """
     opts = pycompat.byteskwargs(opts)
     cmdutil.check_at_most_one_arg(opts, b'all', b'source', b'rev')
     cmdutil.check_incompatible_arguments(
         opts, b'working_dir', [b'all', b'source']
     )

     with repo.wlock(), repo.lock(), repo.transaction(b'fix'):
         revstofix = getrevstofix(ui, repo, opts)
         basectxs = getbasectxs(repo, opts, revstofix)
         workqueue, numitems = getworkqueue(
             ui, repo, pats, opts, revstofix, basectxs
         )
         basepaths = getbasepaths(repo, opts, workqueue, basectxs)
         fixers = getfixers(ui)

         # Rather than letting each worker independently fetch the files
         # (which also would add complications for shared/keepalive
         # connections), prefetch them all first.
         _prefetchfiles(repo, workqueue, basepaths)

         # There are no data dependencies between the workers fixing each file
         # revision, so we can use all available parallelism.
         def getfixes(items):
-            for rev, path in items:
-                ctx = repo[rev]
+            for srcrev, path, dstrevs in items:
+                ctx = repo[srcrev]
                 olddata = ctx[path].data()
                 metadata, newdata = fixfile(
-                    ui, repo, opts, fixers, ctx, path, basepaths, basectxs[rev]
+                    ui,
+                    repo,
+                    opts,
+                    fixers,
+                    ctx,
+                    path,
+                    basepaths,
+                    basectxs[srcrev],
                 )
-                # Don't waste memory/time passing unchanged content back, but
-                # produce one result per item either way.
-                yield (
-                    rev,
-                    path,
-                    metadata,
-                    newdata if newdata != olddata else None,
-                )
+                # We ungroup the work items now, because the code that consumes
+                # these results has to handle each dstrev separately, and in
+                # topological order. Because these are handled in topological
+                # order, it's important that we pass around references to
+                # "newdata" instead of copying it. Otherwise, we would be
+                # keeping more copies of file content in memory at a time than
+                # if we hadn't bothered to group/deduplicate the work items.
+                data = newdata if newdata != olddata else None
+                for dstrev in dstrevs:
+                    yield (dstrev, path, metadata, data)

         results = worker.worker(
             ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False
         )

         # We have to hold on to the data for each successor revision in memory
         # until all its parents are committed. We ensure this by committing and
         # freeing memory for the revisions in some topological order. This
         # leaves a little bit of memory efficiency on the table, but also makes
         # the tests deterministic. It might also be considered a feature since
         # it makes the results more easily reproducible.
         filedata = collections.defaultdict(dict)
         aggregatemetadata = collections.defaultdict(list)
         replacements = {}
         wdirwritten = False
         commitorder = sorted(revstofix, reverse=True)
         with ui.makeprogress(
             topic=_(b'fixing'), unit=_(b'files'), total=sum(numitems.values())
         ) as progress:
             for rev, path, filerevmetadata, newdata in results:
                 progress.increment(item=path)
                 for fixername, fixermetadata in filerevmetadata.items():
                     aggregatemetadata[fixername].append(fixermetadata)
                 if newdata is not None:
                     filedata[rev][path] = newdata
                     hookargs = {
                         b'rev': rev,
                         b'path': path,
                         b'metadata': filerevmetadata,
                     }
                     repo.hook(
                         b'postfixfile',
                         throw=False,
                         **pycompat.strkwargs(hookargs)
                     )
                 numitems[rev] -= 1
                 # Apply the fixes for this and any other revisions that are
                 # ready and sitting at the front of the queue. Using a loop here
                 # prevents the queue from being blocked by the first revision to
                 # be ready out of order.
                 while commitorder and not numitems[commitorder[-1]]:
                     rev = commitorder.pop()
                     ctx = repo[rev]
                     if rev == wdirrev:
                         writeworkingdir(repo, ctx, filedata[rev], replacements)
                         wdirwritten = bool(filedata[rev])
                     else:
                         replacerev(ui, repo, ctx, filedata[rev], replacements)
                     del filedata[rev]

         cleanup(repo, replacements, wdirwritten)
         hookargs = {
             b'replacements': replacements,
             b'wdirwritten': wdirwritten,
             b'metadata': aggregatemetadata,
         }
         repo.hook(b'postfix', throw=True, **pycompat.strkwargs(hookargs))


 def cleanup(repo, replacements, wdirwritten):
     """Calls scmutil.cleanupnodes() with the given replacements.

     "replacements" is a dict from nodeid to nodeid, with one key and one value
     for every revision that was affected by fixing. This is slightly different
     from cleanupnodes().

     "wdirwritten" is a bool which tells whether the working copy was affected by
     fixing, since it has no entry in "replacements".

     Useful as a hook point for extending "hg fix" with output summarizing the
     effects of the command, though we choose not to output anything here.
     """
     replacements = {
         prec: [succ] for prec, succ in pycompat.iteritems(replacements)
     }
     scmutil.cleanupnodes(repo, replacements, b'fix', fixphase=True)


 def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
-    """Constructs the list of files to be fixed at specific revisions
+    """Constructs a list of files to fix and which revisions each fix applies to

-    It is up to the caller how to consume the work items, and the only
-    dependence between them is that replacement revisions must be committed in
-    topological order. Each work item represents a file in the working copy or
-    in some revision that should be fixed and written back to the working copy
-    or into a replacement revision.
+    To avoid duplicating work, there is usually only one work item for each file
+    revision that might need to be fixed. There can be multiple work items per
+    file revision if the same file needs to be fixed in multiple changesets with
+    different baserevs. Each work item also contains a list of changesets where
+    the file's data should be replaced with the fixed data. The work items for
+    earlier changesets come earlier in the work queue, to improve pipelining by
+    allowing the first changeset to be replaced while fixes are still being
+    computed for later changesets.

-    Work items for the same revision are grouped together, so that a worker
-    pool starting with the first N items in parallel is likely to finish the
-    first revision's work before other revisions. This can allow us to write
-    the result to disk and reduce memory footprint. At time of writing, the
-    partition strategy in worker.py seems favorable to this. We also sort the
-    items by ascending revision number to match the order in which we commit
-    the fixes later.
+    Also returned is a map from changesets to the count of work items that might
+    affect each changeset. This is used later to count when all of a changeset's
+    work items have been finished, without having to inspect the remaining work
+    queue in each worker subprocess.
+
+    The example work item (1, "foo/bar.txt", (1, 2, 3)) means that the data of
+    bar.txt should be read from revision 1, then fixed, and written back to
+    revisions 1, 2 and 3. Revision 1 is called the "srcrev" and the list of
+    revisions is called the "dstrevs". In practice the srcrev is always one of
+    the dstrevs, and we make that choice when constructing the work item so that
+    the choice can't be made inconsistently later on. The dstrevs should all
+    have the same file revision for the given path, so the choice of srcrev is
+    arbitrary. The wdirrev can be both a dstrev and a srcrev.
     """
-    workqueue = []
+    dstrevmap = collections.defaultdict(list)
     numitems = collections.defaultdict(int)
     maxfilesize = ui.configbytes(b'fix', b'maxfilesize')
     for rev in sorted(revstofix):
         fixctx = repo[rev]
         match = scmutil.match(fixctx, pats, opts)
         for path in sorted(
             pathstofix(ui, repo, pats, opts, match, basectxs[rev], fixctx)
         ):
             fctx = fixctx[path]
             if fctx.islink():
                 continue
             if fctx.size() > maxfilesize:
                 ui.warn(
                     _(b'ignoring file larger than %s: %s\n')
                     % (util.bytecount(maxfilesize), path)
                 )
                 continue
-            workqueue.append((rev, path))
+            baserevs = tuple(ctx.rev() for ctx in basectxs[rev])
+            dstrevmap[(fctx.filerev(), baserevs, path)].append(rev)
             numitems[rev] += 1
+    workqueue = [
+        (min(dstrevs), path, dstrevs)
+        for (_filerev, _baserevs, path), dstrevs in dstrevmap.items()
+    ]
+    # Move work items for earlier changesets to the front of the queue, so we
+    # might be able to replace those changesets (in topological order) while
+    # we're still processing later work items. Note the min() in the previous
+    # expression, which means we don't need a custom comparator here. The path
+    # is also important in the sort order to make the output order stable. There
+    # are some situations where this doesn't help much, but some situations
+    # where it lets us buffer O(1) files instead of O(n) files.
+    workqueue.sort()
     return workqueue, numitems

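A toy run of the grouping described above, with invented revision numbers: two changesets that share the same file revision collapse into one work item whose dstrevs lists both, so the file is fixed once and written back twice.

import collections

# (filerev, baserevs, path) -> changesets holding that exact file revision
dstrevmap = collections.defaultdict(list)
for rev, filerev, path in [
    (1, 7, "foo/bar.txt"),
    (2, 7, "foo/bar.txt"),  # unchanged since rev 1: same filerev
    (3, 9, "foo/bar.txt"),  # file modified again in rev 3
]:
    dstrevmap[(filerev, (), path)].append(rev)

workqueue = sorted(
    (min(dstrevs), path, dstrevs)
    for (_filerev, _baserevs, path), dstrevs in dstrevmap.items()
)
# One fix computed at srcrev 1 is written back to dstrevs 1 and 2.
assert workqueue == [(1, "foo/bar.txt", [1, 2]), (3, "foo/bar.txt", [3])]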
418 def getrevstofix(ui, repo, opts):
450 def getrevstofix(ui, repo, opts):
419 """Returns the set of revision numbers that should be fixed"""
451 """Returns the set of revision numbers that should be fixed"""
420 if opts[b'all']:
452 if opts[b'all']:
421 revs = repo.revs(b'(not public() and not obsolete()) or wdir()')
453 revs = repo.revs(b'(not public() and not obsolete()) or wdir()')
422 elif opts[b'source']:
454 elif opts[b'source']:
423 source_revs = scmutil.revrange(repo, opts[b'source'])
455 source_revs = logcmdutil.revrange(repo, opts[b'source'])
424 revs = set(repo.revs(b'(%ld::) - obsolete()', source_revs))
456 revs = set(repo.revs(b'(%ld::) - obsolete()', source_revs))
425 if wdirrev in source_revs:
457 if wdirrev in source_revs:
426 # `wdir()::` is currently empty, so manually add wdir
458 # `wdir()::` is currently empty, so manually add wdir
427 revs.add(wdirrev)
459 revs.add(wdirrev)
428 if repo[b'.'].rev() in revs:
460 if repo[b'.'].rev() in revs:
429 revs.add(wdirrev)
461 revs.add(wdirrev)
430 else:
462 else:
431 revs = set(scmutil.revrange(repo, opts[b'rev']))
463 revs = set(logcmdutil.revrange(repo, opts[b'rev']))
432 if opts.get(b'working_dir'):
464 if opts.get(b'working_dir'):
433 revs.add(wdirrev)
465 revs.add(wdirrev)
434 for rev in revs:
466 for rev in revs:
435 checkfixablectx(ui, repo, repo[rev])
467 checkfixablectx(ui, repo, repo[rev])
436 # Allow fixing only wdir() even if there's an unfinished operation
468 # Allow fixing only wdir() even if there's an unfinished operation
437 if not (len(revs) == 1 and wdirrev in revs):
469 if not (len(revs) == 1 and wdirrev in revs):
438 cmdutil.checkunfinished(repo)
470 cmdutil.checkunfinished(repo)
439 rewriteutil.precheck(repo, revs, b'fix')
471 rewriteutil.precheck(repo, revs, b'fix')
440 if (
472 if (
441 wdirrev in revs
473 wdirrev in revs
442 and mergestatemod.mergestate.read(repo).unresolvedcount()
474 and mergestatemod.mergestate.read(repo).unresolvedcount()
443 ):
475 ):
444 raise error.Abort(b'unresolved conflicts', hint=b"use 'hg resolve'")
476 raise error.Abort(b'unresolved conflicts', hint=b"use 'hg resolve'")
445 if not revs:
477 if not revs:
446 raise error.Abort(
478 raise error.Abort(
447 b'no changesets specified', hint=b'use --source or --working-dir'
479 b'no changesets specified', hint=b'use --source or --working-dir'
448 )
480 )
449 return revs
481 return revs
450
482
451
483
452 def checkfixablectx(ui, repo, ctx):
484 def checkfixablectx(ui, repo, ctx):
453 """Aborts if the revision shouldn't be replaced with a fixed one."""
485 """Aborts if the revision shouldn't be replaced with a fixed one."""
454 if ctx.obsolete():
486 if ctx.obsolete():
455 # It would be better to actually check if the revision has a successor.
487 # It would be better to actually check if the revision has a successor.
456 if not obsolete.isenabled(repo, obsolete.allowdivergenceopt):
488 if not obsolete.isenabled(repo, obsolete.allowdivergenceopt):
457 raise error.Abort(
489 raise error.Abort(
458 b'fixing obsolete revision could cause divergence'
490 b'fixing obsolete revision could cause divergence'
459 )
491 )
460
492
461
493
462 def pathstofix(ui, repo, pats, opts, match, basectxs, fixctx):
494 def pathstofix(ui, repo, pats, opts, match, basectxs, fixctx):
463 """Returns the set of files that should be fixed in a context
495 """Returns the set of files that should be fixed in a context
464
496
465 The result depends on the base contexts; we include any file that has
497 The result depends on the base contexts; we include any file that has
466 changed relative to any of the base contexts. Base contexts should be
498 changed relative to any of the base contexts. Base contexts should be
467 ancestors of the context being fixed.
499 ancestors of the context being fixed.
468 """
500 """
469 files = set()
501 files = set()
470 for basectx in basectxs:
502 for basectx in basectxs:
471 stat = basectx.status(
503 stat = basectx.status(
472 fixctx, match=match, listclean=bool(pats), listunknown=bool(pats)
504 fixctx, match=match, listclean=bool(pats), listunknown=bool(pats)
473 )
505 )
474 files.update(
506 files.update(
475 set(
507 set(
476 itertools.chain(
508 itertools.chain(
477 stat.added, stat.modified, stat.clean, stat.unknown
509 stat.added, stat.modified, stat.clean, stat.unknown
478 )
510 )
479 )
511 )
480 )
512 )
481 return files
513 return files
482
514
483
515
484 def lineranges(opts, path, basepaths, basectxs, fixctx, content2):
516 def lineranges(opts, path, basepaths, basectxs, fixctx, content2):
485 """Returns the set of line ranges that should be fixed in a file
517 """Returns the set of line ranges that should be fixed in a file
486
518
487 Of the form [(10, 20), (30, 40)].
519 Of the form [(10, 20), (30, 40)].
488
520
489 This depends on the given base contexts; we must consider lines that have
521 This depends on the given base contexts; we must consider lines that have
490 changed versus any of the base contexts, and whether the file has been
522 changed versus any of the base contexts, and whether the file has been
491 renamed versus any of them.
523 renamed versus any of them.
492
524
493 Another way to understand this is that we exclude line ranges that are
525 Another way to understand this is that we exclude line ranges that are
494 common to the file in all base contexts.
526 common to the file in all base contexts.
495 """
527 """
496 if opts.get(b'whole'):
528 if opts.get(b'whole'):
497 # Return a range containing all lines. Rely on the diff implementation's
529 # Return a range containing all lines. Rely on the diff implementation's
498 # idea of how many lines are in the file, instead of reimplementing it.
530 # idea of how many lines are in the file, instead of reimplementing it.
499 return difflineranges(b'', content2)
531 return difflineranges(b'', content2)
500
532
501 rangeslist = []
533 rangeslist = []
502 for basectx in basectxs:
534 for basectx in basectxs:
503 basepath = basepaths.get((basectx.rev(), fixctx.rev(), path), path)
535 basepath = basepaths.get((basectx.rev(), fixctx.rev(), path), path)
504
536
505 if basepath in basectx:
537 if basepath in basectx:
506 content1 = basectx[basepath].data()
538 content1 = basectx[basepath].data()
507 else:
539 else:
508 content1 = b''
540 content1 = b''
509 rangeslist.extend(difflineranges(content1, content2))
541 rangeslist.extend(difflineranges(content1, content2))
510 return unionranges(rangeslist)
542 return unionranges(rangeslist)
511
543
512
544
513 def getbasepaths(repo, opts, workqueue, basectxs):
545 def getbasepaths(repo, opts, workqueue, basectxs):
514 if opts.get(b'whole'):
546 if opts.get(b'whole'):
515 # Base paths will never be fetched for line range determination.
547 # Base paths will never be fetched for line range determination.
516 return {}
548 return {}
517
549
518 basepaths = {}
550 basepaths = {}
519 for rev, path in workqueue:
551 for srcrev, path, _dstrevs in workqueue:
520 fixctx = repo[rev]
552 fixctx = repo[srcrev]
521 for basectx in basectxs[rev]:
553 for basectx in basectxs[srcrev]:
522 basepath = copies.pathcopies(basectx, fixctx).get(path, path)
554 basepath = copies.pathcopies(basectx, fixctx).get(path, path)
523 if basepath in basectx:
555 if basepath in basectx:
524 basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath
556 basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath
525 return basepaths
557 return basepaths
526
558
527
559
528 def unionranges(rangeslist):
560 def unionranges(rangeslist):
529 """Return the union of some closed intervals
561 """Return the union of some closed intervals
530
562
531 >>> unionranges([])
563 >>> unionranges([])
532 []
564 []
533 >>> unionranges([(1, 100)])
565 >>> unionranges([(1, 100)])
534 [(1, 100)]
566 [(1, 100)]
535 >>> unionranges([(1, 100), (1, 100)])
567 >>> unionranges([(1, 100), (1, 100)])
536 [(1, 100)]
568 [(1, 100)]
537 >>> unionranges([(1, 100), (2, 100)])
569 >>> unionranges([(1, 100), (2, 100)])
538 [(1, 100)]
570 [(1, 100)]
539 >>> unionranges([(1, 99), (1, 100)])
571 >>> unionranges([(1, 99), (1, 100)])
540 [(1, 100)]
572 [(1, 100)]
541 >>> unionranges([(1, 100), (40, 60)])
573 >>> unionranges([(1, 100), (40, 60)])
542 [(1, 100)]
574 [(1, 100)]
543 >>> unionranges([(1, 49), (50, 100)])
575 >>> unionranges([(1, 49), (50, 100)])
544 [(1, 100)]
576 [(1, 100)]
545 >>> unionranges([(1, 48), (50, 100)])
577 >>> unionranges([(1, 48), (50, 100)])
546 [(1, 48), (50, 100)]
578 [(1, 48), (50, 100)]
547 >>> unionranges([(1, 2), (3, 4), (5, 6)])
579 >>> unionranges([(1, 2), (3, 4), (5, 6)])
548 [(1, 6)]
580 [(1, 6)]
549 """
581 """
550 rangeslist = sorted(set(rangeslist))
582 rangeslist = sorted(set(rangeslist))
551 unioned = []
583 unioned = []
552 if rangeslist:
584 if rangeslist:
553 unioned, rangeslist = [rangeslist[0]], rangeslist[1:]
585 unioned, rangeslist = [rangeslist[0]], rangeslist[1:]
554 for a, b in rangeslist:
586 for a, b in rangeslist:
555 c, d = unioned[-1]
587 c, d = unioned[-1]
556 if a > d + 1:
588 if a > d + 1:
557 unioned.append((a, b))
589 unioned.append((a, b))
558 else:
590 else:
559 unioned[-1] = (c, max(b, d))
591 unioned[-1] = (c, max(b, d))
560 return unioned
592 return unioned
561
593
562
594
def difflineranges(content1, content2):
    """Return list of line number ranges in content2 that differ from content1.

    Line numbers are 1-based. The numbers are the first and last line contained
    in the range. Single-line ranges have the same line number for the first and
    last line. Excludes any empty ranges that result from lines that are only
    present in content1. Relies on mdiff's idea of where the line endings are in
    the string.

    >>> from mercurial import pycompat
    >>> lines = lambda s: b'\\n'.join([c for c in pycompat.iterbytestr(s)])
    >>> difflineranges2 = lambda a, b: difflineranges(lines(a), lines(b))
    >>> difflineranges2(b'', b'')
    []
    >>> difflineranges2(b'a', b'')
    []
    >>> difflineranges2(b'', b'A')
    [(1, 1)]
    >>> difflineranges2(b'a', b'a')
    []
    >>> difflineranges2(b'a', b'A')
    [(1, 1)]
    >>> difflineranges2(b'ab', b'')
    []
    >>> difflineranges2(b'', b'AB')
    [(1, 2)]
    >>> difflineranges2(b'abc', b'ac')
    []
    >>> difflineranges2(b'ab', b'aCb')
    [(2, 2)]
    >>> difflineranges2(b'abc', b'aBc')
    [(2, 2)]
    >>> difflineranges2(b'ab', b'AB')
    [(1, 2)]
    >>> difflineranges2(b'abcde', b'aBcDe')
    [(2, 2), (4, 4)]
    >>> difflineranges2(b'abcde', b'aBCDe')
    [(2, 4)]
    """
    ranges = []
    for lines, kind in mdiff.allblocks(content1, content2):
        firstline, lastline = lines[2:4]
        if kind == b'!' and firstline != lastline:
            ranges.append((firstline + 1, lastline))
    return ranges
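

# Illustrative sketch, not part of the extension API: roughly how the two
# helpers above compose inside lineranges(). The real lineranges() also
# handles the --whole flag and per-file base paths; this hypothetical
# stand-in only unions the per-base diff ranges for one file's contents.
def _changedranges_sketch(basecontents, newcontent):
    """Union of 1-based line ranges in newcontent that differ from any base.

    >>> _changedranges_sketch([b'a\\nb\\nc'], b'a\\nB\\nc')
    [(2, 2)]
    """
    rangeslist = []
    for base in basecontents:
        rangeslist.extend(difflineranges(base, newcontent))
    return unionranges(rangeslist)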


def getbasectxs(repo, opts, revstofix):
    """Returns a map of the base contexts for each revision

    The base contexts determine which lines are considered modified when we
    attempt to fix just the modified lines in a file. It also determines which
    files we attempt to fix, so it is important to compute this even when
    --whole is used.
    """
    # The --base flag overrides the usual logic, and we give every revision
    # exactly the set of baserevs that the user specified.
    if opts.get(b'base'):
-        baserevs = set(scmutil.revrange(repo, opts.get(b'base')))
+        baserevs = set(logcmdutil.revrange(repo, opts.get(b'base')))
        if not baserevs:
            baserevs = {nullrev}
        basectxs = {repo[rev] for rev in baserevs}
        return {rev: basectxs for rev in revstofix}

    # Proceed in topological order so that we can easily determine each
    # revision's baserevs by looking at its parents and their baserevs.
    basectxs = collections.defaultdict(set)
    for rev in sorted(revstofix):
        ctx = repo[rev]
        for pctx in ctx.parents():
            if pctx.rev() in basectxs:
                basectxs[rev].update(basectxs[pctx.rev()])
            else:
                basectxs[rev].add(pctx)
    return basectxs
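

# Illustrative sketch, not part of the extension: the topological
# accumulation above, restated over a plain parent map so the propagation
# is easy to follow. Revisions outside revstofix contribute themselves as
# bases; revisions inside it forward their own accumulated bases.
def _basemap_sketch(parents, revstofix):
    """Map each rev in revstofix to its set of base revs.

    `parents` maps rev -> list of parent revs; revs are ints numbered so
    that parents sort before children, as in a changelog.

    >>> sorted(_basemap_sketch({1: [0], 2: [1]}, {1, 2})[2])
    [0]
    """
    bases = {}
    for rev in sorted(revstofix):
        acc = set()
        for parent in parents[rev]:
            # A fixed parent forwards its bases; an unfixed one is a base.
            acc.update(bases.get(parent, {parent}))
        bases[rev] = acc
    return bases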


def _prefetchfiles(repo, workqueue, basepaths):
    toprefetch = set()

    # Prefetch the files that will be fixed.
-    for rev, path in workqueue:
-        if rev == wdirrev:
+    for srcrev, path, _dstrevs in workqueue:
+        if srcrev == wdirrev:
            continue
-        toprefetch.add((rev, path))
+        toprefetch.add((srcrev, path))

    # Prefetch the base contents for lineranges().
    for (baserev, fixrev, path), basepath in basepaths.items():
        toprefetch.add((baserev, basepath))

    if toprefetch:
        scmutil.prefetchfiles(
            repo,
            [
                (rev, scmutil.matchfiles(repo, [path]))
                for rev, path in toprefetch
            ],
        )


def fixfile(ui, repo, opts, fixers, fixctx, path, basepaths, basectxs):
    """Run any configured fixers that should affect the file in this context

    Returns the file content that results from applying the fixers in some order
    starting with the file's content in the fixctx. Fixers that support line
    ranges will affect lines that have changed relative to any of the basectxs
    (i.e. they will only avoid lines that are common to all basectxs).

    A fixer tool's stdout will become the file's new content if and only if it
    exits with code zero. The fixer tool's working directory is the repository's
    root.
    """
    metadata = {}
    newdata = fixctx[path].data()
    for fixername, fixer in pycompat.iteritems(fixers):
        if fixer.affects(opts, fixctx, path):
            ranges = lineranges(
                opts, path, basepaths, basectxs, fixctx, newdata
            )
            command = fixer.command(ui, path, ranges)
            if command is None:
                continue
            ui.debug(b'subprocess: %s\n' % (command,))
            proc = subprocess.Popen(
                procutil.tonativestr(command),
                shell=True,
                cwd=procutil.tonativestr(repo.root),
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            stdout, stderr = proc.communicate(newdata)
            if stderr:
                showstderr(ui, fixctx.rev(), fixername, stderr)
            newerdata = stdout
            if fixer.shouldoutputmetadata():
                try:
                    metadatajson, newerdata = stdout.split(b'\0', 1)
                    metadata[fixername] = pycompat.json_loads(metadatajson)
                except ValueError:
                    ui.warn(
                        _(b'ignored invalid output from fixer tool: %s\n')
                        % (fixername,)
                    )
                    continue
            else:
                metadata[fixername] = None
            if proc.returncode == 0:
                newdata = newerdata
            else:
                if not stderr:
                    message = _(b'exited with status %d\n') % (proc.returncode,)
                    showstderr(ui, fixctx.rev(), fixername, message)
                checktoolfailureaction(
                    ui,
                    _(b'no fixes will be applied'),
                    hint=_(
                        b'use --config fix.failure=continue to apply any '
                        b'successful fixes anyway'
                    ),
                )
    return metadata, newdata
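

# Illustrative sketch, not part of the extension: the fixer-tool protocol
# implemented above, reduced to the stdlib. Content is piped to the tool on
# stdin; stdout replaces the content only on a zero exit status; tools that
# emit metadata prefix stdout with a JSON blob and a NUL byte. The command
# in the usage note is a hypothetical example, not a fixer Mercurial ships.
import json
import subprocess


def _runfixer_sketch(command, data, expectmetadata=False):
    proc = subprocess.Popen(
        command,
        shell=True,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, stderr = proc.communicate(data)
    metadata = None
    if expectmetadata:
        # Invalid metadata output means the tool's result is discarded.
        try:
            metadatajson, stdout = stdout.split(b'\0', 1)
            metadata = json.loads(metadatajson)
        except ValueError:
            return None, data
    # Keep the original content unless the tool exited cleanly.
    newdata = stdout if proc.returncode == 0 else data
    return metadata, newdata


# e.g. on a POSIX shell, _runfixer_sketch('tr a-z A-Z', b'hello\n')
# returns (None, b'HELLO\n').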


def showstderr(ui, rev, fixername, stderr):
    """Writes the lines of the stderr string as warnings on the ui

    Uses the revision number and fixername to give more context to each line of
    the error message. Doesn't include file names, since those take up a lot of
    space and would tend to be included in the error message if they were
    relevant.
    """
    for line in re.split(b'[\r\n]+', stderr):
        if line:
            ui.warn(b'[')
            if rev is None:
                ui.warn(_(b'wdir'), label=b'evolve.rev')
            else:
                ui.warn(b'%d' % rev, label=b'evolve.rev')
            ui.warn(b'] %s: %s\n' % (fixername, line))


def writeworkingdir(repo, ctx, filedata, replacements):
    """Write new content to the working copy and check out the new p1 if any

    We check out a new revision if and only if we fixed something in both the
    working directory and its parent revision. This avoids the need for a full
    update/merge, and means that the working directory simply isn't affected
    unless the --working-dir flag is given.

    Directly updates the dirstate for the affected files.
    """
    for path, data in pycompat.iteritems(filedata):
        fctx = ctx[path]
        fctx.write(data, fctx.flags())

    oldp1 = repo.dirstate.p1()
    newp1 = replacements.get(oldp1, oldp1)
    if newp1 != oldp1:
        assert repo.dirstate.p2() == nullid
        with repo.dirstate.parentchange():
            scmutil.movedirstate(repo, repo[newp1])


def replacerev(ui, repo, ctx, filedata, replacements):
    """Commit a new revision like the given one, but with file content changes

    "ctx" is the original revision to be replaced by a modified one.

    "filedata" is a dict that maps paths to their new file content. All other
    paths will be recreated from the original revision without changes.
    "filedata" may contain paths that didn't exist in the original revision;
    they will be added.

    "replacements" is a dict that maps a single node to a single node, and it is
    updated to indicate the original revision is replaced by the newly created
    one. No entry is added if the replacement's node already exists.

    The new revision has the same parents as the old one, unless those parents
    have already been replaced, in which case those replacements are the parents
    of this new revision. Thus, if revisions are replaced in topological order,
    there is no need to rebase them into the original topology later.
    """

    p1rev, p2rev = repo.changelog.parentrevs(ctx.rev())
    p1ctx, p2ctx = repo[p1rev], repo[p2rev]
    newp1node = replacements.get(p1ctx.node(), p1ctx.node())
    newp2node = replacements.get(p2ctx.node(), p2ctx.node())

    # We don't want to create a revision that has no changes from the original,
    # but we should if the original revision's parent has been replaced.
    # Otherwise, we would produce an orphan that needs no actual human
    # intervention to evolve. We can't rely on commit() to avoid creating the
    # un-needed revision because the extra field added below produces a new hash
    # regardless of file content changes.
    if (
        not filedata
        and p1ctx.node() not in replacements
        and p2ctx.node() not in replacements
    ):
        return

    extra = ctx.extra().copy()
    extra[b'fix_source'] = ctx.hex()

    wctx = context.overlayworkingctx(repo)
    wctx.setbase(repo[newp1node])
    merge.revert_to(ctx, wc=wctx)
    copies.graftcopies(wctx, ctx, ctx.p1())

    for path in filedata.keys():
        fctx = ctx[path]
        copysource = fctx.copysource()
        wctx.write(path, filedata[path], flags=fctx.flags())
        if copysource:
            wctx.markcopied(path, copysource)

    desc = rewriteutil.update_hash_refs(
        repo,
        ctx.description(),
        {oldnode: [newnode] for oldnode, newnode in replacements.items()},
    )

    memctx = wctx.tomemctx(
        text=desc,
        branch=ctx.branch(),
        extra=extra,
        date=ctx.date(),
        parents=(newp1node, newp2node),
        user=ctx.user(),
    )

    sucnode = memctx.commit()
    prenode = ctx.node()
    if prenode == sucnode:
        ui.debug(b'node %s already existed\n' % (ctx.hex()))
    else:
        replacements[ctx.node()] = sucnode
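

# Illustrative sketch, not part of the extension: why topological order
# makes one pass suffice. Each replacement re-parents onto the already
# replaced version of its parents, so nothing needs rebasing afterwards.
def _propagate_sketch(order, parentof, replacements):
    """Resolve each node's effective parent after replacements.

    `order` is a topological ordering, `parentof` maps node -> parent node,
    and `replacements` maps original node -> replacement node (grown by the
    caller as new commits are created, as replacerev() does above).

    >>> _propagate_sketch([b'a', b'b'], {b'b': b'a'}, {b'a': b'A'})
    {b'b': b'A'}
    """
    newparents = {}
    for node in order:
        parent = parentof.get(node)
        if parent is not None:
            # Prefer the replacement parent when one exists.
            newparents[node] = replacements.get(parent, parent)
    return newparents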


def getfixers(ui):
    """Returns a map of configured fixer tools indexed by their names

    Each value is a Fixer object with methods that implement the behavior of the
    fixer's config suboptions. Does not validate the config values.
    """
    fixers = {}
    for name in fixernames(ui):
        enabled = ui.configbool(b'fix', name + b':enabled')
        command = ui.config(b'fix', name + b':command')
        pattern = ui.config(b'fix', name + b':pattern')
        linerange = ui.config(b'fix', name + b':linerange')
        priority = ui.configint(b'fix', name + b':priority')
        metadata = ui.configbool(b'fix', name + b':metadata')
        skipclean = ui.configbool(b'fix', name + b':skipclean')
        # Don't use a fixer if it has no pattern configured. It would be
        # dangerous to let it affect all files. It would be pointless to let it
        # affect no files. There is no reasonable subset of files to use as the
        # default.
        if command is None:
            ui.warn(
                _(b'fixer tool has no command configuration: %s\n') % (name,)
            )
        elif pattern is None:
            ui.warn(
                _(b'fixer tool has no pattern configuration: %s\n') % (name,)
            )
        elif not enabled:
            ui.debug(b'ignoring disabled fixer tool: %s\n' % (name,))
        else:
            fixers[name] = Fixer(
                command, pattern, linerange, priority, metadata, skipclean
            )
    return collections.OrderedDict(
        sorted(fixers.items(), key=lambda item: item[1]._priority, reverse=True)
    )
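

# Illustrative configuration, not a shipped default: a hypothetical fixer
# wired through the suboptions read above. Per the checks in getfixers(),
# a fixer is skipped unless :command and :pattern are set and it is enabled:
#
#   [fix]
#   upper:command = tr a-z A-Z
#   upper:pattern = set:**.txt
#   upper:enabled = true
#   upper:priority = 0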


def fixernames(ui):
    """Returns the names of [fix] config options that have suboptions"""
    names = set()
    for k, v in ui.configitems(b'fix'):
        if b':' in k:
            names.add(k.split(b':', 1)[0])
    return names


class Fixer(object):
    """Wraps the raw config values for a fixer with methods"""

    def __init__(
        self, command, pattern, linerange, priority, metadata, skipclean
    ):
        self._command = command
        self._pattern = pattern
        self._linerange = linerange
        self._priority = priority
        self._metadata = metadata
        self._skipclean = skipclean

    def affects(self, opts, fixctx, path):
        """Should this fixer run on the file at the given path and context?"""
        repo = fixctx.repo()
        matcher = matchmod.match(
            repo.root, repo.root, [self._pattern], ctx=fixctx
        )
        return matcher(path)

    def shouldoutputmetadata(self):
        """Should the stdout of this fixer start with JSON and a null byte?"""
        return self._metadata

    def command(self, ui, path, ranges):
        """A shell command to use to invoke this fixer on the given file/lines

        May return None if there is no appropriate command to run for the given
        parameters.
        """
        expand = cmdutil.rendercommandtemplate
        parts = [
            expand(
                ui,
                self._command,
                {b'rootpath': path, b'basename': os.path.basename(path)},
            )
        ]
        if self._linerange:
            if self._skipclean and not ranges:
                # No line ranges to fix, so don't run the fixer.
                return None
            for first, last in ranges:
                parts.append(
                    expand(
                        ui, self._linerange, {b'first': first, b'last': last}
                    )
                )
        return b' '.join(parts)
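

# Illustrative sketch, not part of the extension: how command() above
# renders its templates. With the hypothetical configuration
#   maintool:command = maintool --fix {rootpath}
#   maintool:linerange = --lines {first}:{last}
# and ranges [(1, 5), (10, 12)], the rendered invocation would be
#   maintool --fix sub/file.txt --lines 1:5 --lines 10:12
# A minimal stand-in for the {rootpath}/{first}/{last} expansion:
def _rendercommand_sketch(command, linerange, path, ranges):
    parts = [command.replace(b'{rootpath}', path)]
    for first, last in ranges:
        part = linerange.replace(b'{first}', b'%d' % first)
        parts.append(part.replace(b'{last}', b'%d' % last))
    return b' '.join(parts)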
@@ -1,1001 +1,1005 @@
# __init__.py - fsmonitor initialization and overrides
#
# Copyright 2013-2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''Faster status operations with the Watchman file monitor (EXPERIMENTAL)

Integrates the file-watching program Watchman with Mercurial to produce faster
status results.

On a particular Linux system, for a real-world repository with over 400,000
files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
system, with fsmonitor it takes about 0.3 seconds.

fsmonitor requires no configuration -- it will tell Watchman about your
repository as necessary. You'll need to install Watchman from
https://facebook.github.io/watchman/ and make sure it is in your PATH.

fsmonitor is incompatible with the largefiles and eol extensions, and
will disable itself if any of those are active.

The following configuration options exist:

::

    [fsmonitor]
    mode = {off, on, paranoid}

When `mode = off`, fsmonitor will disable itself (similar to not loading the
extension at all). When `mode = on`, fsmonitor will be enabled (the default).
When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
and ensure that the results are consistent.

::

    [fsmonitor]
    timeout = (float)

A value, in seconds, that determines how long fsmonitor will wait for Watchman
to return results. Defaults to `2.0`.

::

    [fsmonitor]
    blacklistusers = (list of userids)

A list of usernames for which fsmonitor will disable itself altogether.

::

    [fsmonitor]
    walk_on_invalidate = (boolean)

Whether or not to walk the whole repo ourselves when our cached state has been
invalidated, for example when Watchman has been restarted or .hgignore rules
have been changed. Walking the repo in that case can result in competing for
I/O with Watchman. For large repos it is recommended to set this value to
false. You may wish to set this to true if you have a very fast filesystem
that can outpace the IPC overhead of getting the result data for the full repo
from Watchman. Defaults to false.

::

    [fsmonitor]
    warn_when_unused = (boolean)

Whether to print a warning during certain operations when fsmonitor would be
beneficial to performance but isn't enabled.

::

    [fsmonitor]
    warn_update_file_count = (integer)
    # or when mercurial is built with rust support
    warn_update_file_count_rust = (integer)

If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will
be printed during working directory updates if this many files will be
created.
'''
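
# An illustrative combined configuration (the option names come from the
# docstring above; the values here are examples, not shipped defaults
# beyond those documented):
#
#   [fsmonitor]
#   mode = on
#   timeout = 2.0
#   walk_on_invalidate = false
#   warn_when_unused = true
#   warn_update_file_count = 50000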

# Platforms Supported
# ===================
#
# **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
#   even under severe loads.
#
# **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
#   turned on, on case-insensitive HFS+. There has been a reasonable amount of
#   user testing under normal loads.
#
# **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
#   very little testing has been done.
#
# **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
#
# Known Issues
# ============
#
# * fsmonitor will disable itself if any of the following extensions are
#   enabled: largefiles, inotify, eol; or if the repository has subrepos.
# * fsmonitor will produce incorrect results if nested repos that are not
#   subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
#
# The issues related to nested repos and subrepos are probably not fundamental
# ones. Patches to fix them are welcome.

from __future__ import absolute_import

import codecs
import os
import stat
import sys
import tempfile
import weakref

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open
from mercurial import (
    context,
    encoding,
    error,
    extensions,
    localrepo,
    merge,
    pathutil,
    pycompat,
    registrar,
    scmutil,
    util,
)
from mercurial import match as matchmod
from mercurial.utils import (
    hashutil,
    stringutil,
)

from . import (
    pywatchman,
    state,
    watchmanclient,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
    b'fsmonitor',
    b'mode',
    default=b'on',
)
configitem(
    b'fsmonitor',
    b'walk_on_invalidate',
    default=False,
)
configitem(
    b'fsmonitor',
    b'timeout',
    default=b'2',
)
configitem(
    b'fsmonitor',
    b'blacklistusers',
    default=list,
)
configitem(
    b'fsmonitor',
    b'watchman_exe',
    default=b'watchman',
)
configitem(
    b'fsmonitor',
    b'verbose',
    default=True,
    experimental=True,
)
configitem(
    b'experimental',
    b'fsmonitor.transaction_notify',
    default=False,
)

# This extension is incompatible with the following blacklisted extensions
# and will disable itself when encountering one of these:
_blacklist = [b'largefiles', b'eol']


def debuginstall(ui, fm):
    fm.write(
        b"fsmonitor-watchman",
        _(b"fsmonitor checking for watchman binary... (%s)\n"),
        ui.configpath(b"fsmonitor", b"watchman_exe"),
    )
    root = tempfile.mkdtemp()
    c = watchmanclient.client(ui, root)
    err = None
    try:
        v = c.command(b"version")
        fm.write(
            b"fsmonitor-watchman-version",
            _(b" watchman binary version %s\n"),
            pycompat.bytestr(v["version"]),
        )
    except watchmanclient.Unavailable as e:
        err = stringutil.forcebytestr(e)
    fm.condwrite(
        err,
        b"fsmonitor-watchman-error",
        _(b" watchman binary missing or broken: %s\n"),
        err,
    )
    return 1 if err else 0
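
# With a working install, the check above contributes lines like the
# following to `hg debuginstall` output (built from the format strings
# above; the version number here is hypothetical):
#
#   fsmonitor checking for watchman binary... (watchman)
#    watchman binary version 4.9.0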


def _handleunavailable(ui, state, ex):
    """Exception handler for Watchman interaction exceptions"""
    if isinstance(ex, watchmanclient.Unavailable):
        # experimental config: fsmonitor.verbose
        if ex.warn and ui.configbool(b'fsmonitor', b'verbose'):
            if b'illegal_fstypes' not in stringutil.forcebytestr(ex):
                ui.warn(stringutil.forcebytestr(ex) + b'\n')
        if ex.invalidate:
            state.invalidate()
        # experimental config: fsmonitor.verbose
        if ui.configbool(b'fsmonitor', b'verbose'):
            ui.log(
                b'fsmonitor',
                b'Watchman unavailable: %s\n',
                stringutil.forcebytestr(ex.msg),
            )
    else:
        ui.log(
            b'fsmonitor',
            b'Watchman exception: %s\n',
            stringutil.forcebytestr(ex),
        )


def _hashignore(ignore):
    """Calculate hash for ignore patterns and filenames

    If this information changes between Mercurial invocations, we can't
    rely on Watchman information anymore and have to re-scan the working
    copy.

    """
    sha1 = hashutil.sha1()
    sha1.update(pycompat.byterepr(ignore))
    return pycompat.sysbytes(sha1.hexdigest())


_watchmanencoding = pywatchman.encoding.get_local_encoding()
_fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
_fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)


def _watchmantofsencoding(path):
    """Fix path to match watchman and local filesystem encoding

    Watchman's path encoding can differ from the filesystem encoding. For
    example, on Windows it's always utf-8.
    """
    try:
        decoded = path.decode(_watchmanencoding)
    except UnicodeDecodeError as e:
        raise error.Abort(
            stringutil.forcebytestr(e), hint=b'watchman encoding error'
        )

    try:
        encoded = decoded.encode(_fsencoding, 'strict')
    except UnicodeEncodeError as e:
        raise error.Abort(stringutil.forcebytestr(e))

    return encoded
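
# Illustrative sketch: the transcoding above, as a standalone round trip.
# With a UTF-8 Watchman encoding and a latin-1 filesystem encoding
# (hypothetical values), a path is decoded then strictly re-encoded:
#
#   b'caf\xc3\xa9'.decode('utf-8').encode('latin-1') == b'caf\xe9'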


def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
    """Replacement for dirstate.walk, hooking into Watchman.

    Whenever full is False, ignored is False, and the Watchman client is
    available, use Watchman combined with saved state to possibly return only a
    subset of files."""

    def bail(reason):
        self._ui.debug(b'fsmonitor: fallback to core status, %s\n' % reason)
        return orig(match, subrepos, unknown, ignored, full=True)

    if full:
        return bail(b'full rewalk requested')
    if ignored:
        return bail(b'listing ignored files')
    if not self._watchmanclient.available():
        return bail(b'client unavailable')
    state = self._fsmonitorstate
    clock, ignorehash, notefiles = state.get()
    if not clock:
        if state.walk_on_invalidate:
            return bail(b'no clock')
        # Initial NULL clock value, see
        # https://facebook.github.io/watchman/docs/clockspec.html
        clock = b'c:0:0'
        notefiles = []

    ignore = self._ignore
    dirignore = self._dirignore
    if unknown:
        if _hashignore(ignore) != ignorehash and clock != b'c:0:0':
            # ignore list changed -- can't rely on Watchman state any more
            if state.walk_on_invalidate:
                return bail(b'ignore rules changed')
            notefiles = []
            clock = b'c:0:0'
    else:
        # always ignore
        ignore = util.always
        dirignore = util.always

    matchfn = match.matchfn
    matchalways = match.always()
    dmap = self._map
    if util.safehasattr(dmap, b'_map'):
        # for better performance, directly access the inner dirstate map if the
        # standard dirstate implementation is in use.
        dmap = dmap._map
-    nonnormalset = self._map.nonnormalset
+    nonnormalset = {
+        f
+        for f, e in self._map.items()
+        if e.v1_state() != "n" or e.v1_mtime() == -1
+    }

    copymap = self._map.copymap
    getkind = stat.S_IFMT
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self._join
    normcase = util.normcase
    fresh_instance = False

    exact = skipstep3 = False
    if match.isexact():  # match.exact
        exact = True
        dirignore = util.always  # skip step 2
    elif match.prefix():  # match.match, no patterns
        skipstep3 = True

    if not exact and self._checkcase:
        # note that even though we could receive directory entries, we're only
        # interested in checking if a file with the same name exists. So only
        # normalize files if possible.
        normalize = self._normalizefile
        skipstep3 = False
    else:
        normalize = None

    # step 1: find all explicit files
    results, work, dirsnotfound = self._walkexplicit(match, subrepos)

    skipstep3 = skipstep3 and not (work or dirsnotfound)
    work = [d for d in work if not dirignore(d[0])]

    if not work and (exact or skipstep3):
        for s in subrepos:
            del results[s]
        del results[b'.hg']
        return results

    # step 2: query Watchman
    try:
        # Use the user-configured timeout for the query.
        # Add a little slack over the top of the user query to allow for
        # overheads while transferring the data
        self._watchmanclient.settimeout(state.timeout + 0.1)
        result = self._watchmanclient.command(
            b'query',
            {
                b'fields': [b'mode', b'mtime', b'size', b'exists', b'name'],
                b'since': clock,
                b'expression': [
                    b'not',
                    [
                        b'anyof',
                        [b'dirname', b'.hg'],
                        [b'name', b'.hg', b'wholename'],
                    ],
                ],
                b'sync_timeout': int(state.timeout * 1000),
                b'empty_on_fresh_instance': state.walk_on_invalidate,
            },
        )
    except Exception as ex:
        _handleunavailable(self._ui, state, ex)
        self._watchmanclient.clearconnection()
        return bail(b'exception during run')
    else:
        # We need to propagate the last observed clock up so that we
        # can use it for our next query
        state.setlastclock(pycompat.sysbytes(result[b'clock']))
        if result[b'is_fresh_instance']:
            if state.walk_on_invalidate:
                state.invalidate()
                return bail(b'fresh instance')
            fresh_instance = True
            # Ignore any prior notable files from the state info
            notefiles = []

    # for file paths which require normalization and we encounter a case
    # collision, we store our own foldmap
    if normalize:
        foldmap = {normcase(k): k for k in results}

    switch_slashes = pycompat.ossep == b'\\'
    # The order of the results is, strictly speaking, undefined.
    # For case changes on a case insensitive filesystem we may receive
    # two entries, one with exists=True and another with exists=False.
    # The exists=True entries in the same response should be interpreted
    # as being happens-after the exists=False entries due to the way that
    # Watchman tracks files. We use this property to reconcile deletes
    # for name case changes.
    for entry in result[b'files']:
        fname = entry[b'name']

        # Watchman always gives us a str. Normalize to bytes on Python 3
        # using Watchman's encoding, if needed.
        if not isinstance(fname, bytes):
            fname = fname.encode(_watchmanencoding)

        if _fixencoding:
            fname = _watchmantofsencoding(fname)

        if switch_slashes:
            fname = fname.replace(b'\\', b'/')
        if normalize:
            normed = normcase(fname)
            fname = normalize(fname, True, True)
            foldmap[normed] = fname
        fmode = entry[b'mode']
        fexists = entry[b'exists']
        kind = getkind(fmode)

        if b'/.hg/' in fname or fname.endswith(b'/.hg'):
            return bail(b'nested-repo-detected')

        if not fexists:
            # if marked as deleted and we don't already have a change
            # record, mark it as deleted. If we already have an entry
            # for fname then it was either part of walkexplicit or was
            # an earlier result that was a case change
            if (
                fname not in results
                and fname in dmap
                and (matchalways or matchfn(fname))
            ):
                results[fname] = None
        elif kind == dirkind:
            if fname in dmap and (matchalways or matchfn(fname)):
                results[fname] = None
        elif kind == regkind or kind == lnkkind:
            if fname in dmap:
                if matchalways or matchfn(fname):
                    results[fname] = entry
            elif (matchalways or matchfn(fname)) and not ignore(fname):
                results[fname] = entry
        elif fname in dmap and (matchalways or matchfn(fname)):
            results[fname] = None

    # step 3: query notable files we don't already know about
    # XXX try not to iterate over the entire dmap
    if normalize:
        # any notable files that have changed case will already be handled
        # above, so just check membership in the foldmap
        notefiles = {
            normalize(f, True, True)
            for f in notefiles
            if normcase(f) not in foldmap
        }
    visit = {
        f
        for f in notefiles
        if (f not in results and matchfn(f) and (f in dmap or not ignore(f)))
    }

    if not fresh_instance:
        if matchalways:
            visit.update(f for f in nonnormalset if f not in results)
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(
                f for f in nonnormalset if f not in results and matchfn(f)
            )
            visit.update(f for f in copymap if f not in results and matchfn(f))
    else:
        if matchalways:
            visit.update(
                f for f, st in pycompat.iteritems(dmap) if f not in results
            )
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(
                f
                for f, st in pycompat.iteritems(dmap)
                if f not in results and matchfn(f)
            )
            visit.update(f for f in copymap if f not in results and matchfn(f))

    audit = pathutil.pathauditor(self._root, cached=True).check
    auditpass = [f for f in visit if audit(f)]
    auditpass.sort()
    auditfail = visit.difference(auditpass)
    for f in auditfail:
        results[f] = None

    nf = iter(auditpass)
    for st in util.statfiles([join(f) for f in auditpass]):
        f = next(nf)
        if st or f in dmap:
            results[f] = st

    for s in subrepos:
        del results[s]
    del results[b'.hg']
    return results
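

# In outline, the walk above proceeds as follows (illustrative summary,
# not extension code):
#
#   1. _walkexplicit() resolves explicitly named files.
#   2. A Watchman "since" query, relative to the saved clock, reports
#      everything changed since the last run; .hg/ is excluded in the
#      query expression itself.
#   3. Files Watchman could not vouch for (notefiles, non-normal dirstate
#      entries, copy sources) are stat()ed directly after path auditing.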


def overridestatus(
    orig,
    self,
    node1=b'.',
    node2=None,
    match=None,
    ignored=False,
    clean=False,
    unknown=False,
    listsubrepos=False,
):
    listignored = ignored
    listclean = clean
    listunknown = unknown

    def _cmpsets(l1, l2):
        try:
            if b'FSMONITOR_LOG_FILE' in encoding.environ:
                fn = encoding.environ[b'FSMONITOR_LOG_FILE']
                f = open(fn, b'wb')
            else:
                fn = b'fsmonitorfail.log'
                f = self.vfs.open(fn, b'wb')
        except (IOError, OSError):
            self.ui.warn(_(b'warning: unable to write to %s\n') % fn)
            return

        try:
            for i, (s1, s2) in enumerate(zip(l1, l2)):
                if set(s1) != set(s2):
                    f.write(b'sets at position %d are unequal\n' % i)
563 f.write(b'watchman returned: %s\n' % s1)
567 f.write(b'watchman returned: %r\n' % s1)
564 f.write(b'stat returned: %s\n' % s2)
568 f.write(b'stat returned: %r\n' % s2)
565 finally:
569 finally:
566 f.close()
570 f.close()
567
571
568 if isinstance(node1, context.changectx):
572 if isinstance(node1, context.changectx):
569 ctx1 = node1
573 ctx1 = node1
570 else:
574 else:
571 ctx1 = self[node1]
575 ctx1 = self[node1]
572 if isinstance(node2, context.changectx):
576 if isinstance(node2, context.changectx):
573 ctx2 = node2
577 ctx2 = node2
574 else:
578 else:
575 ctx2 = self[node2]
579 ctx2 = self[node2]
576
580
577 working = ctx2.rev() is None
581 working = ctx2.rev() is None
578 parentworking = working and ctx1 == self[b'.']
582 parentworking = working and ctx1 == self[b'.']
579 match = match or matchmod.always()
583 match = match or matchmod.always()
580
584
581 # Maybe we can use this opportunity to update Watchman's state.
585 # Maybe we can use this opportunity to update Watchman's state.
582 # Mercurial uses workingcommitctx and/or memctx to represent the part of
586 # Mercurial uses workingcommitctx and/or memctx to represent the part of
583 # the workingctx that is to be committed. So don't update the state in
587 # the workingctx that is to be committed. So don't update the state in
584 # that case.
588 # that case.
585 # HG_PENDING is set in the environment when the dirstate is being updated
589 # HG_PENDING is set in the environment when the dirstate is being updated
586 # in the middle of a transaction; we must not update our state in that
590 # in the middle of a transaction; we must not update our state in that
587 # case, or we risk forgetting about changes in the working copy.
591 # case, or we risk forgetting about changes in the working copy.
588 updatestate = (
592 updatestate = (
589 parentworking
593 parentworking
590 and match.always()
594 and match.always()
591 and not isinstance(ctx2, (context.workingcommitctx, context.memctx))
595 and not isinstance(ctx2, (context.workingcommitctx, context.memctx))
592 and b'HG_PENDING' not in encoding.environ
596 and b'HG_PENDING' not in encoding.environ
593 )
597 )
594
598
595 try:
599 try:
596 if self._fsmonitorstate.walk_on_invalidate:
600 if self._fsmonitorstate.walk_on_invalidate:
597 # Use a short timeout to query the current clock. If that
601 # Use a short timeout to query the current clock. If that
598 # takes too long then we assume that the service will be slow
602 # takes too long then we assume that the service will be slow
599 # to answer our query.
603 # to answer our query.
600 # walk_on_invalidate indicates that we prefer to walk the
604 # walk_on_invalidate indicates that we prefer to walk the
601 # tree ourselves because we can ignore portions that Watchman
605 # tree ourselves because we can ignore portions that Watchman
602 # cannot and we tend to be faster in the warmer buffer cache
606 # cannot and we tend to be faster in the warmer buffer cache
603 # cases.
607 # cases.
604 self._watchmanclient.settimeout(0.1)
608 self._watchmanclient.settimeout(0.1)
605 else:
609 else:
606 # Give Watchman more time to potentially complete its walk
610 # Give Watchman more time to potentially complete its walk
607 # and return the initial clock. In this mode we assume that
611 # and return the initial clock. In this mode we assume that
608 # the filesystem will be slower than parsing a potentially
612 # the filesystem will be slower than parsing a potentially
609 # very large Watchman result set.
613 # very large Watchman result set.
610 self._watchmanclient.settimeout(self._fsmonitorstate.timeout + 0.1)
614 self._watchmanclient.settimeout(self._fsmonitorstate.timeout + 0.1)
611 startclock = self._watchmanclient.getcurrentclock()
615 startclock = self._watchmanclient.getcurrentclock()
612 except Exception as ex:
616 except Exception as ex:
613 self._watchmanclient.clearconnection()
617 self._watchmanclient.clearconnection()
614 _handleunavailable(self.ui, self._fsmonitorstate, ex)
618 _handleunavailable(self.ui, self._fsmonitorstate, ex)
615 # boo, Watchman failed. bail
619 # boo, Watchman failed. bail
616 return orig(
620 return orig(
617 node1,
621 node1,
618 node2,
622 node2,
619 match,
623 match,
620 listignored,
624 listignored,
621 listclean,
625 listclean,
622 listunknown,
626 listunknown,
623 listsubrepos,
627 listsubrepos,
624 )
628 )
625
629
626 if updatestate:
630 if updatestate:
627 # We need info about unknown files. This may make things slower the
631 # We need info about unknown files. This may make things slower the
628 # first time, but whatever.
632 # first time, but whatever.
629 stateunknown = True
633 stateunknown = True
630 else:
634 else:
631 stateunknown = listunknown
635 stateunknown = listunknown
632
636
633 if updatestate:
637 if updatestate:
634 ps = poststatus(startclock)
638 ps = poststatus(startclock)
635 self.addpostdsstatus(ps)
639 self.addpostdsstatus(ps)
636
640
637 r = orig(
641 r = orig(
638 node1, node2, match, listignored, listclean, stateunknown, listsubrepos
642 node1, node2, match, listignored, listclean, stateunknown, listsubrepos
639 )
643 )
640 modified, added, removed, deleted, unknown, ignored, clean = r
644 modified, added, removed, deleted, unknown, ignored, clean = r
641
645
642 if not listunknown:
646 if not listunknown:
643 unknown = []
647 unknown = []
644
648
645 # don't do paranoid checks if we're not going to query Watchman anyway
649 # don't do paranoid checks if we're not going to query Watchman anyway
646 full = listclean or match.traversedir is not None
650 full = listclean or match.traversedir is not None
647 if self._fsmonitorstate.mode == b'paranoid' and not full:
651 if self._fsmonitorstate.mode == b'paranoid' and not full:
648 # run status again and fall back to the old walk this time
652 # run status again and fall back to the old walk this time
649 self.dirstate._fsmonitordisable = True
653 self.dirstate._fsmonitordisable = True
650
654
651 # shut the UI up
655 # shut the UI up
652 quiet = self.ui.quiet
656 quiet = self.ui.quiet
653 self.ui.quiet = True
657 self.ui.quiet = True
654 fout, ferr = self.ui.fout, self.ui.ferr
658 fout, ferr = self.ui.fout, self.ui.ferr
655 self.ui.fout = self.ui.ferr = open(os.devnull, b'wb')
659 self.ui.fout = self.ui.ferr = open(os.devnull, b'wb')
656
660
657 try:
661 try:
658 rv2 = orig(
662 rv2 = orig(
659 node1,
663 node1,
660 node2,
664 node2,
661 match,
665 match,
662 listignored,
666 listignored,
663 listclean,
667 listclean,
664 listunknown,
668 listunknown,
665 listsubrepos,
669 listsubrepos,
666 )
670 )
667 finally:
671 finally:
668 self.dirstate._fsmonitordisable = False
672 self.dirstate._fsmonitordisable = False
669 self.ui.quiet = quiet
673 self.ui.quiet = quiet
670 self.ui.fout, self.ui.ferr = fout, ferr
674 self.ui.fout, self.ui.ferr = fout, ferr
671
675
672 # clean isn't tested since it's set to True above
676 # clean isn't tested since it's set to True above
673 with self.wlock():
677 with self.wlock():
674 _cmpsets(
678 _cmpsets(
675 [modified, added, removed, deleted, unknown, ignored, clean],
679 [modified, added, removed, deleted, unknown, ignored, clean],
676 rv2,
680 rv2,
677 )
681 )
678 modified, added, removed, deleted, unknown, ignored, clean = rv2
682 modified, added, removed, deleted, unknown, ignored, clean = rv2
679
683
680 return scmutil.status(
684 return scmutil.status(
681 modified, added, removed, deleted, unknown, ignored, clean
685 modified, added, removed, deleted, unknown, ignored, clean
682 )
686 )
683
687
684
688
685 class poststatus(object):
689 class poststatus(object):
686 def __init__(self, startclock):
690 def __init__(self, startclock):
687 self._startclock = pycompat.sysbytes(startclock)
691 self._startclock = pycompat.sysbytes(startclock)
688
692
689 def __call__(self, wctx, status):
693 def __call__(self, wctx, status):
690 clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock
694 clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock
691 hashignore = _hashignore(wctx.repo().dirstate._ignore)
695 hashignore = _hashignore(wctx.repo().dirstate._ignore)
692 notefiles = (
696 notefiles = (
693 status.modified
697 status.modified
694 + status.added
698 + status.added
695 + status.removed
699 + status.removed
696 + status.deleted
700 + status.deleted
697 + status.unknown
701 + status.unknown
698 )
702 )
699 wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles)
703 wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles)
700
704
701
705
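# Hedged sketch (simplified, hypothetical call order) of how poststatus
# is driven: overridestatus() above registers it, and core Mercurial
# invokes it once the dirstate is written, persisting the watchman
# clock, the ignore-file hash and the "note" files for the next query.
#
#     ps = poststatus(startclock)
#     repo.addpostdsstatus(ps)       # registration, as in overridestatus()
#     ...status runs, dirstate is saved...
#     ps(wctx, status)               # -> _fsmonitorstate.set(clock, ...)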
702 def makedirstate(repo, dirstate):
706 def makedirstate(repo, dirstate):
703 class fsmonitordirstate(dirstate.__class__):
707 class fsmonitordirstate(dirstate.__class__):
704 def _fsmonitorinit(self, repo):
708 def _fsmonitorinit(self, repo):
705 # _fsmonitordisable is used in paranoid mode
709 # _fsmonitordisable is used in paranoid mode
706 self._fsmonitordisable = False
710 self._fsmonitordisable = False
707 self._fsmonitorstate = repo._fsmonitorstate
711 self._fsmonitorstate = repo._fsmonitorstate
708 self._watchmanclient = repo._watchmanclient
712 self._watchmanclient = repo._watchmanclient
709 self._repo = weakref.proxy(repo)
713 self._repo = weakref.proxy(repo)
710
714
711 def walk(self, *args, **kwargs):
715 def walk(self, *args, **kwargs):
712 orig = super(fsmonitordirstate, self).walk
716 orig = super(fsmonitordirstate, self).walk
713 if self._fsmonitordisable:
717 if self._fsmonitordisable:
714 return orig(*args, **kwargs)
718 return orig(*args, **kwargs)
715 return overridewalk(orig, self, *args, **kwargs)
719 return overridewalk(orig, self, *args, **kwargs)
716
720
717 def rebuild(self, *args, **kwargs):
721 def rebuild(self, *args, **kwargs):
718 self._fsmonitorstate.invalidate()
722 self._fsmonitorstate.invalidate()
719 return super(fsmonitordirstate, self).rebuild(*args, **kwargs)
723 return super(fsmonitordirstate, self).rebuild(*args, **kwargs)
720
724
721 def invalidate(self, *args, **kwargs):
725 def invalidate(self, *args, **kwargs):
722 self._fsmonitorstate.invalidate()
726 self._fsmonitorstate.invalidate()
723 return super(fsmonitordirstate, self).invalidate(*args, **kwargs)
727 return super(fsmonitordirstate, self).invalidate(*args, **kwargs)
724
728
725 dirstate.__class__ = fsmonitordirstate
729 dirstate.__class__ = fsmonitordirstate
726 dirstate._fsmonitorinit(repo)
730 dirstate._fsmonitorinit(repo)
727
731
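# Hedged note on the idiom above (illustration only): makedirstate()
# does not build a new object; it swaps the *class* of the live
# dirstate instance so all existing state is retained, then bolts on
# the fsmonitor fields:
#
#     dirstate.__class__ = fsmonitordirstate   # same object, new class
#     dirstate._fsmonitorinit(repo)            # add fsmonitor state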
728
732
729 def wrapdirstate(orig, self):
733 def wrapdirstate(orig, self):
730 ds = orig(self)
734 ds = orig(self)
731 # only override the dirstate when Watchman is available for the repo
735 # only override the dirstate when Watchman is available for the repo
732 if util.safehasattr(self, b'_fsmonitorstate'):
736 if util.safehasattr(self, b'_fsmonitorstate'):
733 makedirstate(self, ds)
737 makedirstate(self, ds)
734 return ds
738 return ds
735
739
736
740
737 def extsetup(ui):
741 def extsetup(ui):
738 extensions.wrapfilecache(
742 extensions.wrapfilecache(
739 localrepo.localrepository, b'dirstate', wrapdirstate
743 localrepo.localrepository, b'dirstate', wrapdirstate
740 )
744 )
741 if pycompat.isdarwin:
745 if pycompat.isdarwin:
742 # An assist for avoiding the dangling-symlink fsevents bug
746 # An assist for avoiding the dangling-symlink fsevents bug
743 extensions.wrapfunction(os, b'symlink', wrapsymlink)
747 extensions.wrapfunction(os, b'symlink', wrapsymlink)
744
748
745 extensions.wrapfunction(merge, b'_update', wrapupdate)
749 extensions.wrapfunction(merge, b'_update', wrapupdate)
746
750
747
751
748 def wrapsymlink(orig, source, link_name):
752 def wrapsymlink(orig, source, link_name):
749 """if we create a dangling symlink, also touch the parent dir
753 """if we create a dangling symlink, also touch the parent dir
750 to encourage fsevents notifications to work more correctly"""
754 to encourage fsevents notifications to work more correctly"""
751 try:
755 try:
752 return orig(source, link_name)
756 return orig(source, link_name)
753 finally:
757 finally:
754 try:
758 try:
755 os.utime(os.path.dirname(link_name), None)
759 os.utime(os.path.dirname(link_name), None)
756 except OSError:
760 except OSError:
757 pass
761 pass
758
762
759
763
760 class state_update(object):
764 class state_update(object):
761 """This context manager is responsible for dispatching the state-enter
765 """This context manager is responsible for dispatching the state-enter
762 and state-leave signals to the watchman service. The enter and leave
766 and state-leave signals to the watchman service. The enter and leave
763 methods can be invoked manually (for scenarios where context manager
767 methods can be invoked manually (for scenarios where context manager
764 semantics are not possible). If parameters oldnode and newnode are None,
768 semantics are not possible). If parameters oldnode and newnode are None,
765 they will be populated based on the current working copy in enter and
769 they will be populated based on the current working copy in enter and
766 leave, respectively. Similarly, if distance is None, it will be
770 leave, respectively. Similarly, if distance is None, it will be
767 calculated based on the oldnode and newnode in the leave method."""
771 calculated based on the oldnode and newnode in the leave method."""
768
772
769 def __init__(
773 def __init__(
770 self,
774 self,
771 repo,
775 repo,
772 name,
776 name,
773 oldnode=None,
777 oldnode=None,
774 newnode=None,
778 newnode=None,
775 distance=None,
779 distance=None,
776 partial=False,
780 partial=False,
777 ):
781 ):
778 self.repo = repo.unfiltered()
782 self.repo = repo.unfiltered()
779 self.name = name
783 self.name = name
780 self.oldnode = oldnode
784 self.oldnode = oldnode
781 self.newnode = newnode
785 self.newnode = newnode
782 self.distance = distance
786 self.distance = distance
783 self.partial = partial
787 self.partial = partial
784 self._lock = None
788 self._lock = None
785 self.need_leave = False
789 self.need_leave = False
786
790
787 def __enter__(self):
791 def __enter__(self):
788 self.enter()
792 self.enter()
789
793
790 def enter(self):
794 def enter(self):
791 # Make sure we have a wlock prior to sending notifications to watchman.
795 # Make sure we have a wlock prior to sending notifications to watchman.
792 # We don't want to race with other actors. In the update case,
796 # We don't want to race with other actors. In the update case,
793 # merge.update is going to take the wlock almost immediately. We are
797 # merge.update is going to take the wlock almost immediately. We are
794 # effectively extending the lock around several short sanity checks.
798 # effectively extending the lock around several short sanity checks.
795 if self.oldnode is None:
799 if self.oldnode is None:
796 self.oldnode = self.repo[b'.'].node()
800 self.oldnode = self.repo[b'.'].node()
797
801
798 if self.repo.currentwlock() is None:
802 if self.repo.currentwlock() is None:
799 if util.safehasattr(self.repo, b'wlocknostateupdate'):
803 if util.safehasattr(self.repo, b'wlocknostateupdate'):
800 self._lock = self.repo.wlocknostateupdate()
804 self._lock = self.repo.wlocknostateupdate()
801 else:
805 else:
802 self._lock = self.repo.wlock()
806 self._lock = self.repo.wlock()
803 self.need_leave = self._state(b'state-enter', hex(self.oldnode))
807 self.need_leave = self._state(b'state-enter', hex(self.oldnode))
804 return self
808 return self
805
809
806 def __exit__(self, type_, value, tb):
810 def __exit__(self, type_, value, tb):
807 abort = True if type_ else False
811 abort = True if type_ else False
808 self.exit(abort=abort)
812 self.exit(abort=abort)
809
813
810 def exit(self, abort=False):
814 def exit(self, abort=False):
811 try:
815 try:
812 if self.need_leave:
816 if self.need_leave:
813 status = b'failed' if abort else b'ok'
817 status = b'failed' if abort else b'ok'
814 if self.newnode is None:
818 if self.newnode is None:
815 self.newnode = self.repo[b'.'].node()
819 self.newnode = self.repo[b'.'].node()
816 if self.distance is None:
820 if self.distance is None:
817 self.distance = calcdistance(
821 self.distance = calcdistance(
818 self.repo, self.oldnode, self.newnode
822 self.repo, self.oldnode, self.newnode
819 )
823 )
820 self._state(b'state-leave', hex(self.newnode), status=status)
824 self._state(b'state-leave', hex(self.newnode), status=status)
821 finally:
825 finally:
822 self.need_leave = False
826 self.need_leave = False
823 if self._lock:
827 if self._lock:
824 self._lock.release()
828 self._lock.release()
825
829
826 def _state(self, cmd, commithash, status=b'ok'):
830 def _state(self, cmd, commithash, status=b'ok'):
827 if not util.safehasattr(self.repo, b'_watchmanclient'):
831 if not util.safehasattr(self.repo, b'_watchmanclient'):
828 return False
832 return False
829 try:
833 try:
830 self.repo._watchmanclient.command(
834 self.repo._watchmanclient.command(
831 cmd,
835 cmd,
832 {
836 {
833 b'name': self.name,
837 b'name': self.name,
834 b'metadata': {
838 b'metadata': {
835 # the target revision
839 # the target revision
836 b'rev': commithash,
840 b'rev': commithash,
837 # approximate number of commits between current and target
841 # approximate number of commits between current and target
838 b'distance': self.distance if self.distance else 0,
842 b'distance': self.distance if self.distance else 0,
839 # success/failure (only really meaningful for state-leave)
843 # success/failure (only really meaningful for state-leave)
840 b'status': status,
844 b'status': status,
841 # whether the working copy parent is changing
845 # whether the working copy parent is changing
842 b'partial': self.partial,
846 b'partial': self.partial,
843 },
847 },
844 },
848 },
845 )
849 )
846 return True
850 return True
847 except Exception as e:
851 except Exception as e:
848 # Swallow any errors; fire and forget
852 # Swallow any errors; fire and forget
849 self.repo.ui.log(
853 self.repo.ui.log(
850 b'watchman', b'Exception %s while running %s\n', e, cmd
854 b'watchman', b'Exception %s while running %s\n', e, cmd
851 )
855 )
852 return False
856 return False
853
857
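# Hedged usage sketch: wrapupdate() below drives state_update as a
# context manager; the explicit enter()/exit() pair is for call sites
# where `with` is impractical (e.g. the transaction wlock hook in
# reposetup). The node and distance values here are placeholders:
#
#     with state_update(repo, name=b"hg.update", oldnode=old,
#                       newnode=new, distance=d, partial=False):
#         ...mutate the working copy...
#
# __enter__ sends watchman 'state-enter'; __exit__ pairs it with
# 'state-leave', reporting status=b'failed' if an exception escaped.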
854
858
855 # Estimate the distance between two nodes
859 # Estimate the distance between two nodes
856 def calcdistance(repo, oldnode, newnode):
860 def calcdistance(repo, oldnode, newnode):
857 anc = repo.changelog.ancestor(oldnode, newnode)
861 anc = repo.changelog.ancestor(oldnode, newnode)
858 ancrev = repo[anc].rev()
862 ancrev = repo[anc].rev()
859 distance = abs(repo[oldnode].rev() - ancrev) + abs(
863 distance = abs(repo[oldnode].rev() - ancrev) + abs(
860 repo[newnode].rev() - ancrev
864 repo[newnode].rev() - ancrev
861 )
865 )
862 return distance
866 return distance
863
867
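# Worked example (hypothetical revision numbers, not from any real
# repository): with oldnode at rev 12, newnode at rev 15 and their
# common ancestor at rev 10, calcdistance returns
#     abs(12 - 10) + abs(15 - 10) == 7
# i.e. the hops up to the ancestor plus the hops back down.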
864
868
865 # Bracket working copy updates with calls to the watchman state-enter
869 # Bracket working copy updates with calls to the watchman state-enter
866 # and state-leave commands. This allows clients to perform more intelligent
870 # and state-leave commands. This allows clients to perform more intelligent
867 # settling during bulk file change scenarios
871 # settling during bulk file change scenarios
868 # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
872 # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
869 def wrapupdate(
873 def wrapupdate(
870 orig,
874 orig,
871 repo,
875 repo,
872 node,
876 node,
873 branchmerge,
877 branchmerge,
874 force,
878 force,
875 ancestor=None,
879 ancestor=None,
876 mergeancestor=False,
880 mergeancestor=False,
877 labels=None,
881 labels=None,
878 matcher=None,
882 matcher=None,
879 **kwargs
883 **kwargs
880 ):
884 ):
881
885
882 distance = 0
886 distance = 0
883 partial = True
887 partial = True
884 oldnode = repo[b'.'].node()
888 oldnode = repo[b'.'].node()
885 newnode = repo[node].node()
889 newnode = repo[node].node()
886 if matcher is None or matcher.always():
890 if matcher is None or matcher.always():
887 partial = False
891 partial = False
888 distance = calcdistance(repo.unfiltered(), oldnode, newnode)
892 distance = calcdistance(repo.unfiltered(), oldnode, newnode)
889
893
890 with state_update(
894 with state_update(
891 repo,
895 repo,
892 name=b"hg.update",
896 name=b"hg.update",
893 oldnode=oldnode,
897 oldnode=oldnode,
894 newnode=newnode,
898 newnode=newnode,
895 distance=distance,
899 distance=distance,
896 partial=partial,
900 partial=partial,
897 ):
901 ):
898 return orig(
902 return orig(
899 repo,
903 repo,
900 node,
904 node,
901 branchmerge,
905 branchmerge,
902 force,
906 force,
903 ancestor,
907 ancestor,
904 mergeancestor,
908 mergeancestor,
905 labels,
909 labels,
906 matcher,
910 matcher,
907 **kwargs
911 **kwargs
908 )
912 )
909
913
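# Hedged sketch (payload shapes inferred from the _state() metadata
# above and the watchman "advanced settling" docs linked earlier) of
# what a subscriber might observe around an update:
#
#     state-enter  {"name": "hg.update", "metadata": {"rev": "...",
#                   "distance": 7, "status": "ok", "partial": false}}
#     ...file changes from the update settle...
#     state-leave  {"name": "hg.update", "metadata": {"status": "ok"}}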
910
914
911 def repo_has_depth_one_nested_repo(repo):
915 def repo_has_depth_one_nested_repo(repo):
912 for f in repo.wvfs.listdir():
916 for f in repo.wvfs.listdir():
913 if os.path.isdir(os.path.join(repo.root, f, b'.hg')):
917 if os.path.isdir(os.path.join(repo.root, f, b'.hg')):
914 msg = b'fsmonitor: sub-repository %r detected, fsmonitor disabled\n'
918 msg = b'fsmonitor: sub-repository %r detected, fsmonitor disabled\n'
915 repo.ui.debug(msg % f)
919 repo.ui.debug(msg % f)
916 return True
920 return True
917 return False
921 return False
918
922
919
923
920 def reposetup(ui, repo):
924 def reposetup(ui, repo):
921 # We don't work with largefiles or inotify
925 # We don't work with largefiles or inotify
922 exts = extensions.enabled()
926 exts = extensions.enabled()
923 for ext in _blacklist:
927 for ext in _blacklist:
924 if ext in exts:
928 if ext in exts:
925 ui.warn(
929 ui.warn(
926 _(
930 _(
927 b'The fsmonitor extension is incompatible with the %s '
931 b'The fsmonitor extension is incompatible with the %s '
928 b'extension and has been disabled.\n'
932 b'extension and has been disabled.\n'
929 )
933 )
930 % ext
934 % ext
931 )
935 )
932 return
936 return
933
937
934 if repo.local():
938 if repo.local():
935 # We don't work with subrepos either.
939 # We don't work with subrepos either.
936 #
940 #
937 # reading repo[None].substate can cause a dirstate parse, which is too
941 # reading repo[None].substate can cause a dirstate parse, which is too
938 # slow. Instead, look for the files .hgsubstate or .hgsub.
942 # slow. Instead, look for the files .hgsubstate or .hgsub.
939 if repo.wvfs.exists(b'.hgsubstate') or repo.wvfs.exists(b'.hgsub'):
943 if repo.wvfs.exists(b'.hgsubstate') or repo.wvfs.exists(b'.hgsub'):
940 return
944 return
941
945
942 if repo_has_depth_one_nested_repo(repo):
946 if repo_has_depth_one_nested_repo(repo):
943 return
947 return
944
948
945 fsmonitorstate = state.state(repo)
949 fsmonitorstate = state.state(repo)
946 if fsmonitorstate.mode == b'off':
950 if fsmonitorstate.mode == b'off':
947 return
951 return
948
952
949 try:
953 try:
950 client = watchmanclient.client(repo.ui, repo.root)
954 client = watchmanclient.client(repo.ui, repo.root)
951 except Exception as ex:
955 except Exception as ex:
952 _handleunavailable(ui, fsmonitorstate, ex)
956 _handleunavailable(ui, fsmonitorstate, ex)
953 return
957 return
954
958
955 repo._fsmonitorstate = fsmonitorstate
959 repo._fsmonitorstate = fsmonitorstate
956 repo._watchmanclient = client
960 repo._watchmanclient = client
957
961
958 dirstate, cached = localrepo.isfilecached(repo, b'dirstate')
962 dirstate, cached = localrepo.isfilecached(repo, b'dirstate')
959 if cached:
963 if cached:
960 # at this point since fsmonitorstate wasn't present,
964 # at this point since fsmonitorstate wasn't present,
961 # repo.dirstate is not an fsmonitordirstate
965 # repo.dirstate is not an fsmonitordirstate
962 makedirstate(repo, dirstate)
966 makedirstate(repo, dirstate)
963
967
964 class fsmonitorrepo(repo.__class__):
968 class fsmonitorrepo(repo.__class__):
965 def status(self, *args, **kwargs):
969 def status(self, *args, **kwargs):
966 orig = super(fsmonitorrepo, self).status
970 orig = super(fsmonitorrepo, self).status
967 return overridestatus(orig, self, *args, **kwargs)
971 return overridestatus(orig, self, *args, **kwargs)
968
972
969 def wlocknostateupdate(self, *args, **kwargs):
973 def wlocknostateupdate(self, *args, **kwargs):
970 return super(fsmonitorrepo, self).wlock(*args, **kwargs)
974 return super(fsmonitorrepo, self).wlock(*args, **kwargs)
971
975
972 def wlock(self, *args, **kwargs):
976 def wlock(self, *args, **kwargs):
973 l = super(fsmonitorrepo, self).wlock(*args, **kwargs)
977 l = super(fsmonitorrepo, self).wlock(*args, **kwargs)
974 if not ui.configbool(
978 if not ui.configbool(
975 b"experimental", b"fsmonitor.transaction_notify"
979 b"experimental", b"fsmonitor.transaction_notify"
976 ):
980 ):
977 return l
981 return l
978 if l.held != 1:
982 if l.held != 1:
979 return l
983 return l
980 origrelease = l.releasefn
984 origrelease = l.releasefn
981
985
982 def staterelease():
986 def staterelease():
983 if origrelease:
987 if origrelease:
984 origrelease()
988 origrelease()
985 if l.stateupdate:
989 if l.stateupdate:
986 l.stateupdate.exit()
990 l.stateupdate.exit()
987 l.stateupdate = None
991 l.stateupdate = None
988
992
989 try:
993 try:
990 l.stateupdate = None
994 l.stateupdate = None
991 l.stateupdate = state_update(self, name=b"hg.transaction")
995 l.stateupdate = state_update(self, name=b"hg.transaction")
992 l.stateupdate.enter()
996 l.stateupdate.enter()
993 l.releasefn = staterelease
997 l.releasefn = staterelease
994 except Exception as e:
998 except Exception as e:
995 # Swallow any errors; fire and forget
999 # Swallow any errors; fire and forget
996 self.ui.log(
1000 self.ui.log(
997 b'watchman', b'Exception in state update %s\n', e
1001 b'watchman', b'Exception in state update %s\n', e
998 )
1002 )
999 return l
1003 return l
1000
1004
1001 repo.__class__ = fsmonitorrepo
1005 repo.__class__ = fsmonitorrepo
This diff has been collapsed as it changes many lines (651 lines changed).
@@ -1,2665 +1,2678 b''
1 # histedit.py - interactive history editing for mercurial
1 # histedit.py - interactive history editing for mercurial
2 #
2 #
3 # Copyright 2009 Augie Fackler <raf@durin42.com>
3 # Copyright 2009 Augie Fackler <raf@durin42.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """interactive history editing
7 """interactive history editing
8
8
9 With this extension installed, Mercurial gains one new command: histedit. Usage
9 With this extension installed, Mercurial gains one new command: histedit. Usage
10 is as follows, assuming the following history::
10 is as follows, assuming the following history::
11
11
12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
13 | Add delta
13 | Add delta
14 |
14 |
15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
16 | Add gamma
16 | Add gamma
17 |
17 |
18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
19 | Add beta
19 | Add beta
20 |
20 |
21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
22 Add alpha
22 Add alpha
23
23
24 If you were to run ``hg histedit c561b4e977df``, you would see the following
24 If you were to run ``hg histedit c561b4e977df``, you would see the following
25 file open in your editor::
25 file open in your editor::
26
26
27 pick c561b4e977df Add beta
27 pick c561b4e977df Add beta
28 pick 030b686bedc4 Add gamma
28 pick 030b686bedc4 Add gamma
29 pick 7c2fd3b9020c Add delta
29 pick 7c2fd3b9020c Add delta
30
30
31 # Edit history between c561b4e977df and 7c2fd3b9020c
31 # Edit history between c561b4e977df and 7c2fd3b9020c
32 #
32 #
33 # Commits are listed from least to most recent
33 # Commits are listed from least to most recent
34 #
34 #
35 # Commands:
35 # Commands:
36 # p, pick = use commit
36 # p, pick = use commit
37 # e, edit = use commit, but allow edits before making new commit
37 # e, edit = use commit, but allow edits before making new commit
38 # f, fold = use commit, but combine it with the one above
38 # f, fold = use commit, but combine it with the one above
39 # r, roll = like fold, but discard this commit's description and date
39 # r, roll = like fold, but discard this commit's description and date
40 # d, drop = remove commit from history
40 # d, drop = remove commit from history
41 # m, mess = edit commit message without changing commit content
41 # m, mess = edit commit message without changing commit content
42 # b, base = checkout changeset and apply further changesets from there
42 # b, base = checkout changeset and apply further changesets from there
43 #
43 #
44
44
45 In this file, lines beginning with ``#`` are ignored. You must specify a rule
45 In this file, lines beginning with ``#`` are ignored. You must specify a rule
46 for each revision in your history. For example, if you had meant to add gamma
46 for each revision in your history. For example, if you had meant to add gamma
47 before beta, and then wanted to add delta in the same revision as beta, you
47 before beta, and then wanted to add delta in the same revision as beta, you
48 would reorganize the file to look like this::
48 would reorganize the file to look like this::
49
49
50 pick 030b686bedc4 Add gamma
50 pick 030b686bedc4 Add gamma
51 pick c561b4e977df Add beta
51 pick c561b4e977df Add beta
52 fold 7c2fd3b9020c Add delta
52 fold 7c2fd3b9020c Add delta
53
53
54 # Edit history between c561b4e977df and 7c2fd3b9020c
54 # Edit history between c561b4e977df and 7c2fd3b9020c
55 #
55 #
56 # Commits are listed from least to most recent
56 # Commits are listed from least to most recent
57 #
57 #
58 # Commands:
58 # Commands:
59 # p, pick = use commit
59 # p, pick = use commit
60 # e, edit = use commit, but allow edits before making new commit
60 # e, edit = use commit, but allow edits before making new commit
61 # f, fold = use commit, but combine it with the one above
61 # f, fold = use commit, but combine it with the one above
62 # r, roll = like fold, but discard this commit's description and date
62 # r, roll = like fold, but discard this commit's description and date
63 # d, drop = remove commit from history
63 # d, drop = remove commit from history
64 # m, mess = edit commit message without changing commit content
64 # m, mess = edit commit message without changing commit content
65 # b, base = checkout changeset and apply further changesets from there
65 # b, base = checkout changeset and apply further changesets from there
66 #
66 #
67
67
68 At which point you close the editor and ``histedit`` starts working. When you
68 At which point you close the editor and ``histedit`` starts working. When you
69 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
69 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
70 those revisions together, offering you a chance to clean up the commit message::
70 those revisions together, offering you a chance to clean up the commit message::
71
71
72 Add beta
72 Add beta
73 ***
73 ***
74 Add delta
74 Add delta
75
75
76 Edit the commit message to your liking, then close the editor. The date used
76 Edit the commit message to your liking, then close the editor. The date used
77 for the commit will be the later of the two commits' dates. For this example,
77 for the commit will be the later of the two commits' dates. For this example,
78 let's assume that the commit message was changed to ``Add beta and delta.``
78 let's assume that the commit message was changed to ``Add beta and delta.``
79 After histedit has run and had a chance to remove any old or temporary
79 After histedit has run and had a chance to remove any old or temporary
80 revisions it needed, the history looks like this::
80 revisions it needed, the history looks like this::
81
81
82 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
82 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
83 | Add beta and delta.
83 | Add beta and delta.
84 |
84 |
85 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
85 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
86 | Add gamma
86 | Add gamma
87 |
87 |
88 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
88 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
89 Add alpha
89 Add alpha
90
90
91 Note that ``histedit`` does *not* remove any revisions (even its own temporary
91 Note that ``histedit`` does *not* remove any revisions (even its own temporary
92 ones) until after it has completed all the editing operations, so it will
92 ones) until after it has completed all the editing operations, so it will
93 probably perform several strip operations when it's done. For the above example,
93 probably perform several strip operations when it's done. For the above example,
94 it had to run strip twice. Strip can be slow depending on a variety of factors,
94 it had to run strip twice. Strip can be slow depending on a variety of factors,
95 so you might need to be a little patient. You can choose to keep the original
95 so you might need to be a little patient. You can choose to keep the original
96 revisions by passing the ``--keep`` flag.
96 revisions by passing the ``--keep`` flag.
97
97
98 The ``edit`` operation will drop you back to a command prompt,
98 The ``edit`` operation will drop you back to a command prompt,
99 allowing you to edit files freely, or even use ``hg record`` to commit
99 allowing you to edit files freely, or even use ``hg record`` to commit
100 some changes as a separate commit. When you're done, any remaining
100 some changes as a separate commit. When you're done, any remaining
101 uncommitted changes will be committed as well. When done, run ``hg
101 uncommitted changes will be committed as well. When done, run ``hg
102 histedit --continue`` to finish this step. If there are uncommitted
102 histedit --continue`` to finish this step. If there are uncommitted
103 changes, you'll be prompted for a new commit message, but the default
103 changes, you'll be prompted for a new commit message, but the default
104 commit message will be the original message for the ``edit`` ed
104 commit message will be the original message for the ``edit`` ed
105 revision, and the date of the original commit will be preserved.
105 revision, and the date of the original commit will be preserved.
106
106
107 The ``message`` operation will give you a chance to revise a commit
107 The ``message`` operation will give you a chance to revise a commit
108 message without changing the contents. It's a shortcut for doing
108 message without changing the contents. It's a shortcut for doing
109 ``edit`` immediately followed by `hg histedit --continue``.
109 ``edit`` immediately followed by `hg histedit --continue``.
110
110
111 If ``histedit`` encounters a conflict when moving a revision (while
111 If ``histedit`` encounters a conflict when moving a revision (while
112 handling ``pick`` or ``fold``), it'll stop in a similar manner to
112 handling ``pick`` or ``fold``), it'll stop in a similar manner to
113 ``edit`` with the difference that it won't prompt you for a commit
113 ``edit`` with the difference that it won't prompt you for a commit
114 message when done. If you decide at this point that you don't like how
114 message when done. If you decide at this point that you don't like how
115 much work it will be to rearrange history, or that you made a mistake,
115 much work it will be to rearrange history, or that you made a mistake,
116 you can use ``hg histedit --abort`` to abandon the new changes you
116 you can use ``hg histedit --abort`` to abandon the new changes you
117 have made and return to the state before you attempted to edit your
117 have made and return to the state before you attempted to edit your
118 history.
118 history.
119
119
120 If we clone the histedit-ed example repository above and add four more
120 If we clone the histedit-ed example repository above and add four more
121 changes, such that we have the following history::
121 changes, such that we have the following history::
122
122
123 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
123 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
124 | Add theta
124 | Add theta
125 |
125 |
126 o 5 140988835471 2009-04-27 18:04 -0500 stefan
126 o 5 140988835471 2009-04-27 18:04 -0500 stefan
127 | Add eta
127 | Add eta
128 |
128 |
129 o 4 122930637314 2009-04-27 18:04 -0500 stefan
129 o 4 122930637314 2009-04-27 18:04 -0500 stefan
130 | Add zeta
130 | Add zeta
131 |
131 |
132 o 3 836302820282 2009-04-27 18:04 -0500 stefan
132 o 3 836302820282 2009-04-27 18:04 -0500 stefan
133 | Add epsilon
133 | Add epsilon
134 |
134 |
135 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
135 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
136 | Add beta and delta.
136 | Add beta and delta.
137 |
137 |
138 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
138 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
139 | Add gamma
139 | Add gamma
140 |
140 |
141 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
141 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
142 Add alpha
142 Add alpha
143
143
144 If you run ``hg histedit --outgoing`` on the clone, then it is the same
144 If you run ``hg histedit --outgoing`` on the clone, then it is the same
145 as running ``hg histedit 836302820282``. If you plan to push to a
145 as running ``hg histedit 836302820282``. If you plan to push to a
146 repository that Mercurial does not detect to be related to the source
146 repository that Mercurial does not detect to be related to the source
147 repo, you can add a ``--force`` option.
147 repo, you can add a ``--force`` option.
148
148
149 Config
149 Config
150 ------
150 ------
151
151
152 Histedit rule lines are truncated to 80 characters by default. You
152 Histedit rule lines are truncated to 80 characters by default. You
153 can customize this behavior by setting a different length in your
153 can customize this behavior by setting a different length in your
154 configuration file::
154 configuration file::
155
155
156 [histedit]
156 [histedit]
157 linelen = 120 # truncate rule lines at 120 characters
157 linelen = 120 # truncate rule lines at 120 characters
158
158
159 The summary of a change can be customized as well::
159 The summary of a change can be customized as well::
160
160
161 [histedit]
161 [histedit]
162 summary-template = '{rev} {bookmarks} {desc|firstline}'
162 summary-template = '{rev} {bookmarks} {desc|firstline}'
163
163
164 The customized summary should be kept short enough that rule lines
164 The customized summary should be kept short enough that rule lines
165 will fit in the configured line length. See above if that requires
165 will fit in the configured line length. See above if that requires
166 customization.
166 customization.
167
167
168 ``hg histedit`` attempts to automatically choose an appropriate base
168 ``hg histedit`` attempts to automatically choose an appropriate base
169 revision to use. To change which base revision is used, define a
169 revision to use. To change which base revision is used, define a
170 revset in your configuration file::
170 revset in your configuration file::
171
171
172 [histedit]
172 [histedit]
173 defaultrev = only(.) & draft()
173 defaultrev = only(.) & draft()
174
174
175 By default each edited revision needs to be present in the histedit commands.
175 By default each edited revision needs to be present in the histedit commands.
176 To remove a revision you need to use the ``drop`` operation. You can configure
176 To remove a revision you need to use the ``drop`` operation. You can configure
177 the drop to be implicit for missing commits by adding::
177 the drop to be implicit for missing commits by adding::
178
178
179 [histedit]
179 [histedit]
180 dropmissing = True
180 dropmissing = True
181
181
182 By default, histedit will close the transaction after each action. For
182 By default, histedit will close the transaction after each action. For
183 performance purposes, you can configure histedit to use a single transaction
183 performance purposes, you can configure histedit to use a single transaction
184 across the entire histedit. WARNING: This setting introduces a significant risk
184 across the entire histedit. WARNING: This setting introduces a significant risk
185 of losing the work you've done in a histedit if the histedit aborts
185 of losing the work you've done in a histedit if the histedit aborts
186 unexpectedly::
186 unexpectedly::
187
187
188 [histedit]
188 [histedit]
189 singletransaction = True
189 singletransaction = True
190
190
191 """
191 """
192
192
193 from __future__ import absolute_import
193 from __future__ import absolute_import
194
194
195 # chistedit dependencies that are not available everywhere
195 # chistedit dependencies that are not available everywhere
196 try:
196 try:
197 import fcntl
197 import fcntl
198 import termios
198 import termios
199 except ImportError:
199 except ImportError:
200 fcntl = None
200 fcntl = None
201 termios = None
201 termios = None
202
202
203 import functools
203 import functools
204 import os
204 import os
205 import struct
205 import struct
206
206
207 from mercurial.i18n import _
207 from mercurial.i18n import _
208 from mercurial.pycompat import (
208 from mercurial.pycompat import (
209 getattr,
209 getattr,
210 open,
210 open,
211 )
211 )
212 from mercurial.node import (
212 from mercurial.node import (
213 bin,
213 bin,
214 hex,
214 hex,
215 short,
215 short,
216 )
216 )
217 from mercurial import (
217 from mercurial import (
218 bundle2,
218 bundle2,
219 cmdutil,
219 cmdutil,
220 context,
220 context,
221 copies,
221 copies,
222 destutil,
222 destutil,
223 discovery,
223 discovery,
224 encoding,
224 encoding,
225 error,
225 error,
226 exchange,
226 exchange,
227 extensions,
227 extensions,
228 hg,
228 hg,
229 logcmdutil,
229 logcmdutil,
230 merge as mergemod,
230 merge as mergemod,
231 mergestate as mergestatemod,
231 mergestate as mergestatemod,
232 mergeutil,
232 mergeutil,
233 obsolete,
233 obsolete,
234 pycompat,
234 pycompat,
235 registrar,
235 registrar,
236 repair,
236 repair,
237 rewriteutil,
237 rewriteutil,
238 scmutil,
238 scmutil,
239 state as statemod,
239 state as statemod,
240 util,
240 util,
241 )
241 )
242 from mercurial.utils import (
242 from mercurial.utils import (
243 dateutil,
243 dateutil,
244 stringutil,
244 stringutil,
245 urlutil,
245 urlutil,
246 )
246 )
247
247
248 pickle = util.pickle
248 pickle = util.pickle
249 cmdtable = {}
249 cmdtable = {}
250 command = registrar.command(cmdtable)
250 command = registrar.command(cmdtable)
251
251
252 configtable = {}
252 configtable = {}
253 configitem = registrar.configitem(configtable)
253 configitem = registrar.configitem(configtable)
254 configitem(
254 configitem(
255 b'experimental',
255 b'experimental',
256 b'histedit.autoverb',
256 b'histedit.autoverb',
257 default=False,
257 default=False,
258 )
258 )
259 configitem(
259 configitem(
260 b'histedit',
260 b'histedit',
261 b'defaultrev',
261 b'defaultrev',
262 default=None,
262 default=None,
263 )
263 )
264 configitem(
264 configitem(
265 b'histedit',
265 b'histedit',
266 b'dropmissing',
266 b'dropmissing',
267 default=False,
267 default=False,
268 )
268 )
269 configitem(
269 configitem(
270 b'histedit',
270 b'histedit',
271 b'linelen',
271 b'linelen',
272 default=80,
272 default=80,
273 )
273 )
274 configitem(
274 configitem(
275 b'histedit',
275 b'histedit',
276 b'singletransaction',
276 b'singletransaction',
277 default=False,
277 default=False,
278 )
278 )
279 configitem(
279 configitem(
280 b'ui',
280 b'ui',
281 b'interface.histedit',
281 b'interface.histedit',
282 default=None,
282 default=None,
283 )
283 )
284 configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
284 configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
285 # TODO: Teach the text-based histedit interface to respect this config option
286 # before we make it non-experimental.
287 configitem(
288 b'histedit', b'later-commits-first', default=False, experimental=True
289 )
285
290
286 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
291 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
287 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
292 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
288 # be specifying the version(s) of Mercurial they are tested with, or
293 # be specifying the version(s) of Mercurial they are tested with, or
289 # leave the attribute unspecified.
294 # leave the attribute unspecified.
290 testedwith = b'ships-with-hg-core'
295 testedwith = b'ships-with-hg-core'
291
296
292 actiontable = {}
297 actiontable = {}
293 primaryactions = set()
298 primaryactions = set()
294 secondaryactions = set()
299 secondaryactions = set()
295 tertiaryactions = set()
300 tertiaryactions = set()
296 internalactions = set()
301 internalactions = set()
297
302
298
303
299 def geteditcomment(ui, first, last):
304 def geteditcomment(ui, first, last):
300 """construct the editor comment
305 """construct the editor comment
301 The comment includes::
306 The comment includes::
302 - an intro
307 - an intro
303 - sorted primary commands
308 - sorted primary commands
304 - sorted short commands
309 - sorted short commands
305 - sorted long commands
310 - sorted long commands
306 - additional hints
311 - additional hints
307
312
308 Commands are only included once.
313 Commands are only included once.
309 """
314 """
310 intro = _(
315 intro = _(
311 b"""Edit history between %s and %s
316 b"""Edit history between %s and %s
312
317
313 Commits are listed from least to most recent
318 Commits are listed from least to most recent
314
319
315 You can reorder changesets by reordering the lines
320 You can reorder changesets by reordering the lines
316
321
317 Commands:
322 Commands:
318 """
323 """
319 )
324 )
320 actions = []
325 actions = []
321
326
322 def addverb(v):
327 def addverb(v):
323 a = actiontable[v]
328 a = actiontable[v]
324 lines = a.message.split(b"\n")
329 lines = a.message.split(b"\n")
325 if len(a.verbs):
330 if len(a.verbs):
326 v = b', '.join(sorted(a.verbs, key=lambda v: len(v)))
331 v = b', '.join(sorted(a.verbs, key=lambda v: len(v)))
327 actions.append(b" %s = %s" % (v, lines[0]))
332 actions.append(b" %s = %s" % (v, lines[0]))
328 actions.extend([b' %s'] * (len(lines) - 1))
333 actions.extend([b' %s'] * (len(lines) - 1))
329
334
330 for v in (
335 for v in (
331 sorted(primaryactions)
336 sorted(primaryactions)
332 + sorted(secondaryactions)
337 + sorted(secondaryactions)
333 + sorted(tertiaryactions)
338 + sorted(tertiaryactions)
334 ):
339 ):
335 addverb(v)
340 addverb(v)
336 actions.append(b'')
341 actions.append(b'')
337
342
338 hints = []
343 hints = []
339 if ui.configbool(b'histedit', b'dropmissing'):
344 if ui.configbool(b'histedit', b'dropmissing'):
340 hints.append(
345 hints.append(
341 b"Deleting a changeset from the list "
346 b"Deleting a changeset from the list "
342 b"will DISCARD it from the edited history!"
347 b"will DISCARD it from the edited history!"
343 )
348 )
344
349
345 lines = (intro % (first, last)).split(b'\n') + actions + hints
350 lines = (intro % (first, last)).split(b'\n') + actions + hints
346
351
347 return b''.join([b'# %s\n' % l if l else b'#\n' for l in lines])
352 return b''.join([b'# %s\n' % l if l else b'#\n' for l in lines])
348
353
349
354
350 class histeditstate(object):
355 class histeditstate(object):
351 def __init__(self, repo):
356 def __init__(self, repo):
352 self.repo = repo
357 self.repo = repo
353 self.actions = None
358 self.actions = None
354 self.keep = None
359 self.keep = None
355 self.topmost = None
360 self.topmost = None
356 self.parentctxnode = None
361 self.parentctxnode = None
357 self.lock = None
362 self.lock = None
358 self.wlock = None
363 self.wlock = None
359 self.backupfile = None
364 self.backupfile = None
360 self.stateobj = statemod.cmdstate(repo, b'histedit-state')
365 self.stateobj = statemod.cmdstate(repo, b'histedit-state')
361 self.replacements = []
366 self.replacements = []
362
367
363 def read(self):
368 def read(self):
364 """Load histedit state from disk and set fields appropriately."""
369 """Load histedit state from disk and set fields appropriately."""
365 if not self.stateobj.exists():
370 if not self.stateobj.exists():
366 cmdutil.wrongtooltocontinue(self.repo, _(b'histedit'))
371 cmdutil.wrongtooltocontinue(self.repo, _(b'histedit'))
367
372
368 data = self._read()
373 data = self._read()
369
374
370 self.parentctxnode = data[b'parentctxnode']
375 self.parentctxnode = data[b'parentctxnode']
371 actions = parserules(data[b'rules'], self)
376 actions = parserules(data[b'rules'], self)
372 self.actions = actions
377 self.actions = actions
373 self.keep = data[b'keep']
378 self.keep = data[b'keep']
374 self.topmost = data[b'topmost']
379 self.topmost = data[b'topmost']
375 self.replacements = data[b'replacements']
380 self.replacements = data[b'replacements']
376 self.backupfile = data[b'backupfile']
381 self.backupfile = data[b'backupfile']
377
382
378 def _read(self):
383 def _read(self):
379 fp = self.repo.vfs.read(b'histedit-state')
384 fp = self.repo.vfs.read(b'histedit-state')
380 if fp.startswith(b'v1\n'):
385 if fp.startswith(b'v1\n'):
381 data = self._load()
386 data = self._load()
382 parentctxnode, rules, keep, topmost, replacements, backupfile = data
387 parentctxnode, rules, keep, topmost, replacements, backupfile = data
383 else:
388 else:
384 data = pickle.loads(fp)
389 data = pickle.loads(fp)
385 parentctxnode, rules, keep, topmost, replacements = data
390 parentctxnode, rules, keep, topmost, replacements = data
386 backupfile = None
391 backupfile = None
387 rules = b"\n".join([b"%s %s" % (verb, rest) for [verb, rest] in rules])
392 rules = b"\n".join([b"%s %s" % (verb, rest) for [verb, rest] in rules])
388
393
389 return {
394 return {
390 b'parentctxnode': parentctxnode,
395 b'parentctxnode': parentctxnode,
391 b"rules": rules,
396 b"rules": rules,
392 b"keep": keep,
397 b"keep": keep,
393 b"topmost": topmost,
398 b"topmost": topmost,
394 b"replacements": replacements,
399 b"replacements": replacements,
395 b"backupfile": backupfile,
400 b"backupfile": backupfile,
396 }
401 }
397
402
398 def write(self, tr=None):
403 def write(self, tr=None):
399 if tr:
404 if tr:
400 tr.addfilegenerator(
405 tr.addfilegenerator(
401 b'histedit-state',
406 b'histedit-state',
402 (b'histedit-state',),
407 (b'histedit-state',),
403 self._write,
408 self._write,
404 location=b'plain',
409 location=b'plain',
405 )
410 )
406 else:
411 else:
407 with self.repo.vfs(b"histedit-state", b"w") as f:
412 with self.repo.vfs(b"histedit-state", b"w") as f:
408 self._write(f)
413 self._write(f)
409
414
410 def _write(self, fp):
415 def _write(self, fp):
411 fp.write(b'v1\n')
416 fp.write(b'v1\n')
412 fp.write(b'%s\n' % hex(self.parentctxnode))
417 fp.write(b'%s\n' % hex(self.parentctxnode))
413 fp.write(b'%s\n' % hex(self.topmost))
418 fp.write(b'%s\n' % hex(self.topmost))
414 fp.write(b'%s\n' % (b'True' if self.keep else b'False'))
419 fp.write(b'%s\n' % (b'True' if self.keep else b'False'))
415 fp.write(b'%d\n' % len(self.actions))
420 fp.write(b'%d\n' % len(self.actions))
416 for action in self.actions:
421 for action in self.actions:
417 fp.write(b'%s\n' % action.tostate())
422 fp.write(b'%s\n' % action.tostate())
418 fp.write(b'%d\n' % len(self.replacements))
423 fp.write(b'%d\n' % len(self.replacements))
419 for replacement in self.replacements:
424 for replacement in self.replacements:
420 fp.write(
425 fp.write(
421 b'%s%s\n'
426 b'%s%s\n'
422 % (
427 % (
423 hex(replacement[0]),
428 hex(replacement[0]),
424 b''.join(hex(r) for r in replacement[1]),
429 b''.join(hex(r) for r in replacement[1]),
425 )
430 )
426 )
431 )
427 backupfile = self.backupfile
432 backupfile = self.backupfile
428 if not backupfile:
433 if not backupfile:
429 backupfile = b''
434 backupfile = b''
430 fp.write(b'%s\n' % backupfile)
435 fp.write(b'%s\n' % backupfile)
431
436
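# Hedged illustration of the v1 'histedit-state' layout that _write()
# above produces and _load() below parses (hashes shortened here; real
# files carry full 40-character hex):
#
#     v1
#     <parentctxnode hex>
#     <topmost hex>
#     True|False                      (keep)
#     <number of actions>
#     <verb>                          (two lines per action,
#     <node hex>                       from action.tostate())
#     <number of replacements>
#     <oldhex><succhex>...            (one line per replacement)
#     <backupfile path, or an empty line>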
432 def _load(self):
437 def _load(self):
433 fp = self.repo.vfs(b'histedit-state', b'r')
438 fp = self.repo.vfs(b'histedit-state', b'r')
434 lines = [l[:-1] for l in fp.readlines()]
439 lines = [l[:-1] for l in fp.readlines()]
435
440
436 index = 0
441 index = 0
437 lines[index] # version number
442 lines[index] # version number
438 index += 1
443 index += 1
439
444
440 parentctxnode = bin(lines[index])
445 parentctxnode = bin(lines[index])
441 index += 1
446 index += 1
442
447
443 topmost = bin(lines[index])
448 topmost = bin(lines[index])
444 index += 1
449 index += 1
445
450
446 keep = lines[index] == b'True'
451 keep = lines[index] == b'True'
447 index += 1
452 index += 1
448
453
449 # Rules
454 # Rules
450 rules = []
455 rules = []
451 rulelen = int(lines[index])
456 rulelen = int(lines[index])
452 index += 1
457 index += 1
453 for i in pycompat.xrange(rulelen):
458 for i in pycompat.xrange(rulelen):
454 ruleaction = lines[index]
459 ruleaction = lines[index]
455 index += 1
460 index += 1
456 rule = lines[index]
461 rule = lines[index]
457 index += 1
462 index += 1
458 rules.append((ruleaction, rule))
463 rules.append((ruleaction, rule))
459
464
460 # Replacements
465 # Replacements
461 replacements = []
466 replacements = []
462 replacementlen = int(lines[index])
467 replacementlen = int(lines[index])
463 index += 1
468 index += 1
464 for i in pycompat.xrange(replacementlen):
469 for i in pycompat.xrange(replacementlen):
465 replacement = lines[index]
470 replacement = lines[index]
466 original = bin(replacement[:40])
471 original = bin(replacement[:40])
467 succ = [
472 succ = [
468 bin(replacement[i : i + 40])
473 bin(replacement[i : i + 40])
469 for i in range(40, len(replacement), 40)
474 for i in range(40, len(replacement), 40)
470 ]
475 ]
471 replacements.append((original, succ))
476 replacements.append((original, succ))
472 index += 1
477 index += 1
473
478
474 backupfile = lines[index]
479 backupfile = lines[index]
475 index += 1
480 index += 1
476
481
477 fp.close()
482 fp.close()
478
483
479 return parentctxnode, rules, keep, topmost, replacements, backupfile
484 return parentctxnode, rules, keep, topmost, replacements, backupfile
480
485
481 def clear(self):
486 def clear(self):
482 if self.inprogress():
487 if self.inprogress():
483 self.repo.vfs.unlink(b'histedit-state')
488 self.repo.vfs.unlink(b'histedit-state')
484
489
485 def inprogress(self):
490 def inprogress(self):
486 return self.repo.vfs.exists(b'histedit-state')
491 return self.repo.vfs.exists(b'histedit-state')
487
492
488
493
489 class histeditaction(object):
494 class histeditaction(object):
490 def __init__(self, state, node):
495 def __init__(self, state, node):
491 self.state = state
496 self.state = state
492 self.repo = state.repo
497 self.repo = state.repo
493 self.node = node
498 self.node = node
494
499
495 @classmethod
500 @classmethod
496 def fromrule(cls, state, rule):
501 def fromrule(cls, state, rule):
497 """Parses the given rule, returning an instance of the histeditaction."""
502 """Parses the given rule, returning an instance of the histeditaction."""
498 ruleid = rule.strip().split(b' ', 1)[0]
503 ruleid = rule.strip().split(b' ', 1)[0]
499 # ruleid can be anything from rev numbers, hashes, "bookmarks" etc
504 # ruleid can be anything from rev numbers, hashes, "bookmarks" etc
500 # Validate the rule id and get the rule hash
505 # Validate the rule id and get the rule hash
501 try:
506 try:
502 rev = bin(ruleid)
507 rev = bin(ruleid)
503 except TypeError:
508 except TypeError:
504 try:
509 try:
505 _ctx = scmutil.revsingle(state.repo, ruleid)
510 _ctx = scmutil.revsingle(state.repo, ruleid)
506 rulehash = _ctx.hex()
511 rulehash = _ctx.hex()
507 rev = bin(rulehash)
512 rev = bin(rulehash)
508 except error.RepoLookupError:
513 except error.RepoLookupError:
509 raise error.ParseError(_(b"invalid changeset %s") % ruleid)
514 raise error.ParseError(_(b"invalid changeset %s") % ruleid)
510 return cls(state, rev)
515 return cls(state, rev)
511
516
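# Hedged illustration (hypothetical rule text): the first token of a
# rule may be any revision identifier, so these parse to equivalent
# actions once resolved:
#
#     histeditaction.fromrule(state, b'pick 7c2fd3b9020c Add delta')
#     histeditaction.fromrule(state, b'pick 3')    # rev-number form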
512 def verify(self, prev, expected, seen):
517 def verify(self, prev, expected, seen):
513 """Verifies semantic correctness of the rule"""
518 """Verifies semantic correctness of the rule"""
514 repo = self.repo
519 repo = self.repo
515 ha = hex(self.node)
520 ha = hex(self.node)
516 self.node = scmutil.resolvehexnodeidprefix(repo, ha)
521 self.node = scmutil.resolvehexnodeidprefix(repo, ha)
517 if self.node is None:
522 if self.node is None:
518 raise error.ParseError(_(b'unknown changeset %s listed') % ha[:12])
523 raise error.ParseError(_(b'unknown changeset %s listed') % ha[:12])
519 self._verifynodeconstraints(prev, expected, seen)
524 self._verifynodeconstraints(prev, expected, seen)
520
525
521 def _verifynodeconstraints(self, prev, expected, seen):
526 def _verifynodeconstraints(self, prev, expected, seen):
522 # by default command need a node in the edited list
527 # by default command need a node in the edited list
523 if self.node not in expected:
528 if self.node not in expected:
524 raise error.ParseError(
529 raise error.ParseError(
525 _(b'%s "%s" changeset was not a candidate')
530 _(b'%s "%s" changeset was not a candidate')
526 % (self.verb, short(self.node)),
531 % (self.verb, short(self.node)),
527 hint=_(b'only use listed changesets'),
532 hint=_(b'only use listed changesets'),
528 )
533 )
529 # and only one command per node
534 # and only one command per node
530 if self.node in seen:
535 if self.node in seen:
531 raise error.ParseError(
536 raise error.ParseError(
532 _(b'duplicated command for changeset %s') % short(self.node)
537 _(b'duplicated command for changeset %s') % short(self.node)
533 )
538 )

    def torule(self):
        """build a histedit rule line for an action

        by default lines are in the form:
          <hash> <rev> <summary>
        """
        ctx = self.repo[self.node]
        ui = self.repo.ui
        # We don't want color codes in the commit message template, so
        # disable the label() template function while we render it.
        with ui.configoverride(
            {(b'templatealias', b'label(l,x)'): b"x"}, b'histedit'
        ):
            summary = cmdutil.rendertemplate(
                ctx, ui.config(b'histedit', b'summary-template')
            )
        # Handle the fact that `''.splitlines() => []`
        summary = summary.splitlines()[0] if summary else b''
        line = b'%s %s %s' % (self.verb, ctx, summary)
        # trim to 75 columns by default so it's not stupidly wide in my editor
        # (the 5 more are left for verb)
        maxlen = self.repo.ui.configint(b'histedit', b'linelen')
        maxlen = max(maxlen, 22)  # avoid truncating hash
        return stringutil.ellipsis(line, maxlen)

    def tostate(self):
        """Print an action in the format used by histedit state files
        (the first line is the verb, the second is the node hash)
        """
        return b"%s\n%s" % (self.verb, hex(self.node))
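
    # For illustration (hypothetical hash, stock summary template): a 'pick'
    # of revision 42 renders via torule() as
    #   b'pick 0123456789ab 42 add feature'
    # and via tostate() as
    #   b'pick\n0123456789abcdef0123456789abcdef01234567'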

    def run(self):
        """Runs the action. The default behavior is simply to apply the
        action's rulectx onto the current parentctx."""
        self.applychange()
        self.continuedirty()
        return self.continueclean()

    def applychange(self):
        """Applies the changes from this action's rulectx onto the current
        parentctx, but does not commit them."""
        repo = self.repo
        rulectx = repo[self.node]
        with repo.ui.silent():
            hg.update(repo, self.state.parentctxnode, quietempty=True)
        stats = applychanges(repo.ui, repo, rulectx, {})
        repo.dirstate.setbranch(rulectx.branch())
        if stats.unresolvedcount:
            raise error.InterventionRequired(
                _(b'Fix up the change (%s %s)') % (self.verb, short(self.node)),
                hint=_(b'hg histedit --continue to resume'),
            )

    def continuedirty(self):
        """Continues the action when changes have been applied to the working
        copy. The default behavior is to commit the dirty changes."""
        repo = self.repo
        rulectx = repo[self.node]

        editor = self.commiteditor()
        commit = commitfuncfor(repo, rulectx)
        if repo.ui.configbool(b'rewrite', b'update-timestamp'):
            date = dateutil.makedate()
        else:
            date = rulectx.date()
        commit(
            text=rulectx.description(),
            user=rulectx.user(),
            date=date,
            extra=rulectx.extra(),
            editor=editor,
        )

    def commiteditor(self):
        """The editor to be used to edit the commit message."""
        return False

    def continueclean(self):
        """Continues the action when the working copy is clean. The default
        behavior is to accept the current commit as the new version of the
        rulectx."""
        ctx = self.repo[b'.']
        if ctx.node() == self.state.parentctxnode:
            self.repo.ui.warn(
                _(b'%s: skipping changeset (no changes)\n') % short(self.node)
            )
            return ctx, [(self.node, tuple())]
        if ctx.node() == self.node:
            # Nothing changed
            return ctx, []
        return ctx, [(self.node, (ctx.node(),))]
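
    # The (node, successors) tuples returned above are recorded as
    # replacements in the histedit state: a skipped changeset yields
    # (node, ()) while a rewritten one yields (node, (newnode,)).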


def commitfuncfor(repo, src):
    """Build a commit function for the replacement of <src>

    This function ensures we apply the same treatment to all changesets.

    - Add a 'histedit_source' entry in extra.

    Note that fold has its own separate logic because its handling is a bit
    different and not easily factored out of the fold method.
    """
    phasemin = src.phase()

    def commitfunc(**kwargs):
        overrides = {(b'phases', b'new-commit'): phasemin}
        with repo.ui.configoverride(overrides, b'histedit'):
            extra = kwargs.get('extra', {}).copy()
            extra[b'histedit_source'] = src.hex()
            kwargs['extra'] = extra
            return repo.commit(**kwargs)

    return commitfunc


def applychanges(ui, repo, ctx, opts):
    """Merge changeset from ctx (only) in the current working directory"""
    if ctx.p1().node() == repo.dirstate.p1():
        # edits are "in place" so we do not need to make any merge;
        # just apply the changes on the parent for editing
        with ui.silent():
            cmdutil.revert(ui, repo, ctx, all=True)
        stats = mergemod.updateresult(0, 0, 0, 0)
    else:
        try:
            # ui.forcemerge is an internal variable, do not document
            repo.ui.setconfig(
                b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'
            )
            stats = mergemod.graft(repo, ctx, labels=[b'local', b'histedit'])
        finally:
            repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')
    return stats


def collapse(repo, firstctx, lastctx, commitopts, skipprompt=False):
    """collapse the set of revisions from first to last as a new one.

    Expected commit options are:
        - message
        - date
        - username
    Commit message is edited in all cases.

    This function works in memory."""
    ctxs = list(repo.set(b'%d::%d', firstctx.rev(), lastctx.rev()))
    if not ctxs:
        return None
    for c in ctxs:
        if not c.mutable():
            raise error.ParseError(
                _(b"cannot fold into public change %s") % short(c.node())
            )
    base = firstctx.p1()

    # commit a new version of the old changeset, including the update
    # collect all files which might be affected
    files = set()
    for ctx in ctxs:
        files.update(ctx.files())

    # Recompute copies (avoid recording a -> b -> a)
    copied = copies.pathcopies(base, lastctx)

    # prune files which were reverted by the updates
    files = [f for f in files if not cmdutil.samefile(f, lastctx, base)]
    # commit version of these files as defined by head
    headmf = lastctx.manifest()

    def filectxfn(repo, ctx, path):
        if path in headmf:
            fctx = lastctx[path]
            flags = fctx.flags()
            mctx = context.memfilectx(
                repo,
                ctx,
                fctx.path(),
                fctx.data(),
                islink=b'l' in flags,
                isexec=b'x' in flags,
                copysource=copied.get(path),
            )
            return mctx
        return None

    if commitopts.get(b'message'):
        message = commitopts[b'message']
    else:
        message = firstctx.description()
    user = commitopts.get(b'user')
    date = commitopts.get(b'date')
    extra = commitopts.get(b'extra')

    parents = (firstctx.p1().node(), firstctx.p2().node())
    editor = None
    if not skipprompt:
        editor = cmdutil.getcommiteditor(edit=True, editform=b'histedit.fold')
    new = context.memctx(
        repo,
        parents=parents,
        text=message,
        files=files,
        filectxfn=filectxfn,
        user=user,
        date=date,
        extra=extra,
        editor=editor,
    )
    return repo.commitctx(new)
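
    # Note: filectxfn above returning None for a path marks that file as
    # removed in the folded commit, so files absent from the final manifest
    # are dropped even if they were touched by intermediate changesets.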


def _isdirtywc(repo):
    return repo[None].dirty(missing=True)


def abortdirty():
    raise error.StateError(
        _(b'working copy has pending changes'),
        hint=_(
            b'amend, commit, or revert them and run histedit '
            b'--continue, or abort with histedit --abort'
        ),
    )


def action(verbs, message, priority=False, internal=False):
    def wrap(cls):
        assert not priority or not internal
        verb = verbs[0]
        if priority:
            primaryactions.add(verb)
        elif internal:
            internalactions.add(verb)
        elif len(verbs) > 1:
            secondaryactions.add(verb)
        else:
            tertiaryactions.add(verb)

        cls.verb = verb
        cls.verbs = verbs
        cls.message = message
        for verb in verbs:
            actiontable[verb] = cls
        return cls

    return wrap


@action([b'pick', b'p'], _(b'use commit'), priority=True)
class pick(histeditaction):
    def run(self):
        rulectx = self.repo[self.node]
        if rulectx.p1().node() == self.state.parentctxnode:
            self.repo.ui.debug(b'node %s unchanged\n' % short(self.node))
            return rulectx, []

        return super(pick, self).run()


@action(
    [b'edit', b'e'],
    _(b'use commit, but allow edits before making new commit'),
    priority=True,
)
class edit(histeditaction):
    def run(self):
        repo = self.repo
        rulectx = repo[self.node]
        hg.update(repo, self.state.parentctxnode, quietempty=True)
        applychanges(repo.ui, repo, rulectx, {})
        hint = _(b'to edit %s, `hg histedit --continue` after making changes')
        raise error.InterventionRequired(
            _(b'Editing (%s), commit as needed now to split the change')
            % short(self.node),
            hint=hint % short(self.node),
        )

    def commiteditor(self):
        return cmdutil.getcommiteditor(edit=True, editform=b'histedit.edit')


@action([b'fold', b'f'], _(b'use commit, but combine it with the one above'))
class fold(histeditaction):
    def verify(self, prev, expected, seen):
        """Verifies semantic correctness of the fold rule"""
        super(fold, self).verify(prev, expected, seen)
        repo = self.repo
        if not prev:
            c = repo[self.node].p1()
        elif prev.verb not in (b'pick', b'base'):
            return
        else:
            c = repo[prev.node]
        if not c.mutable():
            raise error.ParseError(
                _(b"cannot fold into public change %s") % short(c.node())
            )

    def continuedirty(self):
        repo = self.repo
        rulectx = repo[self.node]

        commit = commitfuncfor(repo, rulectx)
        commit(
            text=b'fold-temp-revision %s' % short(self.node),
            user=rulectx.user(),
            date=rulectx.date(),
            extra=rulectx.extra(),
        )

    def continueclean(self):
        repo = self.repo
        ctx = repo[b'.']
        rulectx = repo[self.node]
        parentctxnode = self.state.parentctxnode
        if ctx.node() == parentctxnode:
            repo.ui.warn(_(b'%s: empty changeset\n') % short(self.node))
            return ctx, [(self.node, (parentctxnode,))]

        parentctx = repo[parentctxnode]
        newcommits = {
            c.node()
            for c in repo.set(b'(%d::. - %d)', parentctx.rev(), parentctx.rev())
        }
        if not newcommits:
            repo.ui.warn(
                _(
                    b'%s: cannot fold - working copy is not a '
                    b'descendant of previous commit %s\n'
                )
                % (short(self.node), short(parentctxnode))
            )
            return ctx, [(self.node, (ctx.node(),))]

        middlecommits = newcommits.copy()
        middlecommits.discard(ctx.node())

        return self.finishfold(
            repo.ui, repo, parentctx, rulectx, ctx.node(), middlecommits
        )

    def skipprompt(self):
        """Returns true if the rule should skip the message editor.

        For example, 'fold' wants to show an editor, but 'rollup'
        doesn't want to.
        """
        return False

    def mergedescs(self):
        """Returns true if the rule should merge messages of multiple changes.

        This exists mainly so that 'rollup' rules can be a subclass of
        'fold'.
        """
        return True

    def firstdate(self):
        """Returns true if the rule should preserve the date of the first
        change.

        This exists mainly so that 'rollup' rules can be a subclass of
        'fold'.
        """
        return False

    def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):
        mergemod.update(ctx.p1())
        ### prepare new commit data
        commitopts = {}
        commitopts[b'user'] = ctx.user()
        # commit message
        if not self.mergedescs():
            newmessage = ctx.description()
        else:
            newmessage = (
                b'\n***\n'.join(
                    [ctx.description()]
                    + [repo[r].description() for r in internalchanges]
                    + [oldctx.description()]
                )
                + b'\n'
            )
        commitopts[b'message'] = newmessage
        # date
        if self.firstdate():
            commitopts[b'date'] = ctx.date()
        else:
            commitopts[b'date'] = max(ctx.date(), oldctx.date())
        # if date is to be updated to current
        if ui.configbool(b'rewrite', b'update-timestamp'):
            commitopts[b'date'] = dateutil.makedate()

        extra = ctx.extra().copy()
        # histedit_source
        # note: ctx is likely a temporary commit but that's the best we can do
        # here. This is sufficient to solve issue3681 anyway.
        extra[b'histedit_source'] = b'%s,%s' % (ctx.hex(), oldctx.hex())
        commitopts[b'extra'] = extra
        phasemin = max(ctx.phase(), oldctx.phase())
        overrides = {(b'phases', b'new-commit'): phasemin}
        with repo.ui.configoverride(overrides, b'histedit'):
            n = collapse(
                repo,
                ctx,
                repo[newnode],
                commitopts,
                skipprompt=self.skipprompt(),
            )
        if n is None:
            return ctx, []
        mergemod.update(repo[n])
        replacements = [
            (oldctx.node(), (newnode,)),
            (ctx.node(), (n,)),
            (newnode, (n,)),
        ]
        for ich in internalchanges:
            replacements.append((ich, (n,)))
        return repo[n], replacements
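
        # Illustrative example: with mergedescs() enabled, folding a commit
        # whose message is b'second' into one whose message is b'first'
        # offers
        #   b'first\n***\nsecond\n'
        # as the combined message in the editor.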


@action(
    [b'base', b'b'],
    _(b'checkout changeset and apply further changesets from there'),
)
class base(histeditaction):
    def run(self):
        if self.repo[b'.'].node() != self.node:
            mergemod.clean_update(self.repo[self.node])
        return self.continueclean()

    def continuedirty(self):
        abortdirty()

    def continueclean(self):
        basectx = self.repo[b'.']
        return basectx, []

    def _verifynodeconstraints(self, prev, expected, seen):
        # base can only be used with a node not in the edited set
        if self.node in expected:
            msg = _(b'%s "%s" changeset was an edited list candidate')
            raise error.ParseError(
                msg % (self.verb, short(self.node)),
                hint=_(b'base must only use unlisted changesets'),
            )


@action(
    [b'_multifold'],
    _(
        """fold subclass used for when multiple folds happen in a row

    We only want to fire the editor for the folded message once when
    (say) four changes are folded down into a single change. This is
    similar to rollup, but we should preserve both messages so that
    when the last fold operation runs we can show the user all the
    commit messages in their editor.
    """
    ),
    internal=True,
)
class _multifold(fold):
    def skipprompt(self):
        return True


@action(
    [b"roll", b"r"],
    _(b"like fold, but discard this commit's description and date"),
)
class rollup(fold):
    def mergedescs(self):
        return False

    def skipprompt(self):
        return True

    def firstdate(self):
        return True


@action([b"drop", b"d"], _(b'remove commit from history'))
class drop(histeditaction):
    def run(self):
        parentctx = self.repo[self.state.parentctxnode]
        return parentctx, [(self.node, tuple())]


@action(
    [b"mess", b"m"],
    _(b'edit commit message without changing commit content'),
    priority=True,
)
class message(histeditaction):
    def commiteditor(self):
        return cmdutil.getcommiteditor(edit=True, editform=b'histedit.mess')


def findoutgoing(ui, repo, remote=None, force=False, opts=None):
    """utility function to find the first outgoing changeset

    Used by initialization code"""
    if opts is None:
        opts = {}
    path = urlutil.get_unique_push_path(b'histedit', repo, ui, remote)
    dest = path.pushloc or path.loc

    ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(dest))

    revs, checkout = hg.addbranchrevs(repo, repo, (path.branch, []), None)
    other = hg.peer(repo, opts, dest)

    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
    if not outgoing.missing:
        raise error.StateError(_(b'no outgoing ancestors'))
    roots = list(repo.revs(b"roots(%ln)", outgoing.missing))
    if len(roots) > 1:
        msg = _(b'there are ambiguous outgoing revisions')
        hint = _(b"see 'hg help histedit' for more detail")
        raise error.StateError(msg, hint=hint)
    return repo[roots[0]].node()


# Curses Support
try:
    import curses
except ImportError:
    curses = None

KEY_LIST = [b'pick', b'edit', b'fold', b'drop', b'mess', b'roll']
ACTION_LABELS = {
    b'fold': b'^fold',
    b'roll': b'^roll',
}

COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN, COLOR_CURRENT = 1, 2, 3, 4, 5
COLOR_DIFF_ADD_LINE, COLOR_DIFF_DEL_LINE, COLOR_DIFF_OFFSET = 6, 7, 8
COLOR_ROLL, COLOR_ROLL_CURRENT, COLOR_ROLL_SELECTED = 9, 10, 11

E_QUIT, E_HISTEDIT = 1, 2
E_PAGEDOWN, E_PAGEUP, E_LINEUP, E_LINEDOWN, E_RESIZE = 3, 4, 5, 6, 7
MODE_INIT, MODE_PATCH, MODE_RULES, MODE_HELP = 0, 1, 2, 3

KEYTABLE = {
    b'global': {
        b'h': b'next-action',
        b'KEY_RIGHT': b'next-action',
        b'l': b'prev-action',
        b'KEY_LEFT': b'prev-action',
        b'q': b'quit',
        b'c': b'histedit',
        b'C': b'histedit',
        b'v': b'showpatch',
        b'?': b'help',
    },
    MODE_RULES: {
        b'd': b'action-drop',
        b'e': b'action-edit',
        b'f': b'action-fold',
        b'm': b'action-mess',
        b'p': b'action-pick',
        b'r': b'action-roll',
        b' ': b'select',
        b'j': b'down',
        b'k': b'up',
        b'KEY_DOWN': b'down',
        b'KEY_UP': b'up',
        b'J': b'move-down',
        b'K': b'move-up',
        b'KEY_NPAGE': b'move-down',
        b'KEY_PPAGE': b'move-up',
        b'0': b'goto',  # Used for 0..9
    },
    MODE_PATCH: {
        b' ': b'page-down',
        b'KEY_NPAGE': b'page-down',
        b'KEY_PPAGE': b'page-up',
        b'j': b'line-down',
        b'k': b'line-up',
        b'KEY_DOWN': b'line-down',
        b'KEY_UP': b'line-up',
        b'J': b'down',
        b'K': b'up',
    },
    MODE_HELP: {},
}
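
# Key dispatch sketch (illustrative): a keypress is looked up first in the
# current mode's table and then in the b'global' table, e.g.
#   KEYTABLE[MODE_RULES].get(b'j', KEYTABLE[b'global'].get(b'j'))  # b'down'
# Digits b'0'..b'9' are collapsed to the b'0' entry before the lookup.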


def screen_size():
    return struct.unpack(b'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, b'    '))


class histeditrule(object):
    def __init__(self, ui, ctx, pos, action=b'pick'):
        self.ui = ui
        self.ctx = ctx
        self.action = action
        self.origpos = pos
        self.pos = pos
        self.conflicts = []

    def __bytes__(self):
        # Example display of several histeditrules:
        #
        #  #10 pick   316392:06a16c25c053   add option to skip tests
        #  #11 ^roll  316393:71313c964cc5   <RED>oops a fixup commit</RED>
        #  #12 pick   316394:ab31f3973b0d   include mfbt for mozilla-config.h
        #  #13 ^fold  316395:14ce5803f4c3   fix warnings
        #
        # The carets point to the changeset being folded into ("roll this
        # changeset into the changeset above").
        return b'%s%s' % (self.prefix, self.desc)

    __str__ = encoding.strmethod(__bytes__)

    @property
    def prefix(self):
        # Some actions ('fold' and 'roll') combine a patch with a
        # previous one. Add a marker showing which patch they apply
        # to.
        action = ACTION_LABELS.get(self.action, self.action)

        h = self.ctx.hex()[0:12]
        r = self.ctx.rev()

        return b"#%s %s %d:%s " % (
            (b'%d' % self.origpos).ljust(2),
            action.ljust(6),
            r,
            h,
        )

    @util.propertycache
    def desc(self):
        summary = cmdutil.rendertemplate(
            self.ctx, self.ui.config(b'histedit', b'summary-template')
        )
        if summary:
            return summary
        # This is split off from the prefix property so that we can
        # separately make the description for 'roll' red (since it
        # will get discarded).
        return self.ctx.description().splitlines()[0].strip()

    def checkconflicts(self, other):
        if other.pos > self.pos and other.origpos <= self.origpos:
            if set(other.ctx.files()) & set(self.ctx.files()) != set():
                self.conflicts.append(other)
            return self.conflicts

        if other in self.conflicts:
            self.conflicts.remove(other)
        return self.conflicts
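
    # Illustrative example (hypothetical values): a rule at origpos 3 for
    # revision 42 with action b'roll' renders its prefix as
    #   b'#3  ^roll  42:0123456789ab '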


def makecommands(rules):
    """Returns a list of commands consumable by histedit --commands based on
    our list of rules"""
    commands = []
    for rule in rules:
        commands.append(b'%s %s\n' % (rule.action, rule.ctx))
    return commands
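
    # Illustrative output (hypothetical hashes): for two rules this returns
    #   [b'pick 0123456789ab\n', b'roll ba9876543210\n']
    # ready to feed to `hg histedit --commands`.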


def addln(win, y, x, line, color=None):
    """Add a line to the given window, left padded but 100% filled with
    whitespace characters, so that the color appears on the whole line"""
    maxy, maxx = win.getmaxyx()
    length = maxx - 1 - x
    line = bytes(line).ljust(length)[:length]
    if y < 0:
        y = maxy + y
    if x < 0:
        x = maxx + x
    if color:
        win.addstr(y, x, line, color)
    else:
        win.addstr(y, x, line)


def _trunc_head(line, n):
    if len(line) <= n:
        return line
    return b'> ' + line[-(n - 2) :]


def _trunc_tail(line, n):
    if len(line) <= n:
        return line
    return line[: n - 2] + b' >'
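
# e.g. (illustrative) _trunc_head(b'abcdefgh', 6) -> b'> efgh'
#      _trunc_tail(b'abcdefgh', 6) -> b'abcd >'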


1393 def patchcontents(state):
1238 class _chistedit_state(object):
1394 repo = state[b'repo']
1239 def __init__(
1395 rule = state[b'rules'][state[b'pos']]
1240 self,
1396 displayer = logcmdutil.changesetdisplayer(
1241 repo,
1397 repo.ui, repo, {b"patch": True, b"template": b"status"}, buffered=True
1242 rules,
1398 )
1243 stdscr,
1399 overrides = {(b'ui', b'verbose'): True}
1244 ):
1400 with repo.ui.configoverride(overrides, source=b'histedit'):
1245 self.repo = repo
1401 displayer.show(rule.ctx)
1246 self.rules = rules
1402 displayer.close()
1247 self.stdscr = stdscr
1403 return displayer.hunk[rule.ctx.rev()].splitlines()
1248 self.later_on_top = repo.ui.configbool(
1404
1249 b'histedit', b'later-commits-first'
1405
1250 )
1406 def _chisteditmain(repo, rules, stdscr):
1251 # The current item in display order, initialized to point to the top
1407 try:
1252 # of the screen.
1408 curses.use_default_colors()
1253 self.pos = 0
1409 except curses.error:
1254 self.selected = None
1410 pass
1255 self.mode = (MODE_INIT, MODE_INIT)
1411
1256 self.page_height = None
1412 # initialize color pattern
1257 self.modes = {
1413 curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
1258 MODE_RULES: {
1414 curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
1259 b'line_offset': 0,
1415 curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
1260 },
1416 curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
1261 MODE_PATCH: {
1417 curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
1262 b'line_offset': 0,
1418 curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
1263 },
1419 curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
1264 }
1420 curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
1265
1421 curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1)
1266 def render_commit(self, win):
1422 curses.init_pair(
1423 COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA
1424 )
1425 curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE)
1426
1427 # don't display the cursor
1428 try:
1429 curses.curs_set(0)
1430 except curses.error:
1431 pass
1432
1433 def rendercommit(win, state):
1434 """Renders the commit window that shows the log of the current selected
1267 """Renders the commit window that shows the log of the current selected
1435 commit"""
1268 commit"""
1436 pos = state[b'pos']
1269 rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
1437 rules = state[b'rules']
1438 rule = rules[pos]
1439
1270
1440 ctx = rule.ctx
1271 ctx = rule.ctx
1441 win.box()
1272 win.box()
1442
1273
1443 maxy, maxx = win.getmaxyx()
1274 maxy, maxx = win.getmaxyx()
1444 length = maxx - 3
1275 length = maxx - 3
1445
1276
1446 line = b"changeset: %d:%s" % (ctx.rev(), ctx.hex()[:12])
1277 line = b"changeset: %d:%s" % (ctx.rev(), ctx.hex()[:12])
1447 win.addstr(1, 1, line[:length])
1278 win.addstr(1, 1, line[:length])
1448
1279
1449 line = b"user: %s" % ctx.user()
1280 line = b"user: %s" % ctx.user()
1450 win.addstr(2, 1, line[:length])
1281 win.addstr(2, 1, line[:length])
1451
1282
1452 bms = repo.nodebookmarks(ctx.node())
1283 bms = self.repo.nodebookmarks(ctx.node())
1453 line = b"bookmark: %s" % b' '.join(bms)
1284 line = b"bookmark: %s" % b' '.join(bms)
1454 win.addstr(3, 1, line[:length])
1285 win.addstr(3, 1, line[:length])
1455
1286
1456 line = b"summary: %s" % (ctx.description().splitlines()[0])
1287 line = b"summary: %s" % (ctx.description().splitlines()[0])
1457 win.addstr(4, 1, line[:length])
1288 win.addstr(4, 1, line[:length])
1458
1289
1459 line = b"files: "
1290 line = b"files: "
1460 win.addstr(5, 1, line)
1291 win.addstr(5, 1, line)
1461 fnx = 1 + len(line)
1292 fnx = 1 + len(line)
1462 fnmaxx = length - fnx + 1
1293 fnmaxx = length - fnx + 1
1463 y = 5
1294 y = 5
1464 fnmaxn = maxy - (1 + y) - 1
1295 fnmaxn = maxy - (1 + y) - 1
1465 files = ctx.files()
1296 files = ctx.files()
1466 for i, line1 in enumerate(files):
1297 for i, line1 in enumerate(files):
1467 if len(files) > fnmaxn and i == fnmaxn - 1:
1298 if len(files) > fnmaxn and i == fnmaxn - 1:
1468 win.addstr(y, fnx, _trunc_tail(b','.join(files[i:]), fnmaxx))
1299 win.addstr(y, fnx, _trunc_tail(b','.join(files[i:]), fnmaxx))
1469 y = y + 1
1300 y = y + 1
1470 break
1301 break
1471 win.addstr(y, fnx, _trunc_head(line1, fnmaxx))
1302 win.addstr(y, fnx, _trunc_head(line1, fnmaxx))
1472 y = y + 1
1303 y = y + 1
1473
1304
1474 conflicts = rule.conflicts
1305 conflicts = rule.conflicts
1475 if len(conflicts) > 0:
1306 if len(conflicts) > 0:
1476 conflictstr = b','.join(map(lambda r: r.ctx.hex()[:12], conflicts))
1307 conflictstr = b','.join(map(lambda r: r.ctx.hex()[:12], conflicts))
1477 conflictstr = b"changed files overlap with %s" % conflictstr
1308 conflictstr = b"changed files overlap with %s" % conflictstr
1478 else:
1309 else:
1479 conflictstr = b'no overlap'
1310 conflictstr = b'no overlap'
1480
1311
1481 win.addstr(y, 1, conflictstr[:length])
1312 win.addstr(y, 1, conflictstr[:length])
1482 win.noutrefresh()
1313 win.noutrefresh()
1483
1314
1484 def helplines(mode):
1315 def helplines(self):
1485 if mode == MODE_PATCH:
1316 if self.mode[0] == MODE_PATCH:
1486 help = b"""\
1317 help = b"""\
1487 ?: help, k/up: line up, j/down: line down, v: stop viewing patch
1318 ?: help, k/up: line up, j/down: line down, v: stop viewing patch
1488 pgup: prev page, space/pgdn: next page, c: commit, q: abort
1319 pgup: prev page, space/pgdn: next page, c: commit, q: abort
1489 """
1320 """
1490 else:
1321 else:
1491 help = b"""\
1322 help = b"""\
1492 ?: help, k/up: move up, j/down: move down, space: select, v: view patch
1323 ?: help, k/up: move up, j/down: move down, space: select, v: view patch
1493 d: drop, e: edit, f: fold, m: mess, p: pick, r: roll
1324 d: drop, e: edit, f: fold, m: mess, p: pick, r: roll
1494 pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort
1325 pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort
1495 """
1326 """
1496 return help.splitlines()
1327 return help.splitlines()
1497
1328
1498 def renderhelp(win, state):
1329 def render_help(self, win):
1499 maxy, maxx = win.getmaxyx()
1330 maxy, maxx = win.getmaxyx()
1500 mode, _ = state[b'mode']
1331 for y, line in enumerate(self.helplines()):
1501 for y, line in enumerate(helplines(mode)):
1502 if y >= maxy:
1332 if y >= maxy:
1503 break
1333 break
1504 addln(win, y, 0, line, curses.color_pair(COLOR_HELP))
1334 addln(win, y, 0, line, curses.color_pair(COLOR_HELP))
1505 win.noutrefresh()
1335 win.noutrefresh()
1506
1336
1507 def renderrules(rulesscr, state):
1337 def layout(self):
1508 rules = state[b'rules']
1338 maxy, maxx = self.stdscr.getmaxyx()
1509 pos = state[b'pos']
1339 helplen = len(self.helplines())
1510 selected = state[b'selected']
1340 mainlen = maxy - helplen - 12
1511 start = state[b'modes'][MODE_RULES][b'line_offset']
1341 if mainlen < 1:
1512
1342 raise error.Abort(
1513 conflicts = [r.ctx for r in rules if r.conflicts]
1343 _(b"terminal dimensions %d by %d too small for curses histedit")
1344 % (maxy, maxx),
1345 hint=_(
1346 b"enlarge your terminal or use --config ui.interface=text"
1347 ),
1348 )
1349 return {
1350 b'commit': (12, maxx),
1351 b'help': (helplen, maxx),
1352 b'main': (mainlen, maxx),
1353 }
1354
1355 def display_pos_to_rule_pos(self, display_pos):
1356 """Converts a position in display order to rule order.
1357
1358 The `display_pos` is the order from the top in display order, not
1359 considering which items are currently visible on the screen. Thus,
1360 `display_pos=0` is the item at the top (possibly after scrolling to
1361 the top)
1362 """
1363 if self.later_on_top:
1364 return len(self.rules) - 1 - display_pos
1365 else:
1366 return display_pos
1367
    def render_rules(self, rulesscr):
        start = self.modes[MODE_RULES][b'line_offset']

        conflicts = [r.ctx for r in self.rules if r.conflicts]
        if len(conflicts) > 0:
            line = b"potential conflict in %s" % b','.join(
                map(pycompat.bytestr, conflicts)
            )
            addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN))

        for display_pos in range(start, len(self.rules)):
            y = display_pos - start
            if y < 0 or y >= self.page_height:
                continue
            rule_pos = self.display_pos_to_rule_pos(display_pos)
            rule = self.rules[rule_pos]
            if len(rule.conflicts) > 0:
                rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN))
            else:
                rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK)

            if display_pos == self.selected:
                rollcolor = COLOR_ROLL_SELECTED
                addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))
            elif display_pos == self.pos:
                rollcolor = COLOR_ROLL_CURRENT
                addln(
                    rulesscr,
                    y,
                    2,
                    rule,
                    curses.color_pair(COLOR_CURRENT) | curses.A_BOLD,
                )
            else:
                rollcolor = COLOR_ROLL
                addln(rulesscr, y, 2, rule)

            if rule.action == b'roll':
                rulesscr.addstr(
                    y,
                    2 + len(rule.prefix),
                    rule.desc,
                    curses.color_pair(rollcolor),
                )

        rulesscr.noutrefresh()

    def render_string(self, win, output, diffcolors=False):
        maxy, maxx = win.getmaxyx()
        length = min(maxy - 1, len(output))
        for y in range(0, length):
            line = output[y]
            if diffcolors:
                if line and line[0] == b'+':
                    win.addstr(
                        y, 0, line, curses.color_pair(COLOR_DIFF_ADD_LINE)
                    )
                elif line and line[0] == b'-':
                    win.addstr(
                        y, 0, line, curses.color_pair(COLOR_DIFF_DEL_LINE)
                    )
                elif line.startswith(b'@@ '):
                    win.addstr(y, 0, line, curses.color_pair(COLOR_DIFF_OFFSET))
                else:
                    win.addstr(y, 0, line)
            else:
                win.addstr(y, 0, line)
        win.noutrefresh()

    def render_patch(self, win):
        start = self.modes[MODE_PATCH][b'line_offset']
        content = self.modes[MODE_PATCH][b'patchcontents']
        self.render_string(win, content[start:], diffcolors=True)

    def event(self, ch):
        """Change state based on the current character input

        Takes the current state and, based on the character the user typed,
        updates it.
        """
        oldpos = self.pos

        if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
            return E_RESIZE

        lookup_ch = ch
        if ch is not None and b'0' <= ch <= b'9':
            lookup_ch = b'0'

        curmode, prevmode = self.mode
        action = KEYTABLE[curmode].get(
            lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
        )
        if action is None:
            return
        if action in (b'down', b'move-down'):
            newpos = min(oldpos + 1, len(self.rules) - 1)
            self.move_cursor(oldpos, newpos)
            if self.selected is not None or action == b'move-down':
                self.swap(oldpos, newpos)
        elif action in (b'up', b'move-up'):
            newpos = max(0, oldpos - 1)
            self.move_cursor(oldpos, newpos)
            if self.selected is not None or action == b'move-up':
                self.swap(oldpos, newpos)
        elif action == b'next-action':
            self.cycle_action(oldpos, next=True)
        elif action == b'prev-action':
            self.cycle_action(oldpos, next=False)
        elif action == b'select':
            self.selected = oldpos if self.selected is None else None
            self.make_selection(self.selected)
        elif action == b'goto' and int(ch) < len(self.rules) <= 10:
            newrule = next((r for r in self.rules if r.origpos == int(ch)))
            self.move_cursor(oldpos, newrule.pos)
            if self.selected is not None:
                self.swap(oldpos, newrule.pos)
        elif action.startswith(b'action-'):
            self.change_action(oldpos, action[7:])
        elif action == b'showpatch':
            self.change_mode(MODE_PATCH if curmode != MODE_PATCH else prevmode)
        elif action == b'help':
            self.change_mode(MODE_HELP if curmode != MODE_HELP else prevmode)
        elif action == b'quit':
            return E_QUIT
        elif action == b'histedit':
            return E_HISTEDIT
        elif action == b'page-down':
            return E_PAGEDOWN
        elif action == b'page-up':
            return E_PAGEUP
        elif action == b'line-down':
            return E_LINEDOWN
        elif action == b'line-up':
            return E_LINEUP

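    # Editor's sketch (an assumption about KEYTABLE's shape; it is defined
    # elsewhere in this file): the dispatch above is a two-level lookup,
    # per-mode bindings first, then a b'global' fallback.  The bindings shown
    # here are hypothetical:
    #
    #   keytable = {
    #       b'global': {b'q': b'quit', b'?': b'help'},
    #       MODE_RULES: {b'j': b'down', b'k': b'up'},
    #   }
    #   keytable[MODE_RULES].get(b'q', keytable[b'global'].get(b'q'))
    #   # -> b'quit' (not bound in MODE_RULES, falls through to global)
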
    def patch_contents(self):
        repo = self.repo
        rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
        displayer = logcmdutil.changesetdisplayer(
            repo.ui,
            repo,
            {b"patch": True, b"template": b"status"},
            buffered=True,
        )
        overrides = {(b'ui', b'verbose'): True}
        with repo.ui.configoverride(overrides, source=b'histedit'):
            displayer.show(rule.ctx)
            displayer.close()
        return displayer.hunk[rule.ctx.rev()].splitlines()

    def move_cursor(self, oldpos, newpos):
        """Change the rule/changeset that the cursor is pointing to, regardless
        of the current mode (you can switch between patches from the patch view
        window)."""
        self.pos = newpos

        mode, _ = self.mode
        if mode == MODE_RULES:
            # Scroll through the list by updating the view for MODE_RULES, so
            # that even if we are not currently viewing the rules, switching
            # back will result in the cursor's rule being visible.
            modestate = self.modes[MODE_RULES]
            if newpos < modestate[b'line_offset']:
                modestate[b'line_offset'] = newpos
            elif newpos > modestate[b'line_offset'] + self.page_height - 1:
                modestate[b'line_offset'] = newpos - self.page_height + 1

        # Reset the patch view region to the top of the new patch.
        self.modes[MODE_PATCH][b'line_offset'] = 0

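    # Editor's illustration (hedged): the two branches above keep the cursor
    # inside the visible window.  With page_height=10 and line_offset=0,
    # moving to newpos=12 sets line_offset to 12 - 10 + 1 = 3, so rows 3..12
    # are shown and the cursor sits on the last visible row.
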
    def change_mode(self, mode):
        curmode, _ = self.mode
        self.mode = (mode, curmode)
        if mode == MODE_PATCH:
            self.modes[MODE_PATCH][b'patchcontents'] = self.patch_contents()

    def make_selection(self, pos):
        self.selected = pos

1548 """Swap two positions and calculate necessary conflicts in
1549 O(|newpos-oldpos|) time"""
1550 old_rule_pos = self.display_pos_to_rule_pos(oldpos)
1551 new_rule_pos = self.display_pos_to_rule_pos(newpos)
1552
1553 rules = self.rules
1554 assert 0 <= old_rule_pos < len(rules) and 0 <= new_rule_pos < len(rules)
1555
1556 rules[old_rule_pos], rules[new_rule_pos] = (
1557 rules[new_rule_pos],
1558 rules[old_rule_pos],
1559 )
1560
1561 # TODO: swap should not know about histeditrule's internals
1562 rules[new_rule_pos].pos = new_rule_pos
1563 rules[old_rule_pos].pos = old_rule_pos
1564
1565 start = min(old_rule_pos, new_rule_pos)
1566 end = max(old_rule_pos, new_rule_pos)
1567 for r in pycompat.xrange(start, end + 1):
1568 rules[new_rule_pos].checkconflicts(rules[r])
1569 rules[old_rule_pos].checkconflicts(rules[r])
1570
1571 if self.selected:
1572 self.make_selection(newpos)
1573
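    # Editor's note (hedged): only the rules between the two swapped
    # positions can gain or lose conflicts, hence the O(|newpos-oldpos|)
    # claim in the docstring -- the checkconflicts() loop above touches
    # exactly the inclusive range min..max of the two rule positions.
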
    def change_action(self, pos, action):
        """Change the action state on the given position to the new action"""
        assert 0 <= pos < len(self.rules)
        self.rules[pos].action = action

    def cycle_action(self, pos, next=False):
        """Changes the action state to the next or the previous action from
        the action list"""
        assert 0 <= pos < len(self.rules)
        current = self.rules[pos].action

        assert current in KEY_LIST

        index = KEY_LIST.index(current)
        if next:
            index += 1
        else:
            index -= 1
        self.change_action(pos, KEY_LIST[index % len(KEY_LIST)])

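    # Editor's illustration (hedged; the concrete KEY_LIST contents are a
    # stand-in): the modulo makes the cycle wrap in both directions.  With a
    # hypothetical KEY_LIST of [b'pick', b'edit', b'fold'], stepping back
    # from b'pick' gives index -1, and in Python -1 % 3 == 2, i.e. b'fold'.
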
    def change_view(self, delta, unit):
        """Change the region of whatever is being viewed (a patch or the list
        of changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or
        'line'."""
        mode, _ = self.mode
        if mode != MODE_PATCH:
            return
        mode_state = self.modes[mode]
        num_lines = len(mode_state[b'patchcontents'])
        page_height = self.page_height
        unit = page_height if unit == b'page' else 1
        num_pages = 1 + (num_lines - 1) // page_height
        max_offset = (num_pages - 1) * page_height
        newline = mode_state[b'line_offset'] + delta * unit
        mode_state[b'line_offset'] = max(0, min(max_offset, newline))

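    # Editor's illustration (hedged): the clamp keeps the last page reachable
    # but never scrolls past it.  With 25 patch lines and page_height=10,
    # num_pages = 1 + 24 // 10 = 3 and max_offset = 20, so line_offset stays
    # in 0..20 no matter how many page-down events arrive.
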

def _chisteditmain(repo, rules, stdscr):
    try:
        curses.use_default_colors()
    except curses.error:
        pass

    # initialize color pattern
    curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
    curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
    curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
    curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
    curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
    curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
    curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
    curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
    curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1)
    curses.init_pair(
        COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA
    )
    curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE)

    # don't display the cursor
    try:
        curses.curs_set(0)
    except curses.error:
        pass

    def drawvertwin(size, y, x):
        win = curses.newwin(size[0], size[1], y, x)
        y += size[0]
        return win, y, x

    state = _chistedit_state(repo, rules, stdscr)

    # eventloop
    ch = None
    stdscr.clear()
    stdscr.refresh()
    while True:
        oldmode, unused = state.mode
        if oldmode == MODE_INIT:
            state.change_mode(MODE_RULES)
        e = state.event(ch)

        if e == E_QUIT:
            return False
        if e == E_HISTEDIT:
            return state.rules
        else:
            if e == E_RESIZE:
                size = screen_size()
                if size != stdscr.getmaxyx():
                    curses.resizeterm(*size)

            sizes = state.layout()
            curmode, unused = state.mode
            if curmode != oldmode:
                state.page_height = sizes[b'main'][0]
                # Adjust the view to fit the current screen size.
                state.move_cursor(state.pos, state.pos)

            # Pack the windows against the top, each pane spread across the
            # full width of the screen.
            y, x = (0, 0)
            helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
            mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
            commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)

            if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
                if e == E_PAGEDOWN:
                    state.change_view(+1, b'page')
                elif e == E_PAGEUP:
                    state.change_view(-1, b'page')
                elif e == E_LINEDOWN:
                    state.change_view(+1, b'line')
                elif e == E_LINEUP:
                    state.change_view(-1, b'line')

            # start rendering
            commitwin.erase()
            helpwin.erase()
            mainwin.erase()
            if curmode == MODE_PATCH:
                state.render_patch(mainwin)
            elif curmode == MODE_HELP:
                state.render_string(mainwin, __doc__.strip().splitlines())
            else:
                state.render_rules(mainwin)
            state.render_commit(commitwin)
            state.render_help(helpwin)
            curses.doupdate()
            # done rendering
            ch = encoding.strtolocal(stdscr.getkey())


def _chistedit(ui, repo, freeargs, opts):
    """interactively edit changeset history via a curses interface

    Provides an ncurses interface to histedit. Press ? in chistedit mode
    to see extensive help. Requires python-curses to be installed."""

    if curses is None:
        raise error.Abort(_(b"Python curses library required"))

    # disable color
    ui._colormode = None

    try:
        keep = opts.get(b'keep')
        revs = opts.get(b'rev', [])[:]
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        revs.extend(freeargs)
        if not revs:
            defaultrev = destutil.desthistedit(ui, repo)
            if defaultrev is not None:
                revs.append(defaultrev)
        if len(revs) != 1:
            raise error.InputError(
                _(b'histedit requires exactly one ancestor revision')
            )

        rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
        if len(rr) != 1:
            raise error.InputError(
                _(
                    b'The specified revisions must have '
                    b'exactly one common root'
                )
            )
        root = rr[0].node()

        topmost = repo.dirstate.p1()
        revs = between(repo, root, topmost, keep)
        if not revs:
            raise error.InputError(
                _(b'%s is not an ancestor of working directory') % short(root)
            )

        rules = []
        for i, r in enumerate(revs):
            rules.append(histeditrule(ui, repo[r], i))
        with util.with_lc_ctype():
            rc = curses.wrapper(functools.partial(_chisteditmain, repo, rules))
        curses.echo()
        curses.endwin()
        if rc is False:
            ui.write(_(b"histedit aborted\n"))
            return 0
        if type(rc) is list:
            ui.status(_(b"performing changes\n"))
            rules = makecommands(rc)
            with repo.vfs(b'chistedit', b'w+') as fp:
                for r in rules:
                    fp.write(r)
                opts[b'commands'] = fp.name
            return _texthistedit(ui, repo, freeargs, opts)
    except KeyboardInterrupt:
        pass
    return -1


@command(
    b'histedit',
    [
        (
            b'',
            b'commands',
            b'',
            _(b'read history edits from the specified file'),
            _(b'FILE'),
        ),
        (b'c', b'continue', False, _(b'continue an edit already in progress')),
        (b'', b'edit-plan', False, _(b'edit remaining actions list')),
        (
            b'k',
            b'keep',
            False,
            _(b"don't strip old nodes after edit is complete"),
        ),
        (b'', b'abort', False, _(b'abort an edit in progress')),
        (b'o', b'outgoing', False, _(b'changesets not found in destination')),
        (
            b'f',
            b'force',
            False,
            _(b'force outgoing even for unrelated repositories'),
        ),
        (b'r', b'rev', [], _(b'first revision to be edited'), _(b'REV')),
    ]
    + cmdutil.formatteropts,
    _(b"[OPTIONS] ([ANCESTOR] | --outgoing [URL])"),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
)
def histedit(ui, repo, *freeargs, **opts):
    """interactively edit changeset history

    This command lets you edit a linear series of changesets (up to
    and including the working directory, which should be clean).
    You can:

    - `pick` to [re]order a changeset

    - `drop` to omit a changeset

    - `mess` to reword the changeset commit message

    - `fold` to combine it with the preceding changeset (using the later date)

    - `roll` like fold, but discarding this commit's description and date

    - `edit` to edit this changeset (preserving date)

    - `base` to check out a changeset and apply further changesets from there

    There are a number of ways to select the root changeset:

    - Specify ANCESTOR directly

    - Use --outgoing -- it will be the first linear changeset not
      included in destination. (See :hg:`help config.paths.default-push`)

    - Otherwise, the value from the "histedit.defaultrev" config option
      is used as a revset to select the base revision when ANCESTOR is not
      specified. The first revision returned by the revset is used. By
      default, this selects the editable history that is unique to the
      ancestry of the working directory.

    .. container:: verbose

       If you use --outgoing, this command will abort if there are ambiguous
       outgoing revisions. For example, if there are multiple branches
       containing outgoing revisions.

       Use "min(outgoing() and ::.)" or a similar revset specification
       instead of --outgoing to specify the edit target revision exactly in
       such an ambiguous situation. See :hg:`help revsets` for details about
       selecting revisions.

    .. container:: verbose

       Examples:

         - A number of changes have been made.
           Revision 3 is no longer needed.

           Start history editing from revision 3::

             hg histedit -r 3

           An editor opens, containing the list of revisions,
           with specific actions specified::

             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 0a9639fcda9d 5 Morgify the cromulancy

           Additional information about the possible actions
           to take appears below the list of revisions.

           To remove revision 3 from the history,
           its action (at the beginning of the relevant line)
           is changed to 'drop'::

             drop 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 0a9639fcda9d 5 Morgify the cromulancy

         - A number of changes have been made.
           Revisions 2 and 4 need to be swapped.

           Start history editing from revision 2::

             hg histedit -r 2

           An editor opens, containing the list of revisions,
           with specific actions specified::

             pick 252a1af424ad 2 Blorb a morgwazzle
             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog

           To swap revisions 2 and 4, their lines are swapped
           in the editor::

             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 252a1af424ad 2 Blorb a morgwazzle

    Returns 0 on success, 1 if user intervention is required (not only
    for the intentional "edit" command, but also for resolving unexpected
    conflicts).
    """
    opts = pycompat.byteskwargs(opts)

    # kludge: _chistedit only works for starting an edit, not aborting
    # or continuing, so fall back to regular _texthistedit for those
    # operations.
    if ui.interface(b'histedit') == b'curses' and _getgoal(opts) == goalnew:
        return _chistedit(ui, repo, freeargs, opts)
    return _texthistedit(ui, repo, freeargs, opts)


def _texthistedit(ui, repo, freeargs, opts):
    state = histeditstate(repo)
    with repo.wlock() as wlock, repo.lock() as lock:
        state.wlock = wlock
        state.lock = lock
        _histedit(ui, repo, state, freeargs, opts)


goalcontinue = b'continue'
goalabort = b'abort'
goaleditplan = b'edit-plan'
goalnew = b'new'


def _getgoal(opts):
    if opts.get(b'continue'):
        return goalcontinue
    if opts.get(b'abort'):
        return goalabort
    if opts.get(b'edit_plan'):
        return goaleditplan
    return goalnew


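# Editor's sketch (hedged, not part of histedit): _getgoal's checks are
# ordered, so --continue wins over --abort if both were somehow set, and the
# absence of all three flags means a fresh edit.  A minimal stand-alone
# restatement of that precedence (defined but never called, str keys used
# for brevity):
def _example_goal_precedence():
    def getgoal(opts):
        if opts.get('continue'):
            return 'continue'
        if opts.get('abort'):
            return 'abort'
        if opts.get('edit_plan'):
            return 'edit-plan'
        return 'new'

    assert getgoal({'continue': True, 'abort': True}) == 'continue'
    assert getgoal({}) == 'new'

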
def _readfile(ui, path):
    if path == b'-':
        with ui.timeblockedsection(b'histedit'):
            return ui.fin.read()
    else:
        with open(path, b'rb') as f:
            return f.read()


def _validateargs(ui, repo, freeargs, opts, goal, rules, revs):
    # TODO only abort if we try to histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise error.StateError(_(b'source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get(b'outgoing')
    editplan = opts.get(b'edit_plan')
    abort = opts.get(b'abort')
    force = opts.get(b'force')
    if force and not outg:
        raise error.InputError(_(b'--force only allowed with --outgoing'))
    if goal == b'continue':
        if any((outg, abort, revs, freeargs, rules, editplan)):
            raise error.InputError(_(b'no arguments allowed with --continue'))
    elif goal == b'abort':
        if any((outg, revs, freeargs, rules, editplan)):
            raise error.InputError(_(b'no arguments allowed with --abort'))
    elif goal == b'edit-plan':
        if any((outg, revs, freeargs)):
            raise error.InputError(
                _(b'only --commands argument allowed with --edit-plan')
            )
    else:
        if outg:
            if revs:
                raise error.InputError(
                    _(b'no revisions allowed with --outgoing')
                )
            if len(freeargs) > 1:
                raise error.InputError(
                    _(b'only one repo argument allowed with --outgoing')
                )
        else:
            revs.extend(freeargs)
            if len(revs) == 0:
                defaultrev = destutil.desthistedit(ui, repo)
                if defaultrev is not None:
                    revs.append(defaultrev)

            if len(revs) != 1:
                raise error.InputError(
                    _(b'histedit requires exactly one ancestor revision')
                )


def _histedit(ui, repo, state, freeargs, opts):
    fm = ui.formatter(b'histedit', opts)
    fm.startitem()
    goal = _getgoal(opts)
    revs = opts.get(b'rev', [])
    nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
    rules = opts.get(b'commands', b'')
    state.keep = opts.get(b'keep', False)

    _validateargs(ui, repo, freeargs, opts, goal, rules, revs)

    hastags = False
    if revs:
        revs = logcmdutil.revrange(repo, revs)
        ctxs = [repo[rev] for rev in revs]
        for ctx in ctxs:
            tags = [tag for tag in ctx.tags() if tag != b'tip']
            if not hastags:
                hastags = len(tags)
    if hastags:
        if ui.promptchoice(
            _(
                b'warning: tags associated with the given'
                b' changeset will be lost after histedit.\n'
                b'do you want to continue (yN)? $$ &Yes $$ &No'
            ),
            default=1,
        ):
            raise error.CanceledError(_(b'histedit cancelled\n'))
    # rebuild state
    if goal == goalcontinue:
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == goaleditplan:
        _edithisteditplan(ui, repo, state, rules)
        return
    elif goal == goalabort:
        _aborthistedit(ui, repo, state, nobackup=nobackup)
        return
    else:
        # goal == goalnew
        _newhistedit(ui, repo, state, revs, freeargs, opts)

    _continuehistedit(ui, repo, state)
    _finishhistedit(ui, repo, state, fm)
    fm.end()


def _continuehistedit(ui, repo, state):
    """This function runs after either:
    - bootstrapcontinue (if the goal is 'continue')
    - _newhistedit (if the goal is 'new')
    """
    # preprocess rules so that we can hide inner folds from the user
    # and only show one editor
    actions = state.actions[:]
    for idx, (action, nextact) in enumerate(zip(actions, actions[1:] + [None])):
        if action.verb == b'fold' and nextact and nextact.verb == b'fold':
            state.actions[idx].__class__ = _multifold

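    # Editor's illustration (hedged): zip(actions, actions[1:] + [None])
    # walks consecutive pairs, so for the plan [pick, fold, fold, pick] only
    # the fold that is followed by another fold is retagged _multifold, and
    # the user sees a single editor for the whole fold chain.
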
    # Force an initial state file write, so the user can run --abort/continue
    # even if there's an exception before the first transaction serialize.
    state.write()

    tr = None
    # Don't use singletransaction by default since it rolls the entire
    # transaction back if an unexpected exception happens (like a
    # pretxncommit hook throws, or the user aborts the commit msg editor).
    if ui.configbool(b"histedit", b"singletransaction"):
        # Don't use a 'with' for the transaction, since actions may close
        # and reopen a transaction. For example, if the action executes an
        # external process it may choose to commit the transaction first.
        tr = repo.transaction(b'histedit')
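    # Editor's note (hedged): the opt-in checked above corresponds to an
    # hgrc entry along these lines:
    #
    #   [histedit]
    #   singletransaction = yes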
    progress = ui.makeprogress(
        _(b"editing"), unit=_(b'changes'), total=len(state.actions)
    )
    with progress, util.acceptintervention(tr):
        while state.actions:
            state.write(tr=tr)
            actobj = state.actions[0]
            progress.increment(item=actobj.torule())
            ui.debug(
                b'histedit: processing %s %s\n' % (actobj.verb, actobj.torule())
            )
            parentctx, replacement_ = actobj.run()
            state.parentctxnode = parentctx.node()
            state.replacements.extend(replacement_)
            state.actions.pop(0)

    state.write()


def _finishhistedit(ui, repo, state, fm):
    """This action runs when histedit is finishing its session"""
    mergemod.update(repo[state.parentctxnode])

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in pycompat.iteritems(mapping):
            if not succs:
                ui.debug(b'histedit: %s is dropped\n' % short(prec))
            else:
                ui.debug(
                    b'histedit: %s is replaced by %s\n'
                    % (short(prec), short(succs[0]))
                )
                if len(succs) > 1:
                    m = b'histedit: %s'
                    for n in succs[1:]:
                        ui.debug(m % short(n))

    if not state.keep:
        if mapping:
            movetopmostbookmarks(repo, state.topmost, ntm)
            # TODO update mq state
    else:
        mapping = {}

    for n in tmpnodes:
        if n in repo:
            mapping[n] = ()

    # remove entries about unknown nodes
    has_node = repo.unfiltered().changelog.index.has_node
    mapping = {
        k: v
        for k, v in mapping.items()
        if has_node(k) and all(has_node(n) for n in v)
    }
    scmutil.cleanupnodes(repo, mapping, b'histedit')
    hf = fm.hexfunc
    fl = fm.formatlist
    fd = fm.formatdict
    nodechanges = fd(
        {
            hf(oldn): fl([hf(n) for n in newn], name=b'node')
            for oldn, newn in pycompat.iteritems(mapping)
        },
        key=b"oldnode",
        value=b"newnodes",
    )
    fm.data(nodechanges=nodechanges)

    state.clear()
    if os.path.exists(repo.sjoin(b'undo')):
        os.unlink(repo.sjoin(b'undo'))
    if repo.vfs.exists(b'histedit-last-edit.txt'):
        repo.vfs.unlink(b'histedit-last-edit.txt')


def _aborthistedit(ui, repo, state, nobackup=False):
    try:
        state.read()
        __, leafs, tmpnodes, __ = processreplacement(state)
        ui.debug(b'restore wc to old parent %s\n' % short(state.topmost))

        # Recover our old commits if necessary
        if not state.topmost in repo and state.backupfile:
            backupfile = repo.vfs.join(state.backupfile)
            f = hg.openpath(ui, backupfile)
            gen = exchange.readbundle(ui, f, backupfile)
            with repo.transaction(b'histedit.abort') as tr:
                bundle2.applybundle(
                    repo,
                    gen,
                    tr,
                    source=b'histedit',
                    url=b'bundle:' + backupfile,
                )

            os.remove(backupfile)

        # check whether we should update away
        if repo.unfiltered().revs(
            b'parents() and (%n or %ln::)',
            state.parentctxnode,
            leafs | tmpnodes,
        ):
            hg.clean(repo, state.topmost, show_stats=True, quietempty=True)
        cleanupnode(ui, repo, tmpnodes, nobackup=nobackup)
        cleanupnode(ui, repo, leafs, nobackup=nobackup)
    except Exception:
        if state.inprogress():
            ui.warn(
                _(
                    b'warning: encountered an exception during histedit '
                    b'--abort; the repository may not have been completely '
                    b'cleaned up\n'
                )
            )
        raise
    finally:
        state.clear()


def hgaborthistedit(ui, repo):
    state = histeditstate(repo)
    nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
    with repo.wlock() as wlock, repo.lock() as lock:
        state.wlock = wlock
        state.lock = lock
        _aborthistedit(ui, repo, state, nobackup=nobackup)


def _edithisteditplan(ui, repo, state, rules):
    state.read()
    if not rules:
        comment = geteditcomment(
            ui, short(state.parentctxnode), short(state.topmost)
        )
        rules = ruleeditor(repo, ui, state.actions, comment)
    else:
        rules = _readfile(ui, rules)
    actions = parserules(rules, state)
    ctxs = [repo[act.node] for act in state.actions if act.node]
    warnverifyactions(ui, repo, actions, state, ctxs)
    state.actions = actions
    state.write()


def _newhistedit(ui, repo, state, revs, freeargs, opts):
    outg = opts.get(b'outgoing')
    rules = opts.get(b'commands', b'')
    force = opts.get(b'force')

    cmdutil.checkunfinished(repo)
    cmdutil.bailifchanged(repo)

    topmost = repo.dirstate.p1()
    if outg:
        if freeargs:
            remote = freeargs[0]
        else:
            remote = None
        root = findoutgoing(ui, repo, remote, force, opts)
    else:
        rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
        if len(rr) != 1:
            raise error.InputError(
                _(
                    b'The specified revisions must have '
                    b'exactly one common root'
                )
            )
        root = rr[0].node()

    revs = between(repo, root, topmost, state.keep)
    if not revs:
        raise error.InputError(
            _(b'%s is not an ancestor of working directory') % short(root)
        )

    ctxs = [repo[r] for r in revs]

    wctx = repo[None]
    # Please don't ask me why `ancestors` is this value. I figured it
    # out with print-debugging, not by actually understanding what the
    # merge code is doing. :(
    ancs = [repo[b'.']]
    # Sniff-test to make sure we won't collide with untracked files in
    # the working directory. If we don't do this, we can get a
    # collision after we've started histedit and backing out gets ugly
    # for everyone, especially the user.
    for c in [ctxs[0].p1()] + ctxs:
        try:
            mergemod.calculateupdates(
                repo,
                wctx,
                c,
                ancs,
                # These parameters were determined by print-debugging
                # what happens later on inside histedit.
                branchmerge=False,
                force=False,
                acceptremote=False,
                followcopies=False,
            )
        except error.Abort:
            raise error.StateError(
                _(
                    b"untracked files in working directory conflict with files in %s"
                )
                % c
            )

    if not rules:
        comment = geteditcomment(ui, short(root), short(topmost))
        actions = [pick(state, r) for r in revs]
        rules = ruleeditor(repo, ui, actions, comment)
    else:
        rules = _readfile(ui, rules)
    actions = parserules(rules, state)
    warnverifyactions(ui, repo, actions, state, ctxs)

    parentctxnode = repo[root].p1().node()

    state.parentctxnode = parentctxnode
    state.actions = actions
    state.topmost = topmost
    state.replacements = []

    ui.log(
        b"histedit",
        b"%d actions to histedit\n",
        len(actions),
        histedit_num_actions=len(actions),
    )

    # Create a backup so we can always abort completely.
    backupfile = None
    if not obsolete.isenabled(repo, obsolete.createmarkersopt):
        backupfile = repair.backupbundle(
            repo, [parentctxnode], [topmost], root, b'histedit'
        )
    state.backupfile = backupfile


def _getsummary(ctx):
    # a common pattern is to extract the summary but default to the empty
    # string
    summary = ctx.description() or b''
    if summary:
        summary = summary.splitlines()[0]
    return summary


def bootstrapcontinue(ui, state, opts):
    repo = state.repo

    ms = mergestatemod.mergestate.read(repo)
    mergeutil.checkunresolved(ms)

    if state.actions:
        actobj = state.actions.pop(0)

        if _isdirtywc(repo):
            actobj.continuedirty()
            if _isdirtywc(repo):
                abortdirty()

        parentctx, replacements = actobj.continueclean()

        state.parentctxnode = parentctx.node()
        state.replacements.extend(replacements)

    return state


def between(repo, old, new, keep):
    """select and validate the set of revisions to edit

    When keep is false, the specified set can't have children."""
    revs = repo.revs(b'%n::%n', old, new)
    if revs and not keep:
        rewriteutil.precheck(repo, revs, b'edit')
        if repo.revs(b'(%ld) and merge()', revs):
            raise error.StateError(
                _(b'cannot edit history that contains merges')
            )
    return pycompat.maplist(repo.changelog.node, revs)


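# Editor's sketch (hedged, not part of histedit): the two revset templates
# used above, written out for a concrete pair of nodes.  b'%n::%n' expands
# to the inclusive range "old::new", and b'(%ld) and merge()' narrows that
# range to merge commits, which block the edit:
#
#   hg log -r '1a2b3c::4d5e6f'                 # candidate revisions to edit
#   hg log -r '(1a2b3c::4d5e6f) and merge()'   # merges that abort histedit
#
# (1a2b3c and 4d5e6f are placeholder hashes.)

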
def ruleeditor(repo, ui, actions, editcomment=b""):
    """open an editor to edit rules

    rules are in the format [ [act, ctx], ...] like in state.rules
    """
    if repo.ui.configbool(b"experimental", b"histedit.autoverb"):
        newact = util.sortdict()
        for act in actions:
            ctx = repo[act.node]
            summary = _getsummary(ctx)
            fword = summary.split(b' ', 1)[0].lower()
            added = False

            # if it doesn't end with the special character '!' just skip this
            if fword.endswith(b'!'):
                fword = fword[:-1]
                if fword in primaryactions | secondaryactions | tertiaryactions:
                    act.verb = fword
                    # get the target summary
                    tsum = summary[len(fword) + 1 :].lstrip()
                    # safe but slow: reverse iterate over the actions so we
                    # don't clash on two commits having the same summary
                    for na, l in reversed(list(pycompat.iteritems(newact))):
                        actx = repo[na.node]
                        asum = _getsummary(actx)
                        if asum == tsum:
                            added = True
                            l.append(act)
                            break

            if not added:
                newact[act] = []

        # copy over and flatten the new list
        actions = []
        for na, l in pycompat.iteritems(newact):
            actions.append(na)
            actions += l

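    # Editor's illustration (hedged): with autoverb enabled, a commit whose
    # summary is b'fold! fix typo' is retagged with verb b'fold' and queued
    # behind the earlier action whose summary equals b'fix typo'; summaries
    # without a recognized 'verb!' prefix pass through unchanged.
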
2383 rules = b'\n'.join([act.torule() for act in actions])
2396 rules = b'\n'.join([act.torule() for act in actions])
2384 rules += b'\n\n'
2397 rules += b'\n\n'
2385 rules += editcomment
2398 rules += editcomment
2386 rules = ui.edit(
2399 rules = ui.edit(
2387 rules,
2400 rules,
2388 ui.username(),
2401 ui.username(),
2389 {b'prefix': b'histedit'},
2402 {b'prefix': b'histedit'},
2390 repopath=repo.path,
2403 repopath=repo.path,
2391 action=b'histedit',
2404 action=b'histedit',
2392 )
2405 )
2393
2406
2394 # Save edit rules in .hg/histedit-last-edit.txt in case
2407 # Save edit rules in .hg/histedit-last-edit.txt in case
2395 # the user needs to ask for help after something
2408 # the user needs to ask for help after something
2396 # surprising happens.
2409 # surprising happens.
2397 with repo.vfs(b'histedit-last-edit.txt', b'wb') as f:
2410 with repo.vfs(b'histedit-last-edit.txt', b'wb') as f:
2398 f.write(rules)
2411 f.write(rules)
2399
2412
2400 return rules
2413 return rules
2401
2414
2402
2415
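For reference, a minimal sketch of the rules buffer that ruleeditor produces and reads back, with hypothetical changeset hashes and summaries (pick/fold/drop are among the real histedit verbs; the trailing comment block stands in for editcomment and is stripped by parserules below):

  pick 0123abcd add feature X
  fold 4567ef01 feature X: address review comments
  drop 89ab2345 temporary debugging commit

  # Commands (excerpt):
  #  p, pick = use commit
  #  f, fold = use commit, but combine it with the one above
  #  d, drop = remove commit from history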
2403 def parserules(rules, state):
2416 def parserules(rules, state):
2404 """Read the histedit rules string and return list of action objects"""
2417 """Read the histedit rules string and return list of action objects"""
2405 rules = [
2418 rules = [
2406 l
2419 l
2407 for l in (r.strip() for r in rules.splitlines())
2420 for l in (r.strip() for r in rules.splitlines())
2408 if l and not l.startswith(b'#')
2421 if l and not l.startswith(b'#')
2409 ]
2422 ]
2410 actions = []
2423 actions = []
2411 for r in rules:
2424 for r in rules:
2412 if b' ' not in r:
2425 if b' ' not in r:
2413 raise error.ParseError(_(b'malformed line "%s"') % r)
2426 raise error.ParseError(_(b'malformed line "%s"') % r)
2414 verb, rest = r.split(b' ', 1)
2427 verb, rest = r.split(b' ', 1)
2415
2428
2416 if verb not in actiontable:
2429 if verb not in actiontable:
2417 raise error.ParseError(_(b'unknown action "%s"') % verb)
2430 raise error.ParseError(_(b'unknown action "%s"') % verb)
2418
2431
2419 action = actiontable[verb].fromrule(state, rest)
2432 action = actiontable[verb].fromrule(state, rest)
2420 actions.append(action)
2433 actions.append(action)
2421 return actions
2434 return actions
2422
2435
2423
2436
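A self-contained sketch of the filtering and splitting done above, independent of Mercurial internals (hypothetical input; the real code additionally dispatches each verb through actiontable and raises ParseError on malformed lines):

  rules = b"pick 0123abcd add feature X\n# a comment\ndrop 89ab2345 debug\n"
  lines = [l for l in (r.strip() for r in rules.splitlines())
           if l and not l.startswith(b'#')]     # drop blanks and comments
  parsed = [l.split(b' ', 1) for l in lines]    # [verb, rest] pairs
  assert parsed == [[b'pick', b'0123abcd add feature X'],
                    [b'drop', b'89ab2345 debug']]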
2424 def warnverifyactions(ui, repo, actions, state, ctxs):
2437 def warnverifyactions(ui, repo, actions, state, ctxs):
2425 try:
2438 try:
2426 verifyactions(actions, state, ctxs)
2439 verifyactions(actions, state, ctxs)
2427 except error.ParseError:
2440 except error.ParseError:
2428 if repo.vfs.exists(b'histedit-last-edit.txt'):
2441 if repo.vfs.exists(b'histedit-last-edit.txt'):
2429 ui.warn(
2442 ui.warn(
2430 _(
2443 _(
2431 b'warning: histedit rules saved '
2444 b'warning: histedit rules saved '
2432 b'to: .hg/histedit-last-edit.txt\n'
2445 b'to: .hg/histedit-last-edit.txt\n'
2433 )
2446 )
2434 )
2447 )
2435 raise
2448 raise
2436
2449
2437
2450
2438 def verifyactions(actions, state, ctxs):
2451 def verifyactions(actions, state, ctxs):
2439 """Verify that there exists exactly one action per given changeset and
2452 """Verify that there exists exactly one action per given changeset and
2440 other constraints.
2453 other constraints.
2441
2454
2442 Will abort if there are too many or too few rules, a malformed rule,
2455 Will abort if there are too many or too few rules, a malformed rule,
2443 or a rule on a changeset outside of the user-given range.
2456 or a rule on a changeset outside of the user-given range.
2444 """
2457 """
2445 expected = {c.node() for c in ctxs}
2458 expected = {c.node() for c in ctxs}
2446 seen = set()
2459 seen = set()
2447 prev = None
2460 prev = None
2448
2461
2449 if actions and actions[0].verb in [b'roll', b'fold']:
2462 if actions and actions[0].verb in [b'roll', b'fold']:
2450 raise error.ParseError(
2463 raise error.ParseError(
2451 _(b'first changeset cannot use verb "%s"') % actions[0].verb
2464 _(b'first changeset cannot use verb "%s"') % actions[0].verb
2452 )
2465 )
2453
2466
2454 for action in actions:
2467 for action in actions:
2455 action.verify(prev, expected, seen)
2468 action.verify(prev, expected, seen)
2456 prev = action
2469 prev = action
2457 if action.node is not None:
2470 if action.node is not None:
2458 seen.add(action.node)
2471 seen.add(action.node)
2459 missing = sorted(expected - seen) # sort to stabilize output
2472 missing = sorted(expected - seen) # sort to stabilize output
2460
2473
2461 if state.repo.ui.configbool(b'histedit', b'dropmissing'):
2474 if state.repo.ui.configbool(b'histedit', b'dropmissing'):
2462 if len(actions) == 0:
2475 if len(actions) == 0:
2463 raise error.ParseError(
2476 raise error.ParseError(
2464 _(b'no rules provided'),
2477 _(b'no rules provided'),
2465 hint=_(b'use strip extension to remove commits'),
2478 hint=_(b'use strip extension to remove commits'),
2466 )
2479 )
2467
2480
2468 drops = [drop(state, n) for n in missing]
2481 drops = [drop(state, n) for n in missing]
2469 # put them at the beginning so they execute immediately and
2482 # put them at the beginning so they execute immediately and
2470 # don't show in the edit-plan in the future
2483 # don't show in the edit-plan in the future
2471 actions[:0] = drops
2484 actions[:0] = drops
2472 elif missing:
2485 elif missing:
2473 raise error.ParseError(
2486 raise error.ParseError(
2474 _(b'missing rules for changeset %s') % short(missing[0]),
2487 _(b'missing rules for changeset %s') % short(missing[0]),
2475 hint=_(
2488 hint=_(
2476 b'use "drop %s" to discard, see also: '
2489 b'use "drop %s" to discard, see also: '
2477 b"'hg help -e histedit.config'"
2490 b"'hg help -e histedit.config'"
2478 )
2491 )
2479 % short(missing[0]),
2492 % short(missing[0]),
2480 )
2493 )
2481
2494
2482
2495
2483 def adjustreplacementsfrommarkers(repo, oldreplacements):
2496 def adjustreplacementsfrommarkers(repo, oldreplacements):
2484 """Adjust replacements from obsolescence markers
2497 """Adjust replacements from obsolescence markers
2485
2498
2486 Replacements structure is originally generated based on
2499 Replacements structure is originally generated based on
2487 histedit's state and does not account for changes that are
2500 histedit's state and does not account for changes that are
2488 not recorded there. This function fixes that by adding
2501 not recorded there. This function fixes that by adding
2489 data read from obsolescence markers"""
2502 data read from obsolescence markers"""
2490 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2503 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2491 return oldreplacements
2504 return oldreplacements
2492
2505
2493 unfi = repo.unfiltered()
2506 unfi = repo.unfiltered()
2494 get_rev = unfi.changelog.index.get_rev
2507 get_rev = unfi.changelog.index.get_rev
2495 obsstore = repo.obsstore
2508 obsstore = repo.obsstore
2496 newreplacements = list(oldreplacements)
2509 newreplacements = list(oldreplacements)
2497 oldsuccs = [r[1] for r in oldreplacements]
2510 oldsuccs = [r[1] for r in oldreplacements]
2498 # successors that have already been added to succstocheck once
2511 # successors that have already been added to succstocheck once
2499 seensuccs = set().union(
2512 seensuccs = set().union(
2500 *oldsuccs
2513 *oldsuccs
2501 ) # create a set from an iterable of tuples
2514 ) # create a set from an iterable of tuples
2502 succstocheck = list(seensuccs)
2515 succstocheck = list(seensuccs)
2503 while succstocheck:
2516 while succstocheck:
2504 n = succstocheck.pop()
2517 n = succstocheck.pop()
2505 missing = get_rev(n) is None
2518 missing = get_rev(n) is None
2506 markers = obsstore.successors.get(n, ())
2519 markers = obsstore.successors.get(n, ())
2507 if missing and not markers:
2520 if missing and not markers:
2508 # dead end, mark it as such
2521 # dead end, mark it as such
2509 newreplacements.append((n, ()))
2522 newreplacements.append((n, ()))
2510 for marker in markers:
2523 for marker in markers:
2511 nsuccs = marker[1]
2524 nsuccs = marker[1]
2512 newreplacements.append((n, nsuccs))
2525 newreplacements.append((n, nsuccs))
2513 for nsucc in nsuccs:
2526 for nsucc in nsuccs:
2514 if nsucc not in seensuccs:
2527 if nsucc not in seensuccs:
2515 seensuccs.add(nsucc)
2528 seensuccs.add(nsucc)
2516 succstocheck.append(nsucc)
2529 succstocheck.append(nsucc)
2517
2530
2518 return newreplacements
2531 return newreplacements
2519
2532
2520
2533
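A toy illustration of the successor walk above, with a simplified marker table mapping each node to tuples of successors (hypothetical string ids; real obsstore markers carry more fields and the walk starts from every old successor):

  successors = {'A': [('B',)], 'B': [('C',)]}   # A obsoleted by B, B by C
  seen, stack, out = {'A'}, ['A'], []
  while stack:
      n = stack.pop()
      for nsuccs in successors.get(n, ()):      # one successor tuple per marker
          out.append((n, nsuccs))
          for s in nsuccs:
              if s not in seen:
                  seen.add(s)
                  stack.append(s)
  assert out == [('A', ('B',)), ('B', ('C',))]  # the chain is fully resolved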
2521 def processreplacement(state):
2534 def processreplacement(state):
2522 """process the list of replacements to return
2535 """process the list of replacements to return
2523
2536
2524 1) the final mapping between original and created nodes
2537 1) the final mapping between original and created nodes
2525 2) the list of temporary nodes created by histedit
2538 2) the list of temporary nodes created by histedit
2526 3) the list of new commits created by histedit"""
2539 3) the list of new commits created by histedit"""
2527 replacements = adjustreplacementsfrommarkers(state.repo, state.replacements)
2540 replacements = adjustreplacementsfrommarkers(state.repo, state.replacements)
2528 allsuccs = set()
2541 allsuccs = set()
2529 replaced = set()
2542 replaced = set()
2530 fullmapping = {}
2543 fullmapping = {}
2531 # initialize basic set
2544 # initialize basic set
2532 # fullmapping records all operations recorded in replacement
2545 # fullmapping records all operations recorded in replacement
2533 for rep in replacements:
2546 for rep in replacements:
2534 allsuccs.update(rep[1])
2547 allsuccs.update(rep[1])
2535 replaced.add(rep[0])
2548 replaced.add(rep[0])
2536 fullmapping.setdefault(rep[0], set()).update(rep[1])
2549 fullmapping.setdefault(rep[0], set()).update(rep[1])
2537 new = allsuccs - replaced
2550 new = allsuccs - replaced
2538 tmpnodes = allsuccs & replaced
2551 tmpnodes = allsuccs & replaced
2539 # Reduce fullmapping into a direct relation between original nodes
2552 # Reduce fullmapping into a direct relation between original nodes
2540 # and the final nodes created during history editing
2553 # and the final nodes created during history editing
2541 # Dropped changesets are replaced by an empty list
2554 # Dropped changesets are replaced by an empty list
2542 toproceed = set(fullmapping)
2555 toproceed = set(fullmapping)
2543 final = {}
2556 final = {}
2544 while toproceed:
2557 while toproceed:
2545 for x in list(toproceed):
2558 for x in list(toproceed):
2546 succs = fullmapping[x]
2559 succs = fullmapping[x]
2547 for s in list(succs):
2560 for s in list(succs):
2548 if s in toproceed:
2561 if s in toproceed:
2549 # non final node with unknown closure
2562 # non final node with unknown closure
2550 # We can't process this now
2563 # We can't process this now
2551 break
2564 break
2552 elif s in final:
2565 elif s in final:
2553 # non final node, replace with closure
2566 # non final node, replace with closure
2554 succs.remove(s)
2567 succs.remove(s)
2555 succs.update(final[s])
2568 succs.update(final[s])
2556 else:
2569 else:
2557 final[x] = succs
2570 final[x] = succs
2558 toproceed.remove(x)
2571 toproceed.remove(x)
2559 # remove tmpnodes from final mapping
2572 # remove tmpnodes from final mapping
2560 for n in tmpnodes:
2573 for n in tmpnodes:
2561 del final[n]
2574 del final[n]
2562 # we expect all changes involved in final to exist in the repo
2575 # we expect all changes involved in final to exist in the repo
2563 # turn `final` into list (topologically sorted)
2576 # turn `final` into list (topologically sorted)
2564 get_rev = state.repo.changelog.index.get_rev
2577 get_rev = state.repo.changelog.index.get_rev
2565 for prec, succs in final.items():
2578 for prec, succs in final.items():
2566 final[prec] = sorted(succs, key=get_rev)
2579 final[prec] = sorted(succs, key=get_rev)
2567
2580
2568 # compute the topmost element (necessary for bookmarks)
2581 # compute the topmost element (necessary for bookmarks)
2569 if new:
2582 if new:
2570 newtopmost = sorted(new, key=state.repo.changelog.rev)[-1]
2583 newtopmost = sorted(new, key=state.repo.changelog.rev)[-1]
2571 elif not final:
2584 elif not final:
2572 # Nothing rewritten at all. We won't need `newtopmost`:
2585 # Nothing rewritten at all. We won't need `newtopmost`:
2573 # it is the same as `oldtopmost` and `processreplacement` knows it
2586 # it is the same as `oldtopmost` and `processreplacement` knows it
2574 newtopmost = None
2587 newtopmost = None
2575 else:
2588 else:
2576 # everybody died. The newtopmost is the parent of the root.
2589 # everybody died. The newtopmost is the parent of the root.
2577 r = state.repo.changelog.rev
2590 r = state.repo.changelog.rev
2578 newtopmost = state.repo[sorted(final, key=r)[0]].p1().node()
2591 newtopmost = state.repo[sorted(final, key=r)[0]].p1().node()
2579
2592
2580 return final, tmpnodes, new, newtopmost
2593 return final, tmpnodes, new, newtopmost
2581
2594
2582
2595
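A self-contained sketch of the fixed-point reduction above, using plain strings as stand-in node ids (hypothetical; the real loop is written with for/else over binary nodes, but the outcome is the same):

  fullmapping = {'A': {'B'}, 'B': {'C'}}        # A -> B, then B -> C
  allsuccs, replaced = {'B', 'C'}, set(fullmapping)
  tmpnodes = allsuccs & replaced                # {'B'}: an intermediate node
  toproceed, final = set(fullmapping), {}
  while toproceed:
      for x in list(toproceed):
          succs = fullmapping[x]
          if any(s in toproceed for s in succs):
              continue                          # some closure is still unknown
          for s in list(succs):
              if s in final:
                  succs.remove(s)
                  succs.update(final[s])        # substitute the known closure
          final[x] = succs
          toproceed.remove(x)
  for n in tmpnodes:
      del final[n]                              # drop temporary nodes
  assert final == {'A': {'C'}}                  # A was ultimately rewritten to C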
2583 def movetopmostbookmarks(repo, oldtopmost, newtopmost):
2596 def movetopmostbookmarks(repo, oldtopmost, newtopmost):
2584 """Move bookmark from oldtopmost to newly created topmost
2597 """Move bookmark from oldtopmost to newly created topmost
2585
2598
2586 This is arguably a feature and we may only want that for the active
2599 This is arguably a feature and we may only want that for the active
2587 bookmark. But the behavior is kept compatible with the old version for now.
2600 bookmark. But the behavior is kept compatible with the old version for now.
2588 """
2601 """
2589 if not oldtopmost or not newtopmost:
2602 if not oldtopmost or not newtopmost:
2590 return
2603 return
2591 oldbmarks = repo.nodebookmarks(oldtopmost)
2604 oldbmarks = repo.nodebookmarks(oldtopmost)
2592 if oldbmarks:
2605 if oldbmarks:
2593 with repo.lock(), repo.transaction(b'histedit') as tr:
2606 with repo.lock(), repo.transaction(b'histedit') as tr:
2594 marks = repo._bookmarks
2607 marks = repo._bookmarks
2595 changes = []
2608 changes = []
2596 for name in oldbmarks:
2609 for name in oldbmarks:
2597 changes.append((name, newtopmost))
2610 changes.append((name, newtopmost))
2598 marks.applychanges(repo, tr, changes)
2611 marks.applychanges(repo, tr, changes)
2599
2612
2600
2613
2601 def cleanupnode(ui, repo, nodes, nobackup=False):
2614 def cleanupnode(ui, repo, nodes, nobackup=False):
2602 """strip a group of nodes from the repository
2615 """strip a group of nodes from the repository
2603
2616
2604 The set of nodes to strip may contain unknown nodes."""
2617 The set of nodes to strip may contain unknown nodes."""
2605 with repo.lock():
2618 with repo.lock():
2606 # do not let filtering get in the way of the cleanse
2619 # do not let filtering get in the way of the cleanse
2607 # we should probably get rid of obsolescence markers created during the
2620 # we should probably get rid of obsolescence markers created during the
2608 # histedit, but we currently do not have such information.
2621 # histedit, but we currently do not have such information.
2609 repo = repo.unfiltered()
2622 repo = repo.unfiltered()
2610 # Find all nodes that need to be stripped
2623 # Find all nodes that need to be stripped
2611 # (we use %lr instead of %ln to silently ignore unknown items)
2624 # (we use %lr instead of %ln to silently ignore unknown items)
2612 has_node = repo.changelog.index.has_node
2625 has_node = repo.changelog.index.has_node
2613 nodes = sorted(n for n in nodes if has_node(n))
2626 nodes = sorted(n for n in nodes if has_node(n))
2614 roots = [c.node() for c in repo.set(b"roots(%ln)", nodes)]
2627 roots = [c.node() for c in repo.set(b"roots(%ln)", nodes)]
2615 if roots:
2628 if roots:
2616 backup = not nobackup
2629 backup = not nobackup
2617 repair.strip(ui, repo, roots, backup=backup)
2630 repair.strip(ui, repo, roots, backup=backup)
2618
2631
2619
2632
2620 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
2633 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
2621 if isinstance(nodelist, bytes):
2634 if isinstance(nodelist, bytes):
2622 nodelist = [nodelist]
2635 nodelist = [nodelist]
2623 state = histeditstate(repo)
2636 state = histeditstate(repo)
2624 if state.inprogress():
2637 if state.inprogress():
2625 state.read()
2638 state.read()
2626 histedit_nodes = {
2639 histedit_nodes = {
2627 action.node for action in state.actions if action.node
2640 action.node for action in state.actions if action.node
2628 }
2641 }
2629 common_nodes = histedit_nodes & set(nodelist)
2642 common_nodes = histedit_nodes & set(nodelist)
2630 if common_nodes:
2643 if common_nodes:
2631 raise error.Abort(
2644 raise error.Abort(
2632 _(b"histedit in progress, can't strip %s")
2645 _(b"histedit in progress, can't strip %s")
2633 % b', '.join(short(x) for x in common_nodes)
2646 % b', '.join(short(x) for x in common_nodes)
2634 )
2647 )
2635 return orig(ui, repo, nodelist, *args, **kwargs)
2648 return orig(ui, repo, nodelist, *args, **kwargs)
2636
2649
2637
2650
2638 extensions.wrapfunction(repair, b'strip', stripwrapper)
2651 extensions.wrapfunction(repair, b'strip', stripwrapper)
2639
2652
2640
2653
2641 def summaryhook(ui, repo):
2654 def summaryhook(ui, repo):
2642 state = histeditstate(repo)
2655 state = histeditstate(repo)
2643 if not state.inprogress():
2656 if not state.inprogress():
2644 return
2657 return
2645 state.read()
2658 state.read()
2646 if state.actions:
2659 if state.actions:
2647 # i18n: column positioning for "hg summary"
2660 # i18n: column positioning for "hg summary"
2648 ui.write(
2661 ui.write(
2649 _(b'hist: %s (histedit --continue)\n')
2662 _(b'hist: %s (histedit --continue)\n')
2650 % (
2663 % (
2651 ui.label(_(b'%d remaining'), b'histedit.remaining')
2664 ui.label(_(b'%d remaining'), b'histedit.remaining')
2652 % len(state.actions)
2665 % len(state.actions)
2653 )
2666 )
2654 )
2667 )
2655
2668
2656
2669
2657 def extsetup(ui):
2670 def extsetup(ui):
2658 cmdutil.summaryhooks.add(b'histedit', summaryhook)
2671 cmdutil.summaryhooks.add(b'histedit', summaryhook)
2659 statemod.addunfinished(
2672 statemod.addunfinished(
2660 b'histedit',
2673 b'histedit',
2661 fname=b'histedit-state',
2674 fname=b'histedit-state',
2662 allowcommit=True,
2675 allowcommit=True,
2663 continueflag=True,
2676 continueflag=True,
2664 abortfunc=hgaborthistedit,
2677 abortfunc=hgaborthistedit,
2665 )
2678 )
@@ -1,1389 +1,1390 b''
1 # Infinite push
1 # Infinite push
2 #
2 #
3 # Copyright 2016 Facebook, Inc.
3 # Copyright 2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8
8
9 IMPORTANT: if you use this extension, please contact
9 IMPORTANT: if you use this extension, please contact
10 mercurial-devel@mercurial-scm.org ASAP. This extension is believed to
10 mercurial-devel@mercurial-scm.org ASAP. This extension is believed to
11 be unused and barring learning of users of this functionality, we will
11 be unused and barring learning of users of this functionality, we will
12 delete this code at the end of 2020.
12 delete this code at the end of 2020.
13
13
14 [infinitepush]
14 [infinitepush]
15 # Server-side and client-side option. Pattern of the infinitepush bookmark
15 # Server-side and client-side option. Pattern of the infinitepush bookmark
16 branchpattern = PATTERN
16 branchpattern = PATTERN
17
17
18 # Server or client
18 # Server or client
19 server = False
19 server = False
20
20
21 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
21 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
22 indextype = disk
22 indextype = disk
23
23
24 # Server-side option. Used only if indextype=sql.
24 # Server-side option. Used only if indextype=sql.
25 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
25 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
26 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
26 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
27
27
28 # Server-side option. Used only if indextype=disk.
28 # Server-side option. Used only if indextype=disk.
29 # Filesystem path to the index store
29 # Filesystem path to the index store
30 indexpath = PATH
30 indexpath = PATH
31
31
32 # Server-side option. Possible values: 'disk' or 'external'
32 # Server-side option. Possible values: 'disk' or 'external'
33 # Fails if not set
33 # Fails if not set
34 storetype = disk
34 storetype = disk
35
35
36 # Server-side option.
36 # Server-side option.
37 # Path to the binary that will save bundle to the bundlestore
37 # Path to the binary that will save bundle to the bundlestore
38 # Formatted cmd line will be passed to it (see `put_args`)
38 # Formatted cmd line will be passed to it (see `put_args`)
39 put_binary = put
39 put_binary = put
40
40
41 # Server-side option. Used only if storetype=external.
41 # Server-side option. Used only if storetype=external.
42 # Format cmd-line string for put binary. Placeholder: {filename}
42 # Format cmd-line string for put binary. Placeholder: {filename}
43 put_args = {filename}
43 put_args = {filename}
44
44
45 # Server-side option.
45 # Server-side option.
46 # Path to the binary that gets bundles from the bundlestore.
46 # Path to the binary that gets bundles from the bundlestore.
47 # Formatted cmd line will be passed to it (see `get_args`)
47 # Formatted cmd line will be passed to it (see `get_args`)
48 get_binary = get
48 get_binary = get
49
49
50 # Server-side option. Used only if storetype=external.
50 # Server-side option. Used only if storetype=external.
51 # Format cmd-line string for get binary. Placeholders: {filename} {handle}
51 # Format cmd-line string for get binary. Placeholders: {filename} {handle}
52 get_args = {filename} {handle}
52 get_args = {filename} {handle}
53
53
54 # Server-side option
54 # Server-side option
55 logfile = FILE
55 logfile = FILE
56
56
57 # Server-side option
57 # Server-side option
58 loglevel = DEBUG
58 loglevel = DEBUG
59
59
60 # Server-side option. Used only if indextype=sql.
60 # Server-side option. Used only if indextype=sql.
61 # Sets mysql wait_timeout option.
61 # Sets mysql wait_timeout option.
62 waittimeout = 300
62 waittimeout = 300
63
63
64 # Server-side option. Used only if indextype=sql.
64 # Server-side option. Used only if indextype=sql.
65 # Sets mysql innodb_lock_wait_timeout option.
65 # Sets mysql innodb_lock_wait_timeout option.
66 locktimeout = 120
66 locktimeout = 120
67
67
68 # Server-side option. Used only if indextype=sql.
68 # Server-side option. Used only if indextype=sql.
69 # Name of the repository
69 # Name of the repository
70 reponame = ''
70 reponame = ''
71
71
72 # Client-side option. Used by --list-remote option. List of remote scratch
72 # Client-side option. Used by --list-remote option. List of remote scratch
73 # patterns to list if no patterns are specified.
73 # patterns to list if no patterns are specified.
74 defaultremotepatterns = ['*']
74 defaultremotepatterns = ['*']
75
75
76 # Instructs infinitepush to forward all received bundle2 parts to the
76 # Instructs infinitepush to forward all received bundle2 parts to the
77 # bundle for storage. Defaults to False.
77 # bundle for storage. Defaults to False.
78 storeallparts = True
78 storeallparts = True
79
79
80 # routes each incoming push to the bundlestore. defaults to False
80 # routes each incoming push to the bundlestore. defaults to False
81 pushtobundlestore = True
81 pushtobundlestore = True
82
82
83 [remotenames]
83 [remotenames]
84 # Client-side option
84 # Client-side option
85 # This option should be set only if remotenames extension is enabled.
85 # This option should be set only if remotenames extension is enabled.
86 # Whether remote bookmarks are tracked by remotenames extension.
86 # Whether remote bookmarks are tracked by remotenames extension.
87 bookmarks = True
87 bookmarks = True
88 """
88 """
89
89
90 from __future__ import absolute_import
90 from __future__ import absolute_import
91
91
92 import collections
92 import collections
93 import contextlib
93 import contextlib
94 import errno
94 import errno
95 import functools
95 import functools
96 import logging
96 import logging
97 import os
97 import os
98 import random
98 import random
99 import re
99 import re
100 import socket
100 import socket
101 import subprocess
101 import subprocess
102 import time
102 import time
103
103
104 from mercurial.node import (
104 from mercurial.node import (
105 bin,
105 bin,
106 hex,
106 hex,
107 )
107 )
108
108
109 from mercurial.i18n import _
109 from mercurial.i18n import _
110
110
111 from mercurial.pycompat import (
111 from mercurial.pycompat import (
112 getattr,
112 getattr,
113 open,
113 open,
114 )
114 )
115
115
116 from mercurial.utils import (
116 from mercurial.utils import (
117 procutil,
117 procutil,
118 stringutil,
118 stringutil,
119 urlutil,
119 urlutil,
120 )
120 )
121
121
122 from mercurial import (
122 from mercurial import (
123 bundle2,
123 bundle2,
124 changegroup,
124 changegroup,
125 commands,
125 commands,
126 discovery,
126 discovery,
127 encoding,
127 encoding,
128 error,
128 error,
129 exchange,
129 exchange,
130 extensions,
130 extensions,
131 hg,
131 hg,
132 localrepo,
132 localrepo,
133 phases,
133 phases,
134 pushkey,
134 pushkey,
135 pycompat,
135 pycompat,
136 registrar,
136 registrar,
137 util,
137 util,
138 wireprototypes,
138 wireprototypes,
139 wireprotov1peer,
139 wireprotov1peer,
140 wireprotov1server,
140 wireprotov1server,
141 )
141 )
142
142
143 from . import (
143 from . import (
144 bundleparts,
144 bundleparts,
145 common,
145 common,
146 )
146 )
147
147
148 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
148 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
149 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
149 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
150 # be specifying the version(s) of Mercurial they are tested with, or
150 # be specifying the version(s) of Mercurial they are tested with, or
151 # leave the attribute unspecified.
151 # leave the attribute unspecified.
152 testedwith = b'ships-with-hg-core'
152 testedwith = b'ships-with-hg-core'
153
153
154 configtable = {}
154 configtable = {}
155 configitem = registrar.configitem(configtable)
155 configitem = registrar.configitem(configtable)
156
156
157 configitem(
157 configitem(
158 b'infinitepush',
158 b'infinitepush',
159 b'server',
159 b'server',
160 default=False,
160 default=False,
161 )
161 )
162 configitem(
162 configitem(
163 b'infinitepush',
163 b'infinitepush',
164 b'storetype',
164 b'storetype',
165 default=b'',
165 default=b'',
166 )
166 )
167 configitem(
167 configitem(
168 b'infinitepush',
168 b'infinitepush',
169 b'indextype',
169 b'indextype',
170 default=b'',
170 default=b'',
171 )
171 )
172 configitem(
172 configitem(
173 b'infinitepush',
173 b'infinitepush',
174 b'indexpath',
174 b'indexpath',
175 default=b'',
175 default=b'',
176 )
176 )
177 configitem(
177 configitem(
178 b'infinitepush',
178 b'infinitepush',
179 b'storeallparts',
179 b'storeallparts',
180 default=False,
180 default=False,
181 )
181 )
182 configitem(
182 configitem(
183 b'infinitepush',
183 b'infinitepush',
184 b'reponame',
184 b'reponame',
185 default=b'',
185 default=b'',
186 )
186 )
187 configitem(
187 configitem(
188 b'scratchbranch',
188 b'scratchbranch',
189 b'storepath',
189 b'storepath',
190 default=b'',
190 default=b'',
191 )
191 )
192 configitem(
192 configitem(
193 b'infinitepush',
193 b'infinitepush',
194 b'branchpattern',
194 b'branchpattern',
195 default=b'',
195 default=b'',
196 )
196 )
197 configitem(
197 configitem(
198 b'infinitepush',
198 b'infinitepush',
199 b'pushtobundlestore',
199 b'pushtobundlestore',
200 default=False,
200 default=False,
201 )
201 )
202 configitem(
202 configitem(
203 b'experimental',
203 b'experimental',
204 b'server-bundlestore-bookmark',
204 b'server-bundlestore-bookmark',
205 default=b'',
205 default=b'',
206 )
206 )
207 configitem(
207 configitem(
208 b'experimental',
208 b'experimental',
209 b'infinitepush-scratchpush',
209 b'infinitepush-scratchpush',
210 default=False,
210 default=False,
211 )
211 )
212
212
213 experimental = b'experimental'
213 experimental = b'experimental'
214 configbookmark = b'server-bundlestore-bookmark'
214 configbookmark = b'server-bundlestore-bookmark'
215 configscratchpush = b'infinitepush-scratchpush'
215 configscratchpush = b'infinitepush-scratchpush'
216
216
217 scratchbranchparttype = bundleparts.scratchbranchparttype
217 scratchbranchparttype = bundleparts.scratchbranchparttype
218 revsetpredicate = registrar.revsetpredicate()
218 revsetpredicate = registrar.revsetpredicate()
219 templatekeyword = registrar.templatekeyword()
219 templatekeyword = registrar.templatekeyword()
220 _scratchbranchmatcher = lambda x: False
220 _scratchbranchmatcher = lambda x: False
221 _maybehash = re.compile('^[a-f0-9]+$').search
221 _maybehash = re.compile('^[a-f0-9]+$').search
222
222
223
223
224 def _buildexternalbundlestore(ui):
224 def _buildexternalbundlestore(ui):
225 put_args = ui.configlist(b'infinitepush', b'put_args', [])
225 put_args = ui.configlist(b'infinitepush', b'put_args', [])
226 put_binary = ui.config(b'infinitepush', b'put_binary')
226 put_binary = ui.config(b'infinitepush', b'put_binary')
227 if not put_binary:
227 if not put_binary:
228 raise error.Abort(b'put binary is not specified')
228 raise error.Abort(b'put binary is not specified')
229 get_args = ui.configlist(b'infinitepush', b'get_args', [])
229 get_args = ui.configlist(b'infinitepush', b'get_args', [])
230 get_binary = ui.config(b'infinitepush', b'get_binary')
230 get_binary = ui.config(b'infinitepush', b'get_binary')
231 if not get_binary:
231 if not get_binary:
232 raise error.Abort(b'get binary is not specified')
232 raise error.Abort(b'get binary is not specified')
233 from . import store
233 from . import store
234
234
235 return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
235 return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
236
236
237
237
238 def _buildsqlindex(ui):
238 def _buildsqlindex(ui):
239 sqlhost = ui.config(b'infinitepush', b'sqlhost')
239 sqlhost = ui.config(b'infinitepush', b'sqlhost')
240 if not sqlhost:
240 if not sqlhost:
241 raise error.Abort(_(b'please set infinitepush.sqlhost'))
241 raise error.Abort(_(b'please set infinitepush.sqlhost'))
242 host, port, db, user, password = sqlhost.split(b':')
242 host, port, db, user, password = sqlhost.split(b':')
243 reponame = ui.config(b'infinitepush', b'reponame')
243 reponame = ui.config(b'infinitepush', b'reponame')
244 if not reponame:
244 if not reponame:
245 raise error.Abort(_(b'please set infinitepush.reponame'))
245 raise error.Abort(_(b'please set infinitepush.reponame'))
246
246
247 logfile = ui.config(b'infinitepush', b'logfile', b'')
247 logfile = ui.config(b'infinitepush', b'logfile', b'')
248 waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
248 waittimeout = ui.configint(b'infinitepush', b'waittimeout', 300)
249 locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)
249 locktimeout = ui.configint(b'infinitepush', b'locktimeout', 120)
250 from . import sqlindexapi
250 from . import sqlindexapi
251
251
252 return sqlindexapi.sqlindexapi(
252 return sqlindexapi.sqlindexapi(
253 reponame,
253 reponame,
254 host,
254 host,
255 port,
255 port,
256 db,
256 db,
257 user,
257 user,
258 password,
258 password,
259 logfile,
259 logfile,
260 _getloglevel(ui),
260 _getloglevel(ui),
261 waittimeout=waittimeout,
261 waittimeout=waittimeout,
262 locktimeout=locktimeout,
262 locktimeout=locktimeout,
263 )
263 )
264
264
265
265
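For illustration, the sqlhost parsing above applied to a hypothetical value (note that the colon-separated format cannot carry IPv6 literals):

  sqlhost = b'127.0.0.1:3306:pushes:hguser:secret'
  host, port, db, user, password = sqlhost.split(b':')
  # host == b'127.0.0.1', port == b'3306', db == b'pushes',
  # user == b'hguser', password == b'secret'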
266 def _getloglevel(ui):
266 def _getloglevel(ui):
267 loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
267 loglevel = ui.config(b'infinitepush', b'loglevel', b'DEBUG')
268 numeric_loglevel = getattr(logging, loglevel.upper(), None)
268 numeric_loglevel = getattr(logging, loglevel.upper(), None)
269 if not isinstance(numeric_loglevel, int):
269 if not isinstance(numeric_loglevel, int):
270 raise error.Abort(_(b'invalid log level %s') % loglevel)
270 raise error.Abort(_(b'invalid log level %s') % loglevel)
271 return numeric_loglevel
271 return numeric_loglevel
272
272
273
273
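The lookup above relies on the standard logging module exposing its levels as integer attributes; a quick sketch:

  import logging
  assert getattr(logging, 'debug'.upper(), None) == logging.DEBUG == 10
  assert getattr(logging, 'bogus'.upper(), None) is None  # would be rejected with Abort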
274 def _tryhoist(ui, remotebookmark):
274 def _tryhoist(ui, remotebookmark):
275 """returns a bookmarks with hoisted part removed
275 """returns a bookmarks with hoisted part removed
276
276
277 Remotenames extension has a 'hoist' config that allows using remote
277 Remotenames extension has a 'hoist' config that allows using remote
278 bookmarks without specifying the remote path. For example, 'hg update master'
278 bookmarks without specifying the remote path. For example, 'hg update master'
279 works as well as 'hg update remote/master'. We want to allow the same in
279 works as well as 'hg update remote/master'. We want to allow the same in
280 infinitepush.
280 infinitepush.
281 """
281 """
282
282
283 if common.isremotebooksenabled(ui):
283 if common.isremotebooksenabled(ui):
284 hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
284 hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
285 if remotebookmark.startswith(hoist):
285 if remotebookmark.startswith(hoist):
286 return remotebookmark[len(hoist) :]
286 return remotebookmark[len(hoist) :]
287 return remotebookmark
287 return remotebookmark
288
288
289
289
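A quick sketch of the hoist stripping above, assuming remotenames.hoistedpeer is set to 'default' (hypothetical bookmark names):

  hoist = b'default' + b'/'
  name = b'default/master'
  if name.startswith(hoist):
      name = name[len(hoist):]
  assert name == b'master'  # so 'hg update master' can resolve the remote bookmark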
290 class bundlestore(object):
290 class bundlestore(object):
291 def __init__(self, repo):
291 def __init__(self, repo):
292 self._repo = repo
292 self._repo = repo
293 storetype = self._repo.ui.config(b'infinitepush', b'storetype')
293 storetype = self._repo.ui.config(b'infinitepush', b'storetype')
294 if storetype == b'disk':
294 if storetype == b'disk':
295 from . import store
295 from . import store
296
296
297 self.store = store.filebundlestore(self._repo.ui, self._repo)
297 self.store = store.filebundlestore(self._repo.ui, self._repo)
298 elif storetype == b'external':
298 elif storetype == b'external':
299 self.store = _buildexternalbundlestore(self._repo.ui)
299 self.store = _buildexternalbundlestore(self._repo.ui)
300 else:
300 else:
301 raise error.Abort(
301 raise error.Abort(
302 _(b'unknown infinitepush store type specified %s') % storetype
302 _(b'unknown infinitepush store type specified %s') % storetype
303 )
303 )
304
304
305 indextype = self._repo.ui.config(b'infinitepush', b'indextype')
305 indextype = self._repo.ui.config(b'infinitepush', b'indextype')
306 if indextype == b'disk':
306 if indextype == b'disk':
307 from . import fileindexapi
307 from . import fileindexapi
308
308
309 self.index = fileindexapi.fileindexapi(self._repo)
309 self.index = fileindexapi.fileindexapi(self._repo)
310 elif indextype == b'sql':
310 elif indextype == b'sql':
311 self.index = _buildsqlindex(self._repo.ui)
311 self.index = _buildsqlindex(self._repo.ui)
312 else:
312 else:
313 raise error.Abort(
313 raise error.Abort(
314 _(b'unknown infinitepush index type specified %s') % indextype
314 _(b'unknown infinitepush index type specified %s') % indextype
315 )
315 )
316
316
317
317
318 def _isserver(ui):
318 def _isserver(ui):
319 return ui.configbool(b'infinitepush', b'server')
319 return ui.configbool(b'infinitepush', b'server')
320
320
321
321
322 def reposetup(ui, repo):
322 def reposetup(ui, repo):
323 if _isserver(ui) and repo.local():
323 if _isserver(ui) and repo.local():
324 repo.bundlestore = bundlestore(repo)
324 repo.bundlestore = bundlestore(repo)
325
325
326
326
327 def extsetup(ui):
327 def extsetup(ui):
328 commonsetup(ui)
328 commonsetup(ui)
329 if _isserver(ui):
329 if _isserver(ui):
330 serverextsetup(ui)
330 serverextsetup(ui)
331 else:
331 else:
332 clientextsetup(ui)
332 clientextsetup(ui)
333
333
334
334
335 def commonsetup(ui):
335 def commonsetup(ui):
336 wireprotov1server.commands[b'listkeyspatterns'] = (
336 wireprotov1server.commands[b'listkeyspatterns'] = (
337 wireprotolistkeyspatterns,
337 wireprotolistkeyspatterns,
338 b'namespace patterns',
338 b'namespace patterns',
339 )
339 )
340 scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
340 scratchbranchpat = ui.config(b'infinitepush', b'branchpattern')
341 if scratchbranchpat:
341 if scratchbranchpat:
342 global _scratchbranchmatcher
342 global _scratchbranchmatcher
343 kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
343 kind, pat, _scratchbranchmatcher = stringutil.stringmatcher(
344 scratchbranchpat
344 scratchbranchpat
345 )
345 )
346
346
347
347
348 def serverextsetup(ui):
348 def serverextsetup(ui):
349 origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']
349 origpushkeyhandler = bundle2.parthandlermapping[b'pushkey']
350
350
351 def newpushkeyhandler(*args, **kwargs):
351 def newpushkeyhandler(*args, **kwargs):
352 bundle2pushkey(origpushkeyhandler, *args, **kwargs)
352 bundle2pushkey(origpushkeyhandler, *args, **kwargs)
353
353
354 newpushkeyhandler.params = origpushkeyhandler.params
354 newpushkeyhandler.params = origpushkeyhandler.params
355 bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler
355 bundle2.parthandlermapping[b'pushkey'] = newpushkeyhandler
356
356
357 orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
357 orighandlephasehandler = bundle2.parthandlermapping[b'phase-heads']
358 newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
358 newphaseheadshandler = lambda *args, **kwargs: bundle2handlephases(
359 orighandlephasehandler, *args, **kwargs
359 orighandlephasehandler, *args, **kwargs
360 )
360 )
361 newphaseheadshandler.params = orighandlephasehandler.params
361 newphaseheadshandler.params = orighandlephasehandler.params
362 bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler
362 bundle2.parthandlermapping[b'phase-heads'] = newphaseheadshandler
363
363
364 extensions.wrapfunction(
364 extensions.wrapfunction(
365 localrepo.localrepository, b'listkeys', localrepolistkeys
365 localrepo.localrepository, b'listkeys', localrepolistkeys
366 )
366 )
367 wireprotov1server.commands[b'lookup'] = (
367 wireprotov1server.commands[b'lookup'] = (
368 _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
368 _lookupwrap(wireprotov1server.commands[b'lookup'][0]),
369 b'key',
369 b'key',
370 )
370 )
371 extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)
371 extensions.wrapfunction(exchange, b'getbundlechunks', getbundlechunks)
372
372
373 extensions.wrapfunction(bundle2, b'processparts', processparts)
373 extensions.wrapfunction(bundle2, b'processparts', processparts)
374
374
375
375
376 def clientextsetup(ui):
376 def clientextsetup(ui):
377 entry = extensions.wrapcommand(commands.table, b'push', _push)
377 entry = extensions.wrapcommand(commands.table, b'push', _push)
378
378
379 entry[1].append(
379 entry[1].append(
380 (
380 (
381 b'',
381 b'',
382 b'bundle-store',
382 b'bundle-store',
383 None,
383 None,
384 _(b'force push to go to bundle store (EXPERIMENTAL)'),
384 _(b'force push to go to bundle store (EXPERIMENTAL)'),
385 )
385 )
386 )
386 )
387
387
388 extensions.wrapcommand(commands.table, b'pull', _pull)
388 extensions.wrapcommand(commands.table, b'pull', _pull)
389
389
390 extensions.wrapfunction(discovery, b'checkheads', _checkheads)
390 extensions.wrapfunction(discovery, b'checkheads', _checkheads)
391
391
392 wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns
392 wireprotov1peer.wirepeer.listkeyspatterns = listkeyspatterns
393
393
394 partorder = exchange.b2partsgenorder
394 partorder = exchange.b2partsgenorder
395 index = partorder.index(b'changeset')
395 index = partorder.index(b'changeset')
396 partorder.insert(
396 partorder.insert(
397 index, partorder.pop(partorder.index(scratchbranchparttype))
397 index, partorder.pop(partorder.index(scratchbranchparttype))
398 )
398 )
399
399
400
400
401 def _checkheads(orig, pushop):
401 def _checkheads(orig, pushop):
402 if pushop.ui.configbool(experimental, configscratchpush, False):
402 if pushop.ui.configbool(experimental, configscratchpush, False):
403 return
403 return
404 return orig(pushop)
404 return orig(pushop)
405
405
406
406
407 def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
407 def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
408 patterns = wireprototypes.decodelist(patterns)
408 patterns = wireprototypes.decodelist(patterns)
409 d = pycompat.iteritems(repo.listkeys(encoding.tolocal(namespace), patterns))
409 d = pycompat.iteritems(repo.listkeys(encoding.tolocal(namespace), patterns))
410 return pushkey.encodekeys(d)
410 return pushkey.encodekeys(d)
411
411
412
412
413 def localrepolistkeys(orig, self, namespace, patterns=None):
413 def localrepolistkeys(orig, self, namespace, patterns=None):
414 if namespace == b'bookmarks' and patterns:
414 if namespace == b'bookmarks' and patterns:
415 index = self.bundlestore.index
415 index = self.bundlestore.index
416 results = {}
416 results = {}
417 bookmarks = orig(self, namespace)
417 bookmarks = orig(self, namespace)
418 for pattern in patterns:
418 for pattern in patterns:
419 results.update(index.getbookmarks(pattern))
419 results.update(index.getbookmarks(pattern))
420 if pattern.endswith(b'*'):
420 if pattern.endswith(b'*'):
421 pattern = b're:^' + pattern[:-1] + b'.*'
421 pattern = b're:^' + pattern[:-1] + b'.*'
422 kind, pat, matcher = stringutil.stringmatcher(pattern)
422 kind, pat, matcher = stringutil.stringmatcher(pattern)
423 for bookmark, node in pycompat.iteritems(bookmarks):
423 for bookmark, node in pycompat.iteritems(bookmarks):
424 if matcher(bookmark):
424 if matcher(bookmark):
425 results[bookmark] = node
425 results[bookmark] = node
426 return results
426 return results
427 else:
427 else:
428 return orig(self, namespace)
428 return orig(self, namespace)
429
429
430
430
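A sketch of the glob-to-regex rewrite above on a hypothetical pattern (stringutil.stringmatcher then compiles the 're:' form into a matcher):

  pattern = b'scratch/feature*'
  if pattern.endswith(b'*'):
      pattern = b're:^' + pattern[:-1] + b'.*'
  assert pattern == b're:^scratch/feature.*'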
431 @wireprotov1peer.batchable
431 @wireprotov1peer.batchable
432 def listkeyspatterns(self, namespace, patterns):
432 def listkeyspatterns(self, namespace, patterns):
433 if not self.capable(b'pushkey'):
433 if not self.capable(b'pushkey'):
434 yield {}, None
434 return {}, None
435 f = wireprotov1peer.future()
436 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
435 self.ui.debug(b'preparing listkeys for "%s"\n' % namespace)
437 yield {
436
437 def decode(d):
438 self.ui.debug(
439 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
440 )
441 return pushkey.decodekeys(d)
442
443 return {
438 b'namespace': encoding.fromlocal(namespace),
444 b'namespace': encoding.fromlocal(namespace),
439 b'patterns': wireprototypes.encodelist(patterns),
445 b'patterns': wireprototypes.encodelist(patterns),
440 }, f
446 }, decode
441 d = f.value
442 self.ui.debug(
443 b'received listkey for "%s": %i bytes\n' % (namespace, len(d))
444 )
445 yield pushkey.decodekeys(d)
446
447
447
448
448 def _readbundlerevs(bundlerepo):
449 def _readbundlerevs(bundlerepo):
449 return list(bundlerepo.revs(b'bundle()'))
450 return list(bundlerepo.revs(b'bundle()'))
450
451
451
452
452 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
453 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
453 """Tells remotefilelog to include all changed files to the changegroup
454 """Tells remotefilelog to include all changed files to the changegroup
454
455
455 By default remotefilelog doesn't include file content to the changegroup.
456 By default remotefilelog doesn't include file content to the changegroup.
456 But we need to include it if we are fetching from bundlestore.
457 But we need to include it if we are fetching from bundlestore.
457 """
458 """
458 changedfiles = set()
459 changedfiles = set()
459 cl = bundlerepo.changelog
460 cl = bundlerepo.changelog
460 for r in bundlerevs:
461 for r in bundlerevs:
461 # [3] means changed files
462 # [3] means changed files
462 changedfiles.update(cl.read(r)[3])
463 changedfiles.update(cl.read(r)[3])
463 if not changedfiles:
464 if not changedfiles:
464 return bundlecaps
465 return bundlecaps
465
466
466 changedfiles = b'\0'.join(changedfiles)
467 changedfiles = b'\0'.join(changedfiles)
467 newcaps = []
468 newcaps = []
468 appended = False
469 appended = False
469 for cap in bundlecaps or []:
470 for cap in bundlecaps or []:
470 if cap.startswith(b'excludepattern='):
471 if cap.startswith(b'excludepattern='):
471 newcaps.append(b'\0'.join((cap, changedfiles)))
472 newcaps.append(b'\0'.join((cap, changedfiles)))
472 appended = True
473 appended = True
473 else:
474 else:
474 newcaps.append(cap)
475 newcaps.append(cap)
475 if not appended:
476 if not appended:
476 # No excludepattern cap was found. Just append it
477 # No excludepattern cap was found. Just append it
477 newcaps.append(b'excludepattern=' + changedfiles)
478 newcaps.append(b'excludepattern=' + changedfiles)
478
479
479 return newcaps
480 return newcaps
480
481
481
482
482 def _rebundle(bundlerepo, bundleroots, unknownhead):
483 def _rebundle(bundlerepo, bundleroots, unknownhead):
483 """
484 """
484 A bundle may include more revisions than the user requested. For example,
485 A bundle may include more revisions than the user requested. For example,
485 the user may ask for one revision while the bundle also contains its descendants.
486 the user may ask for one revision while the bundle also contains its descendants.
486 This function filters out all revisions that the user did not request.
487 This function filters out all revisions that the user did not request.
487 """
488 """
488 parts = []
489 parts = []
489
490
490 version = b'02'
491 version = b'02'
491 outgoing = discovery.outgoing(
492 outgoing = discovery.outgoing(
492 bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead]
493 bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead]
493 )
494 )
494 cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
495 cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
495 cgstream = util.chunkbuffer(cgstream).read()
496 cgstream = util.chunkbuffer(cgstream).read()
496 cgpart = bundle2.bundlepart(b'changegroup', data=cgstream)
497 cgpart = bundle2.bundlepart(b'changegroup', data=cgstream)
497 cgpart.addparam(b'version', version)
498 cgpart.addparam(b'version', version)
498 parts.append(cgpart)
499 parts.append(cgpart)
499
500
500 return parts
501 return parts
501
502
502
503
503 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
504 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
504 cl = bundlerepo.changelog
505 cl = bundlerepo.changelog
505 bundleroots = []
506 bundleroots = []
506 for rev in bundlerevs:
507 for rev in bundlerevs:
507 node = cl.node(rev)
508 node = cl.node(rev)
508 parents = cl.parents(node)
509 parents = cl.parents(node)
509 for parent in parents:
510 for parent in parents:
510 # include all revs that exist in the main repo
511 # include all revs that exist in the main repo
511 # to make sure that the bundle can be applied client-side
512 # to make sure that the bundle can be applied client-side
512 if parent in oldrepo:
513 if parent in oldrepo:
513 bundleroots.append(parent)
514 bundleroots.append(parent)
514 return bundleroots
515 return bundleroots
515
516
516
517
517 def _needsrebundling(head, bundlerepo):
518 def _needsrebundling(head, bundlerepo):
518 bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
519 bundleheads = list(bundlerepo.revs(b'heads(bundle())'))
519 return not (
520 return not (
520 len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
521 len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
521 )
522 )
522
523
523
524
524 def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
525 def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
525 """generates bundle that will be send to the user
526 """generates bundle that will be send to the user
526
527
527 returns tuple with raw bundle string and bundle type
528 returns tuple with raw bundle string and bundle type
528 """
529 """
529 parts = []
530 parts = []
530 if not _needsrebundling(head, bundlerepo):
531 if not _needsrebundling(head, bundlerepo):
531 with util.posixfile(bundlefile, b"rb") as f:
532 with util.posixfile(bundlefile, b"rb") as f:
532 unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
533 unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
533 if isinstance(unbundler, changegroup.cg1unpacker):
534 if isinstance(unbundler, changegroup.cg1unpacker):
534 part = bundle2.bundlepart(
535 part = bundle2.bundlepart(
535 b'changegroup', data=unbundler._stream.read()
536 b'changegroup', data=unbundler._stream.read()
536 )
537 )
537 part.addparam(b'version', b'01')
538 part.addparam(b'version', b'01')
538 parts.append(part)
539 parts.append(part)
539 elif isinstance(unbundler, bundle2.unbundle20):
540 elif isinstance(unbundler, bundle2.unbundle20):
540 haschangegroup = False
541 haschangegroup = False
541 for part in unbundler.iterparts():
542 for part in unbundler.iterparts():
542 if part.type == b'changegroup':
543 if part.type == b'changegroup':
543 haschangegroup = True
544 haschangegroup = True
544 newpart = bundle2.bundlepart(part.type, data=part.read())
545 newpart = bundle2.bundlepart(part.type, data=part.read())
545 for key, value in pycompat.iteritems(part.params):
546 for key, value in pycompat.iteritems(part.params):
546 newpart.addparam(key, value)
547 newpart.addparam(key, value)
547 parts.append(newpart)
548 parts.append(newpart)
548
549
549 if not haschangegroup:
550 if not haschangegroup:
550 raise error.Abort(
551 raise error.Abort(
551 b'unexpected bundle without changegroup part, '
552 b'unexpected bundle without changegroup part, '
552 + b'head: %s' % hex(head),
553 + b'head: %s' % hex(head),
553 hint=b'report to administrator',
554 hint=b'report to administrator',
554 )
555 )
555 else:
556 else:
556 raise error.Abort(b'unknown bundle type')
557 raise error.Abort(b'unknown bundle type')
557 else:
558 else:
558 parts = _rebundle(bundlerepo, bundleroots, head)
559 parts = _rebundle(bundlerepo, bundleroots, head)
559
560
560 return parts
561 return parts
561
562
562
563
563 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
564 def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
564 heads = heads or []
565 heads = heads or []
565 # newheads are parents of roots of scratch bundles that were requested
566 # newheads are parents of roots of scratch bundles that were requested
566 newphases = {}
567 newphases = {}
567 scratchbundles = []
568 scratchbundles = []
568 newheads = []
569 newheads = []
569 scratchheads = []
570 scratchheads = []
570 nodestobundle = {}
571 nodestobundle = {}
571 allbundlestocleanup = []
572 allbundlestocleanup = []
572 try:
573 try:
573 for head in heads:
574 for head in heads:
574 if not repo.changelog.index.has_node(head):
575 if not repo.changelog.index.has_node(head):
575 if head not in nodestobundle:
576 if head not in nodestobundle:
576 newbundlefile = common.downloadbundle(repo, head)
577 newbundlefile = common.downloadbundle(repo, head)
577 bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
578 bundlepath = b"bundle:%s+%s" % (repo.root, newbundlefile)
578 bundlerepo = hg.repository(repo.ui, bundlepath)
579 bundlerepo = hg.repository(repo.ui, bundlepath)
579
580
580 allbundlestocleanup.append((bundlerepo, newbundlefile))
581 allbundlestocleanup.append((bundlerepo, newbundlefile))
581 bundlerevs = set(_readbundlerevs(bundlerepo))
582 bundlerevs = set(_readbundlerevs(bundlerepo))
582 bundlecaps = _includefilelogstobundle(
583 bundlecaps = _includefilelogstobundle(
583 bundlecaps, bundlerepo, bundlerevs, repo.ui
584 bundlecaps, bundlerepo, bundlerevs, repo.ui
584 )
585 )
585 cl = bundlerepo.changelog
586 cl = bundlerepo.changelog
586 bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
587 bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
587 for rev in bundlerevs:
588 for rev in bundlerevs:
588 node = cl.node(rev)
589 node = cl.node(rev)
589 newphases[hex(node)] = str(phases.draft)
590 newphases[hex(node)] = str(phases.draft)
590 nodestobundle[node] = (
591 nodestobundle[node] = (
591 bundlerepo,
592 bundlerepo,
592 bundleroots,
593 bundleroots,
593 newbundlefile,
594 newbundlefile,
594 )
595 )
595
596
596 scratchbundles.append(
597 scratchbundles.append(
597 _generateoutputparts(head, *nodestobundle[head])
598 _generateoutputparts(head, *nodestobundle[head])
598 )
599 )
599 newheads.extend(bundleroots)
600 newheads.extend(bundleroots)
600 scratchheads.append(head)
601 scratchheads.append(head)
601 finally:
602 finally:
602 for bundlerepo, bundlefile in allbundlestocleanup:
603 for bundlerepo, bundlefile in allbundlestocleanup:
603 bundlerepo.close()
604 bundlerepo.close()
604 try:
605 try:
605 os.unlink(bundlefile)
606 os.unlink(bundlefile)
606 except (IOError, OSError):
607 except (IOError, OSError):
607 # if we can't cleanup the file then just ignore the error,
608 # if we can't cleanup the file then just ignore the error,
608 # no need to fail
609 # no need to fail
609 pass
610 pass
610
611
611 pullfrombundlestore = bool(scratchbundles)
612 pullfrombundlestore = bool(scratchbundles)
612 wrappedchangegrouppart = False
613 wrappedchangegrouppart = False
613 wrappedlistkeys = False
614 wrappedlistkeys = False
614 oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
615 oldchangegrouppart = exchange.getbundle2partsmapping[b'changegroup']
615 try:
616 try:
616
617
617 def _changegrouppart(bundler, *args, **kwargs):
618 def _changegrouppart(bundler, *args, **kwargs):
618 # Order is important here. First add non-scratch part
619 # Order is important here. First add non-scratch part
619 # and only then add parts with scratch bundles because
620 # and only then add parts with scratch bundles because
620 # non-scratch part contains parents of roots of scratch bundles.
621 # non-scratch part contains parents of roots of scratch bundles.
621 result = oldchangegrouppart(bundler, *args, **kwargs)
622 result = oldchangegrouppart(bundler, *args, **kwargs)
622 for bundle in scratchbundles:
623 for bundle in scratchbundles:
623 for part in bundle:
624 for part in bundle:
624 bundler.addpart(part)
625 bundler.addpart(part)
625 return result
626 return result
626
627
627 exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
628 exchange.getbundle2partsmapping[b'changegroup'] = _changegrouppart
628 wrappedchangegrouppart = True
629 wrappedchangegrouppart = True
629
630
630 def _listkeys(orig, self, namespace):
631 def _listkeys(orig, self, namespace):
631 origvalues = orig(self, namespace)
632 origvalues = orig(self, namespace)
632 if namespace == b'phases' and pullfrombundlestore:
633 if namespace == b'phases' and pullfrombundlestore:
633 if origvalues.get(b'publishing') == b'True':
634 if origvalues.get(b'publishing') == b'True':
634 # Make repo non-publishing to preserve draft phase
635 # Make repo non-publishing to preserve draft phase
635 del origvalues[b'publishing']
636 del origvalues[b'publishing']
636 origvalues.update(newphases)
637 origvalues.update(newphases)
637 return origvalues
638 return origvalues
638
639
639 extensions.wrapfunction(
640 extensions.wrapfunction(
640 localrepo.localrepository, b'listkeys', _listkeys
641 localrepo.localrepository, b'listkeys', _listkeys
641 )
642 )
642 wrappedlistkeys = True
643 wrappedlistkeys = True
643 heads = list((set(newheads) | set(heads)) - set(scratchheads))
644 heads = list((set(newheads) | set(heads)) - set(scratchheads))
644 result = orig(
645 result = orig(
645 repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
646 repo, source, heads=heads, bundlecaps=bundlecaps, **kwargs
646 )
647 )
647 finally:
648 finally:
648 if wrappedchangegrouppart:
649 if wrappedchangegrouppart:
649 exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
650 exchange.getbundle2partsmapping[b'changegroup'] = oldchangegrouppart
650 if wrappedlistkeys:
651 if wrappedlistkeys:
651 extensions.unwrapfunction(
652 extensions.unwrapfunction(
652 localrepo.localrepository, b'listkeys', _listkeys
653 localrepo.localrepository, b'listkeys', _listkeys
653 )
654 )
654 return result
655 return result
655
656
656
657
def _lookupwrap(orig):
    def _lookup(repo, proto, key):
        localkey = encoding.tolocal(key)

        if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
            scratchnode = repo.bundlestore.index.getnode(localkey)
            if scratchnode:
                return b"%d %s\n" % (1, scratchnode)
            else:
                return b"%d %s\n" % (
                    0,
                    b'scratch branch %s not found' % localkey,
                )
        else:
            try:
                r = hex(repo.lookup(localkey))
                return b"%d %s\n" % (1, r)
            except Exception as inst:
                if repo.bundlestore.index.getbundle(localkey):
                    return b"%d %s\n" % (1, localkey)
                else:
                    r = stringutil.forcebytestr(inst)
                    return b"%d %s\n" % (0, r)

    return _lookup

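# Note: the b"%d %s\n" replies built in _lookup above follow the framing
# of the legacy `lookup` wire protocol command: a success flag (1 or 0)
# followed by either the resolved node or an error message, e.g.
# b"1 <hexnode>\n" on success.
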
def _pull(orig, ui, repo, source=b"default", **opts):
    opts = pycompat.byteskwargs(opts)
    # Copy paste from `pull` command
    source, branches = urlutil.get_unique_pull_path(
        b"infinite-push's pull",
        repo,
        ui,
        source,
        default_branches=opts.get(b'branch'),
    )

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    for rev in opts.get(b'rev', []):
        if rev not in unfi:
            unknownnodes.append(rev)
    if opts.get(b'bookmark'):
        bookmarks = []
        revs = opts.get(b'rev') or []
        for bookmark in opts.get(b'bookmark'):
            if _scratchbranchmatcher(bookmark):
                # rev is not known yet
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = b'REVTOFETCH'
            else:
                bookmarks.append(bookmark)

        if scratchbookmarks:
            other = hg.peer(repo, opts, source)
            try:
                fetchedbookmarks = other.listkeyspatterns(
                    b'bookmarks', patterns=scratchbookmarks
                )
                for bookmark in scratchbookmarks:
                    if bookmark not in fetchedbookmarks:
                        raise error.Abort(
                            b'remote bookmark %s not found!' % bookmark
                        )
                    scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                    revs.append(fetchedbookmarks[bookmark])
            finally:
                other.close()
        opts[b'bookmark'] = bookmarks
        opts[b'rev'] = revs

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        extensions.wrapfunction(
            discovery, b'findcommonincoming', _findcommonincoming
        )
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save them before the pull and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
        result = orig(ui, repo, source, **pycompat.strkwargs(opts))
        # TODO(stash): race condition is possible
        # if scratch bookmarks were updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        if common.isremotebooksenabled(ui):
            remotescratchbookmarks.update(scratchbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, source)
        else:
            _savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            extensions.unwrapfunction(discovery, b'findcommonincoming')

def _readscratchremotebookmarks(ui, repo, other):
    if common.isremotebooksenabled(ui):
        remotenamesext = extensions.find(b'remotenames')
        remotepath = remotenamesext.activepath(repo.ui, other)
        result = {}
        # Refresh remotenames to make sure they are up to date: it seems
        # that `repo.names['remotebookmarks']` may return stale bookmarks,
        # which results in scratch bookmarks being deleted. Our best guess
        # at a fix is to use `clearnames()`
        repo._remotenames.clearnames()
        for remotebookmark in repo.names[b'remotebookmarks'].listnames(repo):
            path, bookname = remotenamesext.splitremotename(remotebookmark)
            if path == remotepath and _scratchbranchmatcher(bookname):
                nodes = repo.names[b'remotebookmarks'].nodes(
                    repo, remotebookmark
                )
                if nodes:
                    result[bookname] = hex(nodes[0])
        return result
    else:
        return {}

def _saveremotebookmarks(repo, newbookmarks, remote):
    remotenamesext = extensions.find(b'remotenames')
    remotepath = remotenamesext.activepath(repo.ui, remote)
    branches = collections.defaultdict(list)
    bookmarks = {}
    remotenames = remotenamesext.readremotenames(repo)
    for hexnode, nametype, remote, rname in remotenames:
        if remote != remotepath:
            continue
        if nametype == b'bookmarks':
            if rname in newbookmarks:
                # This can happen if a normal bookmark matches the scratch
                # branch pattern. In this case just use the current
                # bookmark node
                del newbookmarks[rname]
            bookmarks[rname] = hexnode
        elif nametype == b'branches':
            # saveremotenames expects 20 byte binary nodes for branches
            branches[rname].append(bin(hexnode))

    for bookmark, hexnode in pycompat.iteritems(newbookmarks):
        bookmarks[bookmark] = hexnode
    remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)

def _savelocalbookmarks(repo, bookmarks):
    if not bookmarks:
        return
    with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
        changes = []
        for scratchbook, node in pycompat.iteritems(bookmarks):
            changectx = repo[node]
            changes.append((scratchbook, changectx.node()))
        repo._bookmarks.applychanges(repo, tr, changes)

def _findcommonincoming(orig, *args, **kwargs):
    common, inc, remoteheads = orig(*args, **kwargs)
    return common, True, remoteheads

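# _findcommonincoming (above) deliberately reports anyincoming=True:
# scratch nodes live only in the bundlestore, so ordinary discovery
# against the remote's changelog would conclude there is nothing to
# pull and skip the transfer entirely.
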
def _push(orig, ui, repo, *dests, **opts):
    opts = pycompat.byteskwargs(opts)
    bookmark = opts.get(b'bookmark')
    # we only support pushing one infinitepush bookmark at once
    if len(bookmark) == 1:
        bookmark = bookmark[0]
    else:
        bookmark = b''

    oldphasemove = None
    overrides = {(experimental, configbookmark): bookmark}

    with ui.configoverride(overrides, b'infinitepush'):
        scratchpush = opts.get(b'bundle_store')
        if _scratchbranchmatcher(bookmark):
            scratchpush = True
            # bundle2 can be sent back after push (for example, bundle2
            # containing `pushkey` part to update bookmarks)
            ui.setconfig(experimental, b'bundle2.pushback', True)

        if scratchpush:
            # this is an infinitepush; we don't want the bookmark to be
            # applied, rather it should be stored in the bundlestore
            opts[b'bookmark'] = []
            ui.setconfig(experimental, configscratchpush, True)
            oldphasemove = extensions.wrapfunction(
                exchange, b'_localphasemove', _phasemove
            )

        paths = list(urlutil.get_push_paths(repo, ui, dests))
        if len(paths) > 1:
            msg = _(b'cannot push to multiple paths with infinitepush')
            raise error.Abort(msg)

        path = paths[0]
        destpath = path.pushloc or path.loc
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save them before the push and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
        result = orig(ui, repo, *dests, **pycompat.strkwargs(opts))
        if common.isremotebooksenabled(ui):
            if bookmark and scratchpush:
                other = hg.peer(repo, opts, destpath)
                try:
                    fetchedbookmarks = other.listkeyspatterns(
                        b'bookmarks', patterns=[bookmark]
                    )
                    remotescratchbookmarks.update(fetchedbookmarks)
                finally:
                    other.close()
            _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
    if oldphasemove:
        exchange._localphasemove = oldphasemove
    return result

def _deleteinfinitepushbookmarks(ui, repo, path, names):
    """Prune remote names by removing the bookmarks we don't want anymore,
    then writing the result back to disk
    """
    remotenamesext = extensions.find(b'remotenames')

    # remotename format is:
    # (node, nametype ("branches" or "bookmarks"), remote, name)
    nametype_idx = 1
    remote_idx = 2
    name_idx = 3
    remotenames = [
        remotename
        for remotename in remotenamesext.readremotenames(repo)
        if remotename[remote_idx] == path
    ]
    remote_bm_names = [
        remotename[name_idx]
        for remotename in remotenames
        if remotename[nametype_idx] == b"bookmarks"
    ]

    for name in names:
        if name not in remote_bm_names:
            raise error.Abort(
                _(
                    b"infinitepush bookmark '{}' does not exist "
                    b"in path '{}'"
                ).format(name, path)
            )

    bookmarks = {}
    branches = collections.defaultdict(list)
    for node, nametype, remote, name in remotenames:
        if nametype == b"bookmarks" and name not in names:
            bookmarks[name] = node
        elif nametype == b"branches":
            # saveremotenames wants binary nodes for branches
            branches[name].append(bin(node))

    remotenamesext.saveremotenames(repo, path, branches, bookmarks)

def _phasemove(orig, pushop, nodes, phase=phases.public):
    """prevent commits from being marked public

    Since these are going to a scratch branch, they aren't really being
    published."""

    if phase != phases.public:
        orig(pushop, nodes, phase)

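# Only the public-phase move is swallowed by _phasemove (above); moves
# to draft or secret still reach the original _localphasemove, so the
# pushed scratch commits stay draft in the local repo.
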
@exchange.b2partsgenerator(scratchbranchparttype)
def partgen(pushop, bundler):
    bookmark = pushop.ui.config(experimental, configbookmark)
    scratchpush = pushop.ui.configbool(experimental, configscratchpush)
    if b'changesets' in pushop.stepsdone or not scratchpush:
        return

    if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
        return

    pushop.stepsdone.add(b'changesets')
    if not pushop.outgoing.missing:
        pushop.ui.status(_(b'no changes found\n'))
        pushop.cgresult = 0
        return

    # This parameter tells the server that the following bundle is an
    # infinitepush. This lets it switch the part processing to our
    # infinitepush code path.
    bundler.addparam(b"infinitepush", b"True")

    scratchparts = bundleparts.getscratchbranchparts(
        pushop.repo, pushop.remote, pushop.outgoing, pushop.ui, bookmark
    )

    for scratchpart in scratchparts:
        bundler.addpart(scratchpart)

    def handlereply(op):
        # server either succeeds or aborts; no code to read
        pushop.cgresult = 1

    return handlereply

bundle2.capabilities[bundleparts.scratchbranchparttype] = ()

def _getrevs(bundle, oldnode, force, bookmark):
    b'extracts and validates the revs to be imported'
    revs = [bundle[r] for r in bundle.revs(b'sort(bundle())')]

    # new bookmark
    if oldnode is None:
        return revs

    # Fast forward update
    if oldnode in bundle and list(bundle.set(b'bundle() & %s::', oldnode)):
        return revs

    return revs

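# As written, all three branches of _getrevs (above) return the full
# rev list: the new-bookmark and fast-forward cases are only spelled
# out for readability, and `force`/`bookmark` are currently unused.
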
@contextlib.contextmanager
def logservicecall(logger, service, **kwargs):
    start = time.time()
    logger(service, eventtype=b'start', **kwargs)
    try:
        yield
        logger(
            service,
            eventtype=b'success',
            elapsedms=(time.time() - start) * 1000,
            **kwargs
        )
    except Exception as e:
        logger(
            service,
            eventtype=b'failure',
            elapsedms=(time.time() - start) * 1000,
            errormsg=stringutil.forcebytestr(e),
            **kwargs
        )
        raise

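# Minimal usage sketch for logservicecall, mirroring the calls made in
# storebundle further below:
#
#     with logservicecall(log, b'bundlestore', bundlesize=len(data)):
#         key = store.write(data)
#
# This logs a b'start' event, then b'success' with elapsedms on normal
# exit, or b'failure' with errormsg before re-raising the exception.
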
def _getorcreateinfinitepushlogger(op):
    logger = op.records[b'infinitepushlogger']
    if not logger:
        ui = op.repo.ui
        try:
            username = procutil.getuser()
        except Exception:
            username = b'unknown'
        # Generate random request id to be able to find all logged entries
        # for the same request. Since requestid is pseudo-generated it may
        # not be unique, but we assume that (hostname, username, requestid)
        # is unique.
        random.seed()
        requestid = random.randint(0, 2000000000)
        hostname = socket.gethostname()
        logger = functools.partial(
            ui.log,
            b'infinitepush',
            user=username,
            requestid=requestid,
            hostname=hostname,
            reponame=ui.config(b'infinitepush', b'reponame'),
        )
        op.records.add(b'infinitepushlogger', logger)
    else:
        logger = logger[0]
    return logger

def storetobundlestore(orig, repo, op, unbundler):
    """store the incoming bundle coming from the push command in the
    bundlestore instead of applying it to the revlogs"""

    repo.ui.status(_(b"storing changesets on the bundlestore\n"))
    bundler = bundle2.bundle20(repo.ui)

    # processing each part and storing it in bundler
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            else:
                bundlepart = bundle2.bundlepart(part.type, data=part.read())
                for key, value in pycompat.iteritems(part.params):
                    bundlepart.addparam(key, value)

                # Certain parts require a response
                if part.type in (b'pushkey', b'changegroup'):
                    if op.reply is not None:
                        rpart = op.reply.newpart(b'reply:%s' % part.type)
                        rpart.addparam(
                            b'in-reply-to', b'%d' % part.id, mandatory=False
                        )
                        rpart.addparam(b'return', b'1', mandatory=False)

                op.records.add(
                    part.type,
                    {
                        b'return': 1,
                    },
                )
            if bundlepart:
                bundler.addpart(bundlepart)

    # storing the bundle in the bundlestore
    buf = util.chunkbuffer(bundler.getchunks())
    fd, bundlefile = pycompat.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, 'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, {}, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass

def processparts(orig, repo, op, unbundler):

    # make sure we don't wrap processparts in case of `hg unbundle`
    if op.source == b'unbundle':
        return orig(repo, op, unbundler)

    # this server routes each push to bundle store
    if repo.ui.configbool(b'infinitepush', b'pushtobundlestore'):
        return storetobundlestore(orig, repo, op, unbundler)

    if unbundler.params.get(b'infinitepush') != b'True':
        return orig(repo, op, unbundler)

    handleallparts = repo.ui.configbool(b'infinitepush', b'storeallparts')

    bundler = bundle2.bundle20(repo.ui)
    cgparams = None
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            elif part.type == bundleparts.scratchbranchparttype:
                # Scratch branch parts need to be converted to normal
                # changegroup parts, and the extra parameters stored for later
                # when we upload to the store. Eventually those parameters will
                # be put on the actual bundle instead of this part, then we can
                # send a vanilla changegroup instead of the scratchbranch part.
                cgversion = part.params.get(b'cgversion', b'01')
                bundlepart = bundle2.bundlepart(
                    b'changegroup', data=part.read()
                )
                bundlepart.addparam(b'version', cgversion)
                cgparams = part.params

                # If we're not dumping all parts into the new bundle, we need to
                # alert the future pushkey and phase-heads handler to skip
                # the part.
                if not handleallparts:
                    op.records.add(
                        scratchbranchparttype + b'_skippushkey', True
                    )
                    op.records.add(
                        scratchbranchparttype + b'_skipphaseheads', True
                    )
            else:
                if handleallparts:
                    # Ideally we would not process any parts, and instead just
                    # forward them to the bundle for storage, but since this
                    # differs from previous behavior, we need to put it behind a
                    # config flag for incremental rollout.
                    bundlepart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in pycompat.iteritems(part.params):
                        bundlepart.addparam(key, value)

                    # Certain parts require a response
                    if part.type == b'pushkey':
                        if op.reply is not None:
                            rpart = op.reply.newpart(b'reply:pushkey')
                            rpart.addparam(
                                b'in-reply-to', str(part.id), mandatory=False
                            )
                            rpart.addparam(b'return', b'1', mandatory=False)
                else:
                    bundle2._processpart(op, part)

                if handleallparts:
                    op.records.add(
                        part.type,
                        {
                            b'return': 1,
                        },
                    )
            if bundlepart:
                bundler.addpart(bundlepart)

    # If commits were sent, store them
    if cgparams:
        buf = util.chunkbuffer(bundler.getchunks())
        fd, bundlefile = pycompat.mkstemp()
        try:
            try:
                fp = os.fdopen(fd, 'wb')
                fp.write(buf.read())
            finally:
                fp.close()
            storebundle(op, cgparams, bundlefile)
        finally:
            try:
                os.unlink(bundlefile)
            except Exception:
                # we would rather see the original exception
                pass

def storebundle(op, params, bundlefile):
    log = _getorcreateinfinitepushlogger(op)
    parthandlerstart = time.time()
    log(scratchbranchparttype, eventtype=b'start')
    index = op.repo.bundlestore.index
    store = op.repo.bundlestore.store
    op.records.add(scratchbranchparttype + b'_skippushkey', True)

    bundle = None
    try:  # guards bundle
        bundlepath = b"bundle:%s+%s" % (op.repo.root, bundlefile)
        bundle = hg.repository(op.repo.ui, bundlepath)

        bookmark = params.get(b'bookmark')
        bookprevnode = params.get(b'bookprevnode', b'')
        force = params.get(b'force')

        if bookmark:
            oldnode = index.getnode(bookmark)
        else:
            oldnode = None
        bundleheads = bundle.revs(b'heads(bundle())')
        if bookmark and len(bundleheads) > 1:
            raise error.Abort(
                _(b'cannot push more than one head to a scratch branch')
            )

        revs = _getrevs(bundle, oldnode, force, bookmark)

        # Notify the user of what is being pushed
        plural = b's' if len(revs) > 1 else b''
        op.repo.ui.warn(_(b"pushing %d commit%s:\n") % (len(revs), plural))
        maxoutput = 10
        for i in range(0, min(len(revs), maxoutput)):
            firstline = bundle[revs[i]].description().split(b'\n')[0][:50]
            op.repo.ui.warn(b"    %s  %s\n" % (revs[i], firstline))

        if len(revs) > maxoutput + 1:
            op.repo.ui.warn(b"    ...\n")
            firstline = bundle[revs[-1]].description().split(b'\n')[0][:50]
            op.repo.ui.warn(b"    %s  %s\n" % (revs[-1], firstline))

        nodesctx = [bundle[rev] for rev in revs]
        inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
        if bundleheads:
            newheadscount = sum(not inindex(rev) for rev in bundleheads)
        else:
            newheadscount = 0
        # If there's a bookmark specified, there should be only one head,
        # so we choose the last node, which will be that head.
        # If a bug or malicious client allows there to be a bookmark
        # with multiple heads, we will place the bookmark on the last head.
        bookmarknode = nodesctx[-1].hex() if nodesctx else None
        key = None
        if newheadscount:
            with open(bundlefile, b'rb') as f:
                bundledata = f.read()
                with logservicecall(
                    log, b'bundlestore', bundlesize=len(bundledata)
                ):
                    bundlesizelimit = 100 * 1024 * 1024  # 100 MB
                    if len(bundledata) > bundlesizelimit:
                        error_msg = (
                            b'bundle is too big: %d bytes. '
                            + b'max allowed size is 100 MB'
                        )
                        raise error.Abort(error_msg % (len(bundledata),))
                    key = store.write(bundledata)

        with logservicecall(log, b'index', newheadscount=newheadscount), index:
            if key:
                index.addbundle(key, nodesctx)
            if bookmark:
                index.addbookmark(bookmark, bookmarknode)
                _maybeaddpushbackpart(
                    op, bookmark, bookmarknode, bookprevnode, params
                )
        log(
            scratchbranchparttype,
            eventtype=b'success',
            elapsedms=(time.time() - parthandlerstart) * 1000,
        )

    except Exception as e:
        log(
            scratchbranchparttype,
            eventtype=b'failure',
            elapsedms=(time.time() - parthandlerstart) * 1000,
            errormsg=stringutil.forcebytestr(e),
        )
        raise
    finally:
        if bundle:
            bundle.close()

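# Note on the bundlepath used in storebundle (above): Mercurial's
# "bundle:<repo>+<bundlefile>" URL scheme overlays the bundle on top of
# the existing repo, which is what lets revsets like b'bundle()' select
# exactly the incoming revisions without applying them to the revlogs.
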
@bundle2.parthandler(
    scratchbranchparttype,
    (
        b'bookmark',
        b'bookprevnode',
        b'force',
        b'pushbackbookmarks',
        b'cgversion',
    ),
)
def bundle2scratchbranch(op, part):
    '''unbundle a bundle2 part containing a changegroup to store'''

    bundler = bundle2.bundle20(op.repo.ui)
    cgversion = part.params.get(b'cgversion', b'01')
    cgpart = bundle2.bundlepart(b'changegroup', data=part.read())
    cgpart.addparam(b'version', cgversion)
    bundler.addpart(cgpart)
    buf = util.chunkbuffer(bundler.getchunks())

    fd, bundlefile = pycompat.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, 'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, part.params, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    return 1

def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
    if params.get(b'pushbackbookmarks'):
        if op.reply and b'pushback' in op.reply.capabilities:
            params = {
                b'namespace': b'bookmarks',
                b'key': bookmark,
                b'new': newnode,
                b'old': oldnode,
            }
            op.reply.newpart(
                b'pushkey', mandatoryparams=pycompat.iteritems(params)
            )

def bundle2pushkey(orig, op, part):
    """Wrapper of bundle2.handlepushkey()

    The only goal is to skip calling the original function if the flag
    is set. The flag is set while an infinitepush push is happening.
    """
    if op.records[scratchbranchparttype + b'_skippushkey']:
        if op.reply is not None:
            rpart = op.reply.newpart(b'reply:pushkey')
            rpart.addparam(b'in-reply-to', str(part.id), mandatory=False)
            rpart.addparam(b'return', b'1', mandatory=False)
        return 1

    return orig(op, part)

def bundle2handlephases(orig, op, part):
    """Wrapper of bundle2.handlephases()

    The only goal is to skip calling the original function if the flag
    is set. The flag is set while an infinitepush push is happening.
    """

    if op.records[scratchbranchparttype + b'_skipphaseheads']:
        return

    return orig(op, part)

def _asyncsavemetadata(root, nodes):
    """starts a separate process that fills in metadata for the nodes

    This function creates a separate process and doesn't wait for its
    completion. This was done to avoid slowing down pushes
    """

    maxnodes = 50
    if len(nodes) > maxnodes:
        return
    nodesargs = []
    for node in nodes:
        nodesargs.append(b'--node')
        nodesargs.append(node)
    with open(os.devnull, b'w+b') as devnull:
        cmdline = [
            util.hgexecutable(),
            b'debugfillinfinitepushmetadata',
            b'-R',
            root,
        ] + nodesargs
        # Process will run in background. We don't care about the return code
        subprocess.Popen(
            pycompat.rapply(procutil.tonativestr, cmdline),
            close_fds=True,
            shell=False,
            stdin=devnull,
            stdout=devnull,
            stderr=devnull,
        )
@@ -1,675 +1,676 b''
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''High-level command function for lfconvert, plus the cmdtable.'''
from __future__ import absolute_import

import errno
import os
import shutil

from mercurial.i18n import _
from mercurial.node import (
    bin,
    hex,
)

from mercurial import (
    cmdutil,
    context,
    error,
    exthelper,
    hg,
    lock,
    logcmdutil,
    match as matchmod,
    pycompat,
    scmutil,
    util,
)
from mercurial.utils import hashutil

from ..convert import (
    convcmd,
    filemap,
)

from . import lfutil, storefactory

release = lock.release

# -- Commands ----------------------------------------------------------

eh = exthelper.exthelper()

50 @eh.command(
51 @eh.command(
51 b'lfconvert',
52 b'lfconvert',
52 [
53 [
53 (
54 (
54 b's',
55 b's',
55 b'size',
56 b'size',
56 b'',
57 b'',
57 _(b'minimum size (MB) for files to be converted as largefiles'),
58 _(b'minimum size (MB) for files to be converted as largefiles'),
58 b'SIZE',
59 b'SIZE',
59 ),
60 ),
60 (
61 (
61 b'',
62 b'',
62 b'to-normal',
63 b'to-normal',
63 False,
64 False,
64 _(b'convert from a largefiles repo to a normal repo'),
65 _(b'convert from a largefiles repo to a normal repo'),
65 ),
66 ),
66 ],
67 ],
67 _(b'hg lfconvert SOURCE DEST [FILE ...]'),
68 _(b'hg lfconvert SOURCE DEST [FILE ...]'),
68 norepo=True,
69 norepo=True,
69 inferrepo=True,
70 inferrepo=True,
70 )
71 )
71 def lfconvert(ui, src, dest, *pats, **opts):
72 def lfconvert(ui, src, dest, *pats, **opts):
72 """convert a normal repository to a largefiles repository
73 """convert a normal repository to a largefiles repository
73
74
74 Convert repository SOURCE to a new repository DEST, identical to
75 Convert repository SOURCE to a new repository DEST, identical to
75 SOURCE except that certain files will be converted as largefiles:
76 SOURCE except that certain files will be converted as largefiles:
76 specifically, any file that matches any PATTERN *or* whose size is
77 specifically, any file that matches any PATTERN *or* whose size is
77 above the minimum size threshold is converted as a largefile. The
78 above the minimum size threshold is converted as a largefile. The
78 size used to determine whether or not to track a file as a
79 size used to determine whether or not to track a file as a
79 largefile is the size of the first version of the file. The
80 largefile is the size of the first version of the file. The
80 minimum size can be specified either with --size or in
81 minimum size can be specified either with --size or in
81 configuration as ``largefiles.size``.
82 configuration as ``largefiles.size``.
82
83
83 After running this command you will need to make sure that
84 After running this command you will need to make sure that
84 largefiles is enabled anywhere you intend to push the new
85 largefiles is enabled anywhere you intend to push the new
85 repository.
86 repository.
86
87
87 Use --to-normal to convert largefiles back to normal files; after
88 Use --to-normal to convert largefiles back to normal files; after
88 this, the DEST repository can be used without largefiles at all."""
89 this, the DEST repository can be used without largefiles at all."""
89
90
90 opts = pycompat.byteskwargs(opts)
91 opts = pycompat.byteskwargs(opts)
91 if opts[b'to_normal']:
92 if opts[b'to_normal']:
92 tolfile = False
93 tolfile = False
93 else:
94 else:
94 tolfile = True
95 tolfile = True
95 size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)
96 size = lfutil.getminsize(ui, True, opts.get(b'size'), default=None)
96
97
97 if not hg.islocal(src):
98 if not hg.islocal(src):
98 raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
99 raise error.Abort(_(b'%s is not a local Mercurial repo') % src)
99 if not hg.islocal(dest):
100 if not hg.islocal(dest):
100 raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)
101 raise error.Abort(_(b'%s is not a local Mercurial repo') % dest)
101
102
102 rsrc = hg.repository(ui, src)
103 rsrc = hg.repository(ui, src)
103 ui.status(_(b'initializing destination %s\n') % dest)
104 ui.status(_(b'initializing destination %s\n') % dest)
104 rdst = hg.repository(ui, dest, create=True)
105 rdst = hg.repository(ui, dest, create=True)
105
106
106 success = False
107 success = False
107 dstwlock = dstlock = None
108 dstwlock = dstlock = None
108 try:
109 try:
109 # Get a list of all changesets in the source. The easy way to do this
110 # Get a list of all changesets in the source. The easy way to do this
110 # is to simply walk the changelog, using changelog.nodesbetween().
111 # is to simply walk the changelog, using changelog.nodesbetween().
111 # Take a look at mercurial/revlog.py:639 for more details.
112 # Take a look at mercurial/revlog.py:639 for more details.
112 # Use a generator instead of a list to decrease memory usage
113 # Use a generator instead of a list to decrease memory usage
113 ctxs = (
114 ctxs = (
114 rsrc[ctx]
115 rsrc[ctx]
115 for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
116 for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
116 )
117 )
117 revmap = {rsrc.nullid: rdst.nullid}
118 revmap = {rsrc.nullid: rdst.nullid}
118 if tolfile:
119 if tolfile:
119 # Lock destination to prevent modification while it is converted to.
120 # Lock destination to prevent modification while it is converted to.
120 # Don't need to lock src because we are just reading from its
121 # Don't need to lock src because we are just reading from its
121 # history which can't change.
122 # history which can't change.
122 dstwlock = rdst.wlock()
123 dstwlock = rdst.wlock()
123 dstlock = rdst.lock()
124 dstlock = rdst.lock()
124
125
125 lfiles = set()
126 lfiles = set()
126 normalfiles = set()
127 normalfiles = set()
127 if not pats:
128 if not pats:
128 pats = ui.configlist(lfutil.longname, b'patterns')
129 pats = ui.configlist(lfutil.longname, b'patterns')
129 if pats:
130 if pats:
130 matcher = matchmod.match(rsrc.root, b'', list(pats))
131 matcher = matchmod.match(rsrc.root, b'', list(pats))
131 else:
132 else:
132 matcher = None
133 matcher = None
133
134
134 lfiletohash = {}
135 lfiletohash = {}
135 with ui.makeprogress(
136 with ui.makeprogress(
136 _(b'converting revisions'),
137 _(b'converting revisions'),
137 unit=_(b'revisions'),
138 unit=_(b'revisions'),
138 total=rsrc[b'tip'].rev(),
139 total=rsrc[b'tip'].rev(),
139 ) as progress:
140 ) as progress:
140 for ctx in ctxs:
141 for ctx in ctxs:
141 progress.update(ctx.rev())
                    progress.update(ctx.rev())
                    _lfconvert_addchangeset(
                        rsrc,
                        rdst,
                        ctx,
                        revmap,
                        lfiles,
                        normalfiles,
                        matcher,
                        size,
                        lfiletohash,
                    )

            if rdst.wvfs.exists(lfutil.shortname):
                rdst.wvfs.rmtree(lfutil.shortname)

            for f in lfiletohash.keys():
                if rdst.wvfs.isfile(f):
                    rdst.wvfs.unlink(f)
                try:
                    rdst.wvfs.removedirs(rdst.wvfs.dirname(f))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add(b'largefiles')
                scmutil.writereporequirements(rdst)
        else:

            class lfsource(filemap.filemap_source):
                def __init__(self, ui, source):
                    super(lfsource, self).__init__(ui, source, None)
                    self.filemapper.rename[lfutil.shortname] = b'.'

                def getfile(self, name, rev):
                    realname, realrev = rev
                    f = super(lfsource, self).getfile(name, rev)

                    if (
                        not realname.startswith(lfutil.shortnameslash)
                        or f[0] is None
                    ):
                        return f

                    # Substitute in the largefile data for the hash
                    hash = f[0].strip()
                    path = lfutil.findfile(rsrc, hash)

                    if path is None:
                        raise error.Abort(
                            _(b"missing largefile for '%s' in %s")
                            % (realname, realrev)
                        )
                    return util.readfile(path), f[1]

            class converter(convcmd.converter):
                def __init__(self, ui, source, dest, revmapfile, opts):
                    src = lfsource(ui, source)

                    super(converter, self).__init__(
                        ui, src, dest, revmapfile, opts
                    )

            found, missing = downloadlfiles(ui, rsrc)
            if missing != 0:
                raise error.Abort(_(b"all largefiles must be present locally"))

            orig = convcmd.converter
            convcmd.converter = converter

            try:
                convcmd.convert(
                    ui, src, dest, source_type=b'hg', dest_type=b'hg'
                )
            finally:
                convcmd.converter = orig
        success = True
    finally:
        if tolfile:
            rdst.dirstate.clear()
            release(dstlock, dstwlock)
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)


def _lfconvert_addchangeset(
    rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash
):
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the largefile-ness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.copysource()
                if renamed is None:
                    # the code below assumes renamed to be a boolean or a list
                    # and won't quite work with the value None
                    renamed = False
                renamedlfile = renamed and renamed in lfiles
                islfile |= renamedlfile
                if b'l' in fctx.flags():
                    if renamedlfile:
                        raise error.Abort(
                            _(b'renamed/copied largefile %s becomes symlink')
                            % f
                        )
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            fstandin = lfutil.standin(f)
            dstfiles.append(fstandin)
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if b'l' in fctx.flags():
                    renamed = fctx.copysource()
                    if renamed and renamed in lfiles:
                        raise error.Abort(
                            _(b'largefile %s becomes symlink') % f
                        )

                # largefile was modified, update standins
                m = hashutil.sha1(b'')
                m.update(ctx[f].data())
                hash = hex(m.digest())
                if f not in lfiletohash or lfiletohash[f] != hash:
                    rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                    executable = b'x' in ctx[f].flags()
                    lfutil.writestandin(rdst, fstandin, hash, executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        srcfname = lfutil.splitstandin(f)
        if srcfname is not None:
            # if the file isn't in the manifest then it was removed
            # or renamed, return None to indicate this
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                return None
            renamed = fctx.copysource()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed)

            return context.memfilectx(
                repo,
                memctx,
                f,
                lfiletohash[srcfname] + b'\n',
                b'l' in fctx.flags(),
                b'x' in fctx.flags(),
                renamed,
            )
        else:
            return _getnormalcontext(repo, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)


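# For illustration: a standin is a one-line file holding the SHA-1 of the
# largefile contents, so a (hypothetical) standin .hglf/data/big.bin would
# contain just b'<40-hex-digest>\n', which is exactly what writestandin()
# stores from the lfiletohash map above.
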
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    mctx = context.memctx(
        rdst,
        parents,
        ctx.description(),
        dstfiles,
        getfilectx,
        ctx.user(),
        ctx.date(),
        ctx.extra(),
    )
    ret = rdst.commitctx(mctx)
    lfutil.copyalltostore(rdst, ret)
    rdst.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()


# Generate list of changed files
def _getchangedfiles(ctx, parents):
    files = set(ctx.files())
    if ctx.repo().nullid not in parents:
        mc = ctx.manifest()
        for pctx in ctx.parents():
            for fn in pctx.manifest().diff(mc):
                files.add(fn)
    return files


# Convert src parents to dst parents
def _convertparents(ctx, revmap):
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(ctx.repo().nullid)
    return parents


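# Example: a changeset with a single already-converted parent P maps to
# [revmap[P.node()], nullid] above, since the padding loop always fills
# the parent list out to exactly two entries.
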
# Get memfilectx for a normal file
def _getnormalcontext(repo, ctx, f, revmap):
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        return None
    renamed = fctx.copysource()

    data = fctx.data()
    if f == b'.hgtags':
        data = _converttags(repo.ui, revmap, data)
    return context.memfilectx(
        repo, ctx, f, data, b'l' in fctx.flags(), b'x' in fctx.flags(), renamed
    )


# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    newdata = []
    for line in data.splitlines():
        try:
            id, name = line.split(b' ', 1)
        except ValueError:
            ui.warn(_(b'skipping incorrectly formatted tag %s\n') % line)
            continue
        try:
            newid = bin(id)
        except TypeError:
            ui.warn(_(b'skipping incorrectly formatted id %s\n') % id)
            continue
        try:
            newdata.append(b'%s %s\n' % (hex(revmap[newid]), name))
        except KeyError:
            ui.warn(_(b'no mapping for id %s\n') % id)
            continue
    return b''.join(newdata)


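# Illustrative .hgtags rewrite (hashes are placeholders): a line such as
#   b'<old-40-hex-node> v1.0'
# becomes
#   b'<new-40-hex-node> v1.0'
# where the new node is hex(revmap[bin(old node)]).
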
def _islfile(file, ctx, matcher, size):
    """Return true if file should be considered a largefile, i.e.
    matcher matches it or it is larger than size."""
    # never store special .hg* files as largefiles
    if file == b'.hgtags' or file == b'.hgignore' or file == b'.hgsigs':
        return False
    if matcher and matcher(file):
        return True
    try:
        return ctx.filectx(file).size() >= size * 1024 * 1024
    except error.LookupError:
        return False


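# Note: `size` above is in MiB, so with the default minsize of 10 the
# cut-off works out to 10 * 1024 * 1024 = 10,485,760 bytes.
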
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = storefactory.openstore(rsrc, rdst, put=True)

    at = 0
    ui.debug(b"sending statlfile command for %d largefiles\n" % len(files))
    retval = store.exists(files)
    files = [h for h in files if not retval[h]]
    ui.debug(b"%d largefiles need to be uploaded\n" % len(files))

    with ui.makeprogress(
        _(b'uploading largefiles'), unit=_(b'files'), total=len(files)
    ) as progress:
        for hash in files:
            progress.update(at)
            source = lfutil.findfile(rsrc, hash)
            if not source:
                raise error.Abort(
                    _(
                        b'largefile %s missing from store'
                        b' (needs to be uploaded)'
                    )
                    % hash
                )
            # XXX check for errors here
            store.put(source, hash)
            at += 1


def verifylfiles(ui, repo, all=False, contents=False):
    """Verify that every largefile revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each local largefile file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository."""
    if all:
        revs = repo.revs(b'all()')
    else:
        revs = [b'.']

    store = storefactory.openstore(repo)
    return store.verify(revs, contents=contents)


def cachelfiles(ui, repo, node, filelist=None):
    """cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found."""
    lfiles = lfutil.listlfiles(repo, node)
    if filelist:
        lfiles = set(lfiles) & set(filelist)
    toget = []

    ctx = repo[node]
    for lfile in lfiles:
        try:
            expectedhash = lfutil.readasstandin(ctx[lfutil.standin(lfile)])
        except IOError as err:
            if err.errno == errno.ENOENT:
                continue  # node must be None and standin wasn't found in wctx
            raise
        if not lfutil.findfile(repo, expectedhash):
            toget.append((lfile, expectedhash))

    if toget:
        store = storefactory.openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])


def downloadlfiles(ui, repo):
    tonode = repo.changelog.node
    totalsuccess = 0
    totalmissing = 0
    for rev in repo.revs(b'file(%s)', b'path:' + lfutil.shortname):
        success, missing = cachelfiles(ui, repo, tonode(rev))
        totalsuccess += len(success)
        totalmissing += len(missing)
    ui.status(_(b"%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_(b"%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing


def updatelfiles(
    ui, repo, filelist=None, printmessage=None, normallookup=False
):
    """Update largefiles according to standins in the working directory

    If ``printmessage`` is other than ``None``, it means "print (or
    ignore, for false) message forcibly".
    """
    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            filelist = set(filelist)
            lfiles = [f for f in lfiles if f in filelist]

        with lfdirstate.parentchange():
            update = {}
            dropped = set()
            updated, removed = 0, 0
            wvfs = repo.wvfs
            wctx = repo[None]
            for lfile in lfiles:
                lfileorig = os.path.relpath(
                    scmutil.backuppath(ui, repo, lfile), start=repo.root
                )
                standin = lfutil.standin(lfile)
                standinorig = os.path.relpath(
                    scmutil.backuppath(ui, repo, standin), start=repo.root
                )
                if wvfs.exists(standin):
                    if wvfs.exists(standinorig) and wvfs.exists(lfile):
                        shutil.copyfile(wvfs.join(lfile), wvfs.join(lfileorig))
                        wvfs.unlinkpath(standinorig)
                    expecthash = lfutil.readasstandin(wctx[standin])
                    if expecthash != b'':
                        if lfile not in wctx:  # not switched to normal file
                            if repo.dirstate.get_entry(standin).any_tracked:
                                wvfs.unlinkpath(lfile, ignoremissing=True)
                            else:
                                dropped.add(lfile)

                        # use normallookup() to allocate an entry in largefiles
                        # dirstate to prevent lfilesrepo.status() from reporting
                        # missing files as removed.
                        lfdirstate.update_file(
                            lfile,
                            p1_tracked=True,
                            wc_tracked=True,
                            possibly_dirty=True,
                        )
                        update[lfile] = expecthash
                else:
                    # Remove lfiles for which the standin is deleted, unless the
                    # lfile is added to the repository again. This happens when a
                    # largefile is converted back to a normal file: the standin
                    # disappears, but a new (normal) file appears as the lfile.
                    if (
                        wvfs.exists(lfile)
                        and repo.dirstate.normalize(lfile) not in wctx
                    ):
                        wvfs.unlinkpath(lfile)
                        removed += 1

            # largefile processing might be slow and be interrupted - be prepared
            lfdirstate.write(repo.currenttransaction())

        if lfiles:
            lfiles = [f for f in lfiles if f not in dropped]

            for f in dropped:
                repo.wvfs.unlinkpath(lfutil.standin(f))
                # This needs to happen for dropped files, otherwise they stay in
                # the M state.
                lfdirstate._map.reset_state(f)

            statuswriter(_(b'getting changed largefiles\n'))
            cachelfiles(ui, repo, None, lfiles)

            with lfdirstate.parentchange():
                for lfile in lfiles:
                    update1 = 0

                    expecthash = update.get(lfile)
                    if expecthash:
                        if not lfutil.copyfromcache(repo, expecthash, lfile):
                            # failed ... but already removed and set to normallookup
                            continue
                        # Synchronize largefile dirstate to the last modified
                        # time of the file
                        lfdirstate.update_file(
                            lfile, p1_tracked=True, wc_tracked=True
                        )
                        update1 = 1

                    # copy the exec mode of largefile standin from the repository's
                    # dirstate to its state in the lfdirstate.
                    standin = lfutil.standin(lfile)
                    if wvfs.exists(standin):
                        # exec is decided by the user's permissions using mask 0o100
                        standinexec = wvfs.stat(standin).st_mode & 0o100
                        st = wvfs.stat(lfile)
                        mode = st.st_mode
                        if standinexec != mode & 0o100:
                            # first remove all X bits, then shift all R bits to X
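                            # e.g. assuming a umask of 0o022: a 0o644 file
                            # whose standin carries the exec bit becomes
                            # (0o644 & ~0o111) | ((0o644 >> 2) & 0o111 & ~0o022),
                            # i.e. 0o755; without it the mode stays 0o644.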
                            mode &= ~0o111
                            if standinexec:
                                mode |= (mode >> 2) & 0o111 & ~util.umask
                            wvfs.chmod(lfile, mode)
                            update1 = 1

                    updated += update1

                    lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)

        lfdirstate.write(repo.currenttransaction())
        if lfiles:
            statuswriter(
                _(b'%d largefiles updated, %d removed\n') % (updated, removed)
            )


@eh.command(
    b'lfpull',
    [(b'r', b'rev', [], _(b'pull largefiles for these revisions'))]
    + cmdutil.remoteopts,
    _(b'-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'),
)
def lfpull(ui, repo, source=b"default", **opts):
    """pull largefiles for the specified revisions from the specified source

    Pull largefiles that are referenced from local changesets but missing
    locally, pulling from a remote repository to the local cache.

    If SOURCE is omitted, the 'default' path will be used.
    See :hg:`help urls` for more information.

    .. container:: verbose

      Some examples:

      - pull largefiles for all branch heads::

          hg lfpull -r "head() and not closed()"

      - pull largefiles on the default branch::

          hg lfpull -r "branch(default)"
    """
    repo.lfpullsource = source

    revs = opts.get('rev', [])
    if not revs:
        raise error.Abort(_(b'no revisions specified'))
    revs = logcmdutil.revrange(repo, revs)

    numcached = 0
    for rev in revs:
        ui.note(_(b'pulling largefiles for revision %d\n') % rev)
        (cached, missing) = cachelfiles(ui, repo, rev)
        numcached += len(cached)
    ui.status(_(b"%d largefiles cached\n") % numcached)


@eh.command(b'debuglfput', [] + cmdutil.remoteopts, _(b'FILE'))
def debuglfput(ui, repo, filepath, **kwargs):
    hash = lfutil.hashfile(filepath)
    storefactory.openstore(repo).put(filepath, hash)
    ui.write(b'%s\n' % hash)
    return 0
@@ -1,798 +1,790 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''
from __future__ import absolute_import

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue


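# Illustrative use of lfstatus(): make a status query consider the
# largefiles themselves rather than their standins (assuming a repo object
# with this extension active):
#
#     with lfstatus(repo):
#         changes = repo.status()
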
def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fall back on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)


def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)


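# For example, on a POSIX system with largefiles.usercache unset, the
# lookup above resolves to $XDG_CACHE_HOME/largefiles when that variable
# is set, and otherwise to $HOME/.cache/largefiles.
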
def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        if tr:
            tr.addbackup(b'largefiles/dirstate', location=b'plain')
        super(largefilesdirstate, self).write(None)


def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        with lfdirstate.parentchange():
            for standin in standins:
                lfile = splitstandin(standin)
                lfdirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
                )
    return lfdirstate


def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.set_clean(lfile)
    return s


def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate.get_entry(f).any_tracked
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)


def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True


def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


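# e.g. standin(b'data/big.dat') == b'.hglf/data/big.dat'; a Windows-style
# b'data\\big.dat' argument is normalized to the same slash-separated form.
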
def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None


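# e.g. splitstandin(b'.hglf/data/big.dat') == b'data/big.dat', while
# splitstandin(b'data/big.dat') is None; this is the inverse of standin().
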
441 def updatestandin(repo, lfile, standin):
443 def updatestandin(repo, lfile, standin):
442 """Re-calculate hash value of lfile and write it into standin
444 """Re-calculate hash value of lfile and write it into standin
443
445
444 This assumes that "lfutil.standin(lfile) == standin", for efficiency.
446 This assumes that "lfutil.standin(lfile) == standin", for efficiency.
445 """
447 """
446 file = repo.wjoin(lfile)
448 file = repo.wjoin(lfile)
447 if repo.wvfs.exists(lfile):
449 if repo.wvfs.exists(lfile):
448 hash = hashfile(file)
450 hash = hashfile(file)
449 executable = getexecutable(file)
451 executable = getexecutable(file)
450 writestandin(repo, standin, hash, executable)
452 writestandin(repo, standin, hash, executable)
451 else:
453 else:
452 raise error.Abort(_(b'%s: file not found!') % lfile)
454 raise error.Abort(_(b'%s: file not found!') % lfile)
453
455
454
456
455 def readasstandin(fctx):
457 def readasstandin(fctx):
456 """read hex hash from given filectx of standin file
458 """read hex hash from given filectx of standin file
457
459
458 This encapsulates how "standin" data is stored into storage layer."""
460 This encapsulates how "standin" data is stored into storage layer."""
459 return fctx.data().strip()
461 return fctx.data().strip()
460
462
461
463
462 def writestandin(repo, standin, hash, executable):
464 def writestandin(repo, standin, hash, executable):
463 '''write hash to <repo.root>/<standin>'''
465 '''write hash to <repo.root>/<standin>'''
464 repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')
466 repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')
465
467
466
468
def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)

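Both copyandhash() and hashfile() stream data through SHA-1 in chunks rather than loading whole files, so arbitrarily large files never have to fit in memory. A stdlib-only sketch of the same pattern, with hashlib standing in for Mercurial's hashutil and a file-like object in place of the original's arbitrary iterable of byte chunks:

import hashlib

def copy_and_hash(instream, outfile, chunk_size=128 * 1024):
    # Stream fixed-size chunks to outfile while folding each one into
    # a running SHA-1 digest; return the hex digest at the end.
    hasher = hashlib.sha1()
    for data in iter(lambda: instream.read(chunk_size), b''):
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()
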
def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )


def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

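urljoin() normalizes each boundary to exactly one slash regardless of what the caller passes. A quick check against the function above, with hypothetical inputs:

# A trailing slash on the left and a leading slash on the right collapse
# to a single separator at every join point:
assert urljoin(b'http://store/', b'/objects', b'ab/cd') == b'http://store/objects/ab/cd'
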
def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object ``fileobj``"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')

def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes

def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins

def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
    else:
-        stat = repo.dirstate._map[lfstandin]
-        state, mtime = stat.state, stat.mtime
-        if state == b'n':
-            if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
-                # state 'n' doesn't ensure 'clean' in this case
-                lfdirstate.update_file(
-                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
-                )
-            else:
-                lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
-        elif state == b'm':
-            lfdirstate.update_file(
-                lfile, p1_tracked=True, wc_tracked=True, merged=True
-            )
-        elif state == b'r':
-            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
-        elif state == b'a':
-            lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
+        entry = repo.dirstate.get_entry(lfstandin)
+        lfdirstate.update_file(
+            lfile,
+            wc_tracked=entry.tracked,
+            p1_tracked=entry.p1_tracked,
+            p2_info=entry.p2_info,
+            possibly_dirty=True,
+        )

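The hunk above collapses the old per-state dispatch into a single update_file() call driven by attributes of the entry object returned by get_entry(). For reference, a rough correspondence between the removed one-letter states and the arguments the old branch passed (read off the deleted code above; not an exhaustive statement of dirstate semantics):

#   b'n' (normal)  -> p1_tracked=True,  wc_tracked=True  (possibly dirty)
#   b'm' (merged)  -> p1_tracked=True,  wc_tracked=True, merge info set
#   b'r' (removed) -> p1_tracked=True,  wc_tracked=False
#   b'a' (added)   -> p1_tracked=False, wc_tracked=True
# The new code reads the same facts directly off the entry's tracked,
# p1_tracked and p2_info attributes instead of dispatching on a letter.
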
def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    lfdirstate = openlfdirstate(repo.ui, repo)
    with lfdirstate.parentchange():
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
-        lfdirstate.write()
+        lfdirstate.write(repo.currenttransaction())

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies an additional "repo[node]"
    # lookup during copyalltostore(), but can omit the redundant check for
    # files coming from the 2nd parent, which should exist in the store
    # at merging.
    copyalltostore(repo, node)

def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist

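getlfilestoupdate() relies on set symmetric difference: a (name, hash) pair survives if it appears on only one side, so a file shows up whenever its hash changed or it was added or removed. A quick standalone illustration with made-up hashes:

old = [(b'a.bin', b'11'), (b'b.bin', b'22')]
new = [(b'a.bin', b'33'), (b'b.bin', b'22'), (b'c.bin', b'44')]
changed = set(old).symmetric_difference(set(new))
# changed == {(b'a.bin', b'11'), (b'a.bin', b'33'), (b'c.bin', b'44')};
# the loop above then deduplicates this to the names b'a.bin' and
# b'c.bin' (set iteration order is arbitrary).
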
def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))

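getlfilestoupload() reports its results through the addfunc callback instead of returning a list, which lets callers deduplicate or filter on the fly while the progress bar advances. A hedged sketch of a typical consumer; collect_outgoing and its dict accumulator are illustrative, not the extension's own code:

def collect_outgoing(repo, missing):
    # Accumulate standin name -> largefile hash, keeping the first
    # hash reported for each standin.
    tosend = {}
    getlfilestoupload(
        repo, missing, lambda fn, lfhash: tosend.setdefault(fn, lfhash)
    )
    return tosend
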
def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns a (possibly modified) ``match`` object to be used for
    the subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
-        if lfdirstate[lfile] != b'r':
+        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
-            if repo.dirstate[fstandin] != b'r':
-                if repo.dirstate[f] != b'r':
+            if not repo.dirstate.get_entry(fstandin).removed:
+                if not repo.dirstate.get_entry(f).removed:
                    continue
-            elif repo.dirstate[f] == b'?':
+            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match

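The matchfn swap at the end is the interesting trick: after the file list has been rewritten, the predicate answers True for standins and for ordinary (non-largefile) files the original matcher accepted, so commit() sees standins in place of big files. The same predicate-wrapping idea in miniature, as a plain standalone function rather than a matcher object:

def wrap_matcher(origmatchfn, lfiles, standins):
    # Route queries for big files to their standins; pass every other
    # file through the original predicate unchanged.
    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles  # plain largefile names are skipped...
        return f in standins        # ...their standins match instead
    return matchfn
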
class automatedcommithook(object):
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided during automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match

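The hook is deliberately one-shot: only the first call after resuming refreshes standins, and every later call is a passthrough. A minimal demonstration of that behavior; the repo and match values are placeholders supplied by the surrounding commit machinery, so the calls are shown commented out:

hook = automatedcommithook(resuming=True)
# First commit of e.g. `rebase --continue`: standins get refreshed.
# match = hook(repo, match)
# Any later commit in the same session: the hook returns match untouched,
# because self.resuming was flipped to False on the first call.
# match = hook(repo, match)
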
def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles-specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as the "default" writer function.

    Otherwise, this returns a function that always writes the status out
    (or ignores it if ``not forcibly``).
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE